diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000..d5a08f8a65 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,15 @@ +The title above should be a 1 line short summary of the issue. + +Enter a description of the issue. +This should include what the symptoms are, and steps to reproduce. + +Additionally please enter this information if applicable: + - Compiler and version + - MPI implementation and version + - NetCDF version + - Parallel NetCDF version + - Parallel IO version + - Output of `uname -a` + - The make line used to build the executable + - The line used to run the executable + - Test case used diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..602239bded --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,10 @@ +The title above should be a 1 line short summary of the pull request (i.e. what the project the PR represents is intended to do). + +Enter a description of this PR. This should include why this PR was created, and what it does. + +Testing and relations to other Pull Requests should be added as subsequent comments. + +See the below examples for more information. 
+https://github.com/MPAS-Dev/MPAS/pull/930 +https://github.com/MPAS-Dev/MPAS/pull/931 + diff --git a/.gitignore b/.gitignore index f1372d7d6f..c0852c1bdb 100644 --- a/.gitignore +++ b/.gitignore @@ -18,17 +18,22 @@ src/operators/*.f90 # Executables *_model +build_tables # NetCDF Files *.nc +# Restart timestamp file +restart_timestamp + # Graph files and partition files *.info *.info.part.* -# Error and Output log files +# Error, Output, and Abort log files *.out *.err +*.abort # Text files (For statistical output from ocean model) *.txt diff --git a/INSTALL b/INSTALL new file mode 100644 index 0000000000..1bdd0d89f1 --- /dev/null +++ b/INSTALL @@ -0,0 +1,59 @@ +Installing MPAS +==== + +For general information on how to install MPAS, see https://mpas-dev.github.io. +Additional notes on building MPAS on specific architectures are summarized here. + + +gfortran-clang: Compiling MPAS on MacOSX (10.11 El Capitan - 10.12 Sierra) +---------- +MPAS should compile out of the box on MacOSX with the standard (OS) clang compiler +and the gfortran compiler. The gfortran compiler can be installed using homebrew +(http://brew.sh), or using pre-compiled binaries from the MacOSX HPC website +(http://hpc.sourceforge.net), or it can be compiled by the user from the GNU sources. + +The standard clang compiler does not support OpenMP. Users wanting to compile MPAS +with OpenMP support on MacOSX will have to install the LLVM clang compiler, which is +accomplished easiest with homebrew. Since this alternative clang compiler is not in +the standard search/library path, the user will have to modify the call to the clang +and clang++ executable and add the library path to the compiler flags. Example: + +... 
+ "FC_PARALLEL = mpif90" \ + "CC_PARALLEL = mpicc -cc=clang" \ + "CXX_PARALLEL = mpicxx -cxx=clang++" \ + "FC_SERIAL = gfortran" \ + "CC_SERIAL = clang" \ + "CXX_SERIAL = clang++" \ + "FFLAGS_PROMOTION = -fdefault-real-8 -fdefault-double-8" \ + "FFLAGS_OPT = -O3 -m64 -ffree-line-length-none -fconvert=big-endian -ffree-form" \ + "CFLAGS_OPT = -O3 -m64" \ + "CXXFLAGS_OPT = -O3 -m64" \ + "LDFLAGS_OPT = -O3 -m64" \ + "FFLAGS_DEBUG = -g -m64 -ffree-line-length-none -fconvert=big-endian -ffree-form -fbounds-check -fbacktrace -ffpe-trap=invalid,zero,overflow" \ + "CFLAGS_DEBUG = -g -m64" \ + "CXXFLAGS_DEBUG = -O3 -m64" \ + "LDFLAGS_DEBUG = -g -m64" \ +... + +would become + +... + "FC_PARALLEL = mpif90" \ + "CC_PARALLEL = mpicc -cc=/usr/local/opt/llvm/bin/clang" \ + "CXX_PARALLEL = mpicxx -cxx=/usr/local/opt/llvm/bin/clang++" \ + "FC_SERIAL = gfortran" \ + "CC_SERIAL = /usr/local/opt/llvm/bin/clang" \ + "CXX_SERIAL = /usr/local/opt/llvm/bin/clang++" \ + "FFLAGS_PROMOTION = -fdefault-real-8 -fdefault-double-8" \ + "FFLAGS_OPT = -O3 -m64 -ffree-line-length-none -fconvert=big-endian -ffree-form -L/usr/local/opt/llvm/lib" \ + "CFLAGS_OPT = -O3 -m64 -L/usr/local/opt/llvm/lib" \ + "CXXFLAGS_OPT = -O3 -m64 -L/usr/local/opt/llvm/lib" \ + "LDFLAGS_OPT = -O3 -m64 -L/usr/local/opt/llvm/lib" \ + "FFLAGS_DEBUG = -g -m64 -L/usr/local/opt/llvm/lib -ffree-line-length-none -fconvert=big-endian -ffree-form -fbounds-check -fbacktrace -ffpe-trap=invalid,zero,overflow" \ + "CFLAGS_DEBUG = -g -m64 -L/usr/local/opt/llvm/lib" \ + "CXXFLAGS_DEBUG = -O3 -m64 -L/usr/local/opt/llvm/lib" \ + "LDFLAGS_DEBUG = -g -m64 -L/usr/local/opt/llvm/lib" \ +... + +assuming that the LLVM clang compiler is installed in /usr/local/opt/llvm. 
diff --git a/Makefile b/Makefile index 866bf53614..97ae93f709 100644 --- a/Makefile +++ b/Makefile @@ -12,17 +12,21 @@ xlf: "FC_SERIAL = xlf90" \ "CC_SERIAL = xlc" \ "CXX_SERIAL = xlcxx" \ - "FFLAGS_OPT = -O3 -qrealsize=8" \ + "FFLAGS_PROMOTION = -qrealsize=8" \ + "FFLAGS_OPT = -O3" \ "CFLAGS_OPT = -O3" \ "CXXFLAGS_OPT = -O3" \ "LDFLAGS_OPT = -O3" \ - "FFLAGS_DEBUG = -O0 -g -C -qrealsize=8" \ + "FFLAGS_DEBUG = -O0 -g -C" \ "CFLAGS_DEBUG = -O0 -g" \ "CXXFLAGS_DEBUG = -O0 -g" \ "LDFLAGS_DEBUG = -O0 -g" \ + "FFLAGS_OMP = -qsmp=omp" \ + "CFLAGS_OMP = -qsmp=omp" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI" ) ftn: @@ -33,13 +37,17 @@ ftn: "FC_SERIAL = ftn" \ "CC_SERIAL = cc" \ "CXX_SERIAL = CC" \ - "FFLAGS_OPT = -i4 -r8 -gopt -O2 -Mvect=nosse -Kieee -convert big_endian" \ + "FFLAGS_PROMOTION = -r8" \ + "FFLAGS_OPT = -i4 -gopt -O2 -Mvect=nosse -Kieee -convert big_endian" \ "CFLAGS_OPT = -fast" \ "CXXFLAGS_OPT = -fast" \ "LDFLAGS_OPT = " \ + "FFLAGS_OMP = -mp" \ + "CFLAGS_OMP = -mp" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) titan-cray: @@ -48,12 +56,16 @@ titan-cray: "CC_PARALLEL = cc" \ "FC_SERIAL = ftn" \ "CC_SERIAL = gcc" \ - "FFLAGS_OPT = -s integer32 -default64 -O3 -f free -N 255 -em -ef" \ + "FFLAGS_PROMOTION = -default64" \ + "FFLAGS_OPT = -s integer32 -O3 -f free -N 255 -em -ef" \ "CFLAGS_OPT = -O3" \ "LDFLAGS_OPT = -O3" \ + "FFLAGS_OMP = " \ + "CFLAGS_OMP = " \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) pgi: @@ -64,17 +76,21 @@ pgi: "FC_SERIAL = pgf90" \ "CC_SERIAL = pgcc" \ "CXX_SERIAL = pgc++" \ - "FFLAGS_OPT = -r8 -O3 -byteswapio -Mfree" \ + "FFLAGS_PROMOTION = -r8" \ + "FFLAGS_OPT = -O3 -byteswapio -Mfree" \ "CFLAGS_OPT = -O3" \ "CXXFLAGS_OPT = -O3" \ 
"LDFLAGS_OPT = -O3" \ - "FFLAGS_DEBUG = -r8 -O0 -g -Mbounds -Mchkptr -byteswapio -Mfree -Ktrap=divz,fp,inv,ovf -traceback" \ + "FFLAGS_DEBUG = -O0 -g -Mbounds -Mchkptr -byteswapio -Mfree -Ktrap=divz,fp,inv,ovf -traceback" \ "CFLAGS_DEBUG = -O0 -g -traceback" \ "CXXFLAGS_DEBUG = -O0 -g -traceback" \ "LDFLAGS_DEBUG = -O0 -g -Mbounds -Mchkptr -Ktrap=divz,fp,inv,ovf -traceback" \ + "FFLAGS_OMP = -mp" \ + "CFLAGS_OMP = -mp" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) pgi-nersc: @@ -85,13 +101,17 @@ pgi-nersc: "FC_SERIAL = ftn" \ "CC_SERIAL = cc" \ "CXX_SERIAL = CC" \ - "FFLAGS_OPT = -r8 -O3 -byteswapio -Mfree" \ + "FFLAGS_PROMOTION = -r8" \ + "FFLAGS_OPT = -O3 -byteswapio -Mfree" \ "CFLAGS_OPT = -O3" \ "CXXFLAGS_OPT = -O3" \ "LDFLAGS_OPT = -O3" \ + "FFLAGS_OMP = -mp" \ + "CFLAGS_OMP = -mp" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) pgi-llnl: @@ -102,13 +122,17 @@ pgi-llnl: "FC_SERIAL = pgf90" \ "CC_SERIAL = pgcc" \ "CXX_SERIAL = pgc++" \ - "FFLAGS_OPT = -i4 -r8 -g -O2 -byteswapio" \ + "FFLAGS_PROMOTION = -r8" \ + "FFLAGS_OPT = -i4 -g -O2 -byteswapio" \ "CFLAGS_OPT = -fast" \ "CXXFLAGS_OPT = -fast" \ "LDFLAGS_OPT = " \ + "FFLAGS_OMP = -mp" \ + "CFLAGS_OMP = -mp" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) ifort: @@ -119,17 +143,46 @@ ifort: "FC_SERIAL = ifort" \ "CC_SERIAL = icc" \ "CXX_SERIAL = icpc" \ - "FFLAGS_OPT = -real-size 64 -O3 -convert big_endian -FR" \ + "FFLAGS_PROMOTION = -real-size 64" \ + "FFLAGS_OPT = -O3 -convert big_endian -FR" \ "CFLAGS_OPT = -O3" \ "CXXFLAGS_OPT = -O3" \ "LDFLAGS_OPT = -O3" \ - "FFLAGS_DEBUG = -real-size 64 -g -convert big_endian -FR -CU -CB -check all -fpe0 -traceback" \ - "CFLAGS_DEBUG = -g -fpe0 -traceback" \ - 
"CXXFLAGS_DEBUG = -g -fpe0 -traceback" \ + "FFLAGS_DEBUG = -g -convert big_endian -FR -CU -CB -check all -fpe0 -traceback" \ + "CFLAGS_DEBUG = -g -traceback" \ + "CXXFLAGS_DEBUG = -g -traceback" \ "LDFLAGS_DEBUG = -g -fpe0 -traceback" \ + "FFLAGS_OMP = -qopenmp" \ + "CFLAGS_OMP = -qopenmp" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ + "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) + +ifort-scorep: + ( $(MAKE) all \ + "FC_PARALLEL = scorep --compiler mpif90" \ + "CC_PARALLEL = scorep --compiler mpicc" \ + "CXX_PARALLEL = scorep --compiler mpicxx" \ + "FC_SERIAL = ifort" \ + "CC_SERIAL = icc" \ + "CXX_SERIAL = icpc" \ + "FFLAGS_PROMOTION = -real-size 64" \ + "FFLAGS_OPT = -O3 -g -convert big_endian -FR" \ + "CFLAGS_OPT = -O3 -g" \ + "CXXFLAGS_OPT = -O3 -g" \ + "LDFLAGS_OPT = -O3 -g" \ + "FFLAGS_DEBUG = -g -convert big_endian -FR -CU -CB -check all -fpe0 -traceback" \ + "CFLAGS_DEBUG = -g -traceback" \ + "CXXFLAGS_DEBUG = -g -traceback" \ + "LDFLAGS_DEBUG = -g -fpe0 -traceback" \ + "FFLAGS_OMP = -qopenmp" \ + "CFLAGS_OMP = -qopenmp" \ + "CORE = $(CORE)" \ + "DEBUG = $(DEBUG)" \ + "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) ifort-gcc: @@ -140,17 +193,21 @@ ifort-gcc: "FC_SERIAL = ifort" \ "CC_SERIAL = gcc" \ "CXX_SERIAL = g++" \ - "FFLAGS_OPT = -real-size 64 -O3 -convert big_endian -FR" \ + "FFLAGS_PROMOTION = -real-size 64" \ + "FFLAGS_OPT = -O3 -convert big_endian -FR" \ "CFLAGS_OPT = -O3" \ "CXXFLAGS_OPT = -O3" \ "LDFLAGS_OPT = -O3" \ - "FFLAGS_DEBUG = -real-size 64 -g -convert big_endian -FR -CU -CB -check all -fpe0 -traceback" \ + "FFLAGS_DEBUG = -g -convert big_endian -FR -CU -CB -check all -fpe0 -traceback" \ "CFLAGS_DEBUG = -g" \ "CXXFLAGS_DEBUG = -g" \ "LDFLAGS_DEBUG = -g -fpe0 -traceback" \ + "FFLAGS_OMP = -qopenmp" \ + "CFLAGS_OMP = -fopenmp" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ 
"CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) gfortran: @@ -161,17 +218,46 @@ gfortran: "FC_SERIAL = gfortran" \ "CC_SERIAL = gcc" \ "CXX_SERIAL = g++" \ - "FFLAGS_OPT = -O3 -m64 -ffree-line-length-none -fdefault-real-8 -fdefault-double-8 -fconvert=big-endian -ffree-form" \ + "FFLAGS_PROMOTION = -fdefault-real-8 -fdefault-double-8" \ + "FFLAGS_OPT = -O3 -m64 -ffree-line-length-none -fconvert=big-endian -ffree-form" \ + "CFLAGS_OPT = -O3 -m64" \ + "CXXFLAGS_OPT = -O3 -m64" \ + "LDFLAGS_OPT = -O3 -m64" \ + "FFLAGS_DEBUG = -g -m64 -ffree-line-length-none -fconvert=big-endian -ffree-form -fbounds-check -fbacktrace -ffpe-trap=invalid,zero,overflow" \ + "CFLAGS_DEBUG = -g -m64" \ + "CXXFLAGS_DEBUG = -O3 -m64" \ + "LDFLAGS_DEBUG = -g -m64" \ + "FFLAGS_OMP = -fopenmp" \ + "CFLAGS_OMP = -fopenmp" \ + "CORE = $(CORE)" \ + "DEBUG = $(DEBUG)" \ + "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ + "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) + +gfortran-clang: + ( $(MAKE) all \ + "FC_PARALLEL = mpif90" \ + "CC_PARALLEL = mpicc -cc=clang" \ + "CXX_PARALLEL = mpicxx -cxx=clang++" \ + "FC_SERIAL = gfortran" \ + "CC_SERIAL = clang" \ + "CXX_SERIAL = clang++" \ + "FFLAGS_PROMOTION = -fdefault-real-8 -fdefault-double-8" \ + "FFLAGS_OPT = -O3 -m64 -ffree-line-length-none -fconvert=big-endian -ffree-form" \ "CFLAGS_OPT = -O3 -m64" \ "CXXFLAGS_OPT = -O3 -m64" \ "LDFLAGS_OPT = -O3 -m64" \ - "FFLAGS_DEBUG = -g -m64 -ffree-line-length-none -fdefault-real-8 -fdefault-double-8 -fconvert=big-endian -ffree-form -fbounds-check -fbacktrace -ffpe-trap=invalid,zero,overflow" \ + "FFLAGS_DEBUG = -g -m64 -ffree-line-length-none -fconvert=big-endian -ffree-form -fbounds-check -fbacktrace -ffpe-trap=invalid,zero,overflow" \ "CFLAGS_DEBUG = -g -m64" \ "CXXFLAGS_DEBUG = -O3 -m64" \ "LDFLAGS_DEBUG = -g -m64" \ + "FFLAGS_OMP = -fopenmp" \ + "CFLAGS_OMP = -fopenmp" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = 
$(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) g95: @@ -182,13 +268,17 @@ g95: "FC_SERIAL = g95" \ "CC_SERIAL = gcc" \ "CXX_SERIAL = g++" \ - "FFLAGS_OPT = -O3 -ffree-line-length-huge -r8 -fendian=big" \ + "FFLAGS_PROMOTION = -r8" \ + "FFLAGS_OPT = -O3 -ffree-line-length-huge -fendian=big" \ "CFLAGS_OPT = -O3" \ "CXXFLAGS_OPT = -O3" \ "LDFLAGS_OPT = -O3" \ + "FFLAGS_OMP = -fopenmp" \ + "CFLAGS_OMP = -fopenmp" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) pathscale-nersc: @@ -199,13 +289,17 @@ pathscale-nersc: "FC_SERIAL = ftn" \ "CC_SERIAL = cc" \ "CXX_SERIAL = CC" \ - "FFLAGS_OPT = -r8 -O3 -freeform -extend-source" \ + "FFLAGS_PROMOTION = -r8" \ + "FFLAGS_OPT = -O3 -freeform -extend-source" \ "CFLAGS_OPT = -O3" \ "CXXFLAGS_OPT = -O3" \ "LDFLAGS_OPT = -O3" \ + "FFLAGS_OMP = -mp" \ + "CFLAGS_OMP = -mp" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) cray-nersc: @@ -216,13 +310,17 @@ cray-nersc: "FC_SERIAL = ftn" \ "CC_SERIAL = cc" \ "CXX_SERIAL = CC" \ - "FFLAGS_OPT = -default64 -O3 -f free" \ + "FFLAGS_PROMOTION = -default64" \ + "FFLAGS_OPT = -O3 -f free" \ "CFLAGS_OPT = -O3" \ "CXXFLAGS_OPT = -O3" \ "LDFLAGS_OPT = -O3" \ + "FFLAGS_OMP = " \ + "CFLAGS_OMP = " \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) gnu-nersc: @@ -233,11 +331,12 @@ gnu-nersc: "FC_SERIAL = ftn" \ "CC_SERIAL = cc" \ "CXX_SERIAL = CC" \ - "FFLAGS_OPT = -O3 -m64 -ffree-line-length-none -fdefault-real-8 -fdefault-double-8 -fconvert=big-endian -ffree-form" \ + "FFLAGS_PROMOTION = -fdefault-real-8 -fdefault-double-8" \ + "FFLAGS_OPT = -O3 -m64 -ffree-line-length-none -fconvert=big-endian -ffree-form" \ "CFLAGS_OPT = -O3 -m64" \ "CXXFLAGS_OPT = -O3 -m64" \ "LDFLAGS_OPT = -O3 -m64" \ - "FFLAGS_DEBUG 
= -g -m64 -ffree-line-length-none -fdefault-real-8 -fdefault-double-8 -fconvert=big-endian -ffree-form" \ + "FFLAGS_DEBUG = -g -m64 -ffree-line-length-none -fconvert=big-endian -ffree-form" \ "CFLAGS_DEBUG = -g -m64" \ "CXXFLAGS_DEBUG = -g -m64" \ "LDFLAGS_DEBUG = -g -m64" \ @@ -255,13 +354,21 @@ intel-nersc: "FC_SERIAL = ftn" \ "CC_SERIAL = cc" \ "CXX_SERIAL = CC" \ - "FFLAGS_OPT = -real-size 64 -O3 -convert big_endian -FR" \ + "FFLAGS_PROMOTION = -real-size 64" \ + "FFLAGS_OPT = -O3 -convert big_endian -FR" \ "CFLAGS_OPT = -O3" \ "CXXFLAGS_OPT = -O3" \ "LDFLAGS_OPT = -O3" \ + "FFLAGS_OMP = -qopenmp" \ + "CFLAGS_OMP = -qopenmp" \ + "FFLAGS_DEBUG = -real-size 64 -g -convert big_endian -FR -CU -CB -check all -gen-interfaces -warn interfaces -traceback" \ + "CFLAGS_DEBUG = -g -traceback" \ + "CXXFLAGS_DEBUG = -g -traceback" \ + "LDFLAGS_DEBUG = -g -traceback" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE" ) bluegene: @@ -272,31 +379,47 @@ bluegene: "FC_SERIAL = bgxlf95_r" \ "CC_SERIAL = bgxlc_r" \ "CXX_SERIAL = bgxlc++_r" \ - "FFLAGS_OPT = -O2 -g -qrealsize=8" \ + "FFLAGS_PROMOTION = -qrealsize=8" \ + "FFLAGS_OPT = -O2 -g" \ "CFLAGS_OPT = -O2 -g" \ "CXXFLAGS_OPT = -O2 -g" \ "LDFLAGS_OPT = -O2 -g" \ - "FFLAGS_DEBUG = -O0 -g -C -qinitalloc -qinitauto -qrealsize=8" \ + "FFLAGS_DEBUG = -O0 -g -C -qinitalloc -qinitauto" \ "CFLAGS_DEBUG = -O0 -g" \ "CXXFLAGS_DEBUG = -O0 -g" \ "LDFLAGS_DEBUG = -O0 -g" \ + "FFLAGS_OMP = -qsmp=omp" \ + "CFLAGS_OMP = -qsmp=omp" \ "CORE = $(CORE)" \ "DEBUG = $(DEBUG)" \ "USE_PAPI = $(USE_PAPI)" \ + "OPENMP = $(OPENMP)" \ "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI" ) CPPINCLUDES = FCINCLUDES = LIBS = ifneq ($(wildcard $(PIO)/lib), ) # Check for newer PIO version +ifeq "$(USE_PIO2)" "true" + CPPINCLUDES = -DUSE_PIO2 -I$(PIO)/include + FCINCLUDES = -DUSE_PIO2 -I$(PIO)/include + LIBS = -L$(PIO)/lib -lpiof -lpioc +else CPPINCLUDES = -I$(PIO)/include 
FCINCLUDES = -I$(PIO)/include LIBS = -L$(PIO)/lib -lpio +endif +else +ifeq "$(USE_PIO2)" "true" + CPPINCLUDES = -DUSE_PIO2 -I$(PIO)/include + FCINCLUDES = -DUSE_PIO2 -I$(PIO)/include + LIBS = -L$(PIO) -lpiof -lpioc else CPPINCLUDES = -I$(PIO) FCINCLUDES = -I$(PIO) LIBS = -L$(PIO) -lpio endif +endif ifneq "$(PNETCDF)" "" CPPINCLUDES += -I$(PNETCDF)/include @@ -313,6 +436,11 @@ ifneq "$(NETCDF)" "" ifneq ($(wildcard $(NETCDF)/lib/libnetcdff.*), ) # CHECK FOR NETCDF4 LIBS += $(NCLIBF) endif # CHECK FOR NETCDF4 + ifneq "$(NETCDFF)" "" + FCINCLUDES += -I$(NETCDFF)/include + LIBS += -L$(NETCDFF)/lib + LIBS += $(NCLIBF) + endif LIBS += $(NCLIB) endif @@ -372,6 +500,25 @@ SFC=$(FC_SERIAL) SCC=$(CC_SERIAL) PARALLEL_MESSAGE="Parallel version is on." +ifeq "$(OPENMP)" "true" + FFLAGS += $(FFLAGS_OMP) + CFLAGS += $(CFLAGS_OMP) + CXXFLAGS += $(CFLAGS_OMP) + override CPPFLAGS += "-DMPAS_OPENMP" + LDFLAGS += $(FFLAGS_OMP) +endif #OPENMP IF + +ifeq "$(PRECISION)" "single" + FFLAGS += "-DSINGLE_PRECISION" + CFLAGS += "-DSINGLE_PRECISION" + CXXFLAGS += "-DSINGLE_PRECISION" + override CPPFLAGS += "-DSINGLE_PRECISION" + PRECISION_MESSAGE="MPAS was built with default single-precision reals." +else + FFLAGS += $(FFLAGS_PROMOTION) + PRECISION_MESSAGE="MPAS was built with default double-precision reals." +endif #PRECISION IF + ifeq "$(USE_PAPI)" "true" CPPINCLUDES += -I$(PAPI)/include -D_PAPI FCINCLUDES += -I$(PAPI)/include @@ -381,9 +528,40 @@ else # USE_PAPI IF PAPI_MESSAGE="Papi libraries are off." endif # USE_PAPI IF +ifeq "$(USE_PIO2)" "true" + PIO_MESSAGE="Using the PIO 2 library." +else # USE_PIO2 IF + PIO_MESSAGE="Using the PIO 1.x library." 
+endif # USE_PIO2 IF + +ifdef TIMER_LIB +ifeq "$(TIMER_LIB)" "tau" + override TAU=true + TIMER_MESSAGE="TAU is being used for the timer interface" +endif + +ifeq "$(TIMER_LIB)" "gptl" + override CPPFLAGS += -DMPAS_GPTL_TIMERS + override FCINCLUDES += -I${GPTL}/include + override LIBS += -L${GPTL}/lib -lgptl + TIMER_MESSAGE="GPTL is being used for the timer interface" +endif + +ifeq "$(TIMER_LIB)" "" + override CPPFLAGS += -DMPAS_NATIVE_TIMERS + TIMER_MESSAGE="The native timer interface is being used" +endif + +else # else ifdef $(TIMER_LIB) + + override CPPFLAGS += -DMPAS_NATIVE_TIMERS + TIMER_MESSAGE="The native timer interface is being used" + +endif # endif ifdef $(TIMER_LIB) + ifeq "$(TAU)" "true" LINKER=tau_f90.sh - CPPINCLUDES += -DMPAS_TAU + CPPINCLUDES += -DMPAS_TAU -DMPAS_TAU_TIMERS TAU_MESSAGE="TAU Hooks are on." else LINKER=$(FC) @@ -397,6 +575,12 @@ else GEN_F90_MESSAGE="MPAS was built with .F files." endif +ifeq "$(OPENMP)" "true" + OPENMP_MESSAGE="MPAS was built with OpenMP enabled." +else + OPENMP_MESSAGE="MPAS was built without OpenMP support." +endif + ifneq ($(wildcard .mpas_core_*), ) # CHECK FOR BUILT CORE ifneq ($(wildcard .mpas_core_$(CORE)), ) # CHECK FOR SAME CORE AS ATTEMPTED BUILD. 
@@ -450,11 +634,14 @@ endif # END OF GIT DESCRIBE VERSION # Section for adding external libraries and includes #################################################### ifdef MPAS_EXTERNAL_LIBS - LIBS += $(MPAS_EXTERNAL_LIBS) + override LIBS += $(MPAS_EXTERNAL_LIBS) endif ifdef MPAS_EXTERNAL_INCLUDES - CPPINCLUDES += $(MPAS_EXTERNAL_INCLUDES) - FCINCLUDES += $(MPAS_EXTERNAL_INCLUDES) + override CPPINCLUDES += $(MPAS_EXTERNAL_INCLUDES) + override FCINCLUDES += $(MPAS_EXTERNAL_INCLUDES) +endif +ifdef MPAS_EXTERNAL_CPPFLAGS + override CPPFLAGS += $(MPAS_EXTERNAL_CPPFLAGS) endif #################################################### @@ -478,7 +665,24 @@ endif endif -mpas_main: +compiler_test: +ifeq "$(OPENMP)" "true" + @echo "Testing compiler for OpenMP support" + @echo "#include <omp.h>" > conftest.c; echo "int main() { int n = omp_get_num_threads(); return 0; }" >> conftest.c; $(SCC) $(CFLAGS) -o conftest.out conftest.c || \ + (echo "$(SCC) does not support OpenMP - see INSTALL in top-level directory for more information"; rm -fr conftest.*; exit 1) + @echo "#include <omp.h>" > conftest.c; echo "int main() { int n = omp_get_num_threads(); return 0; }" >> conftest.c; $(CC) $(CFLAGS) -o conftest.out conftest.c || \ + (echo "$(CC) does not support OpenMP - see INSTALL in top-level directory for more information"; rm -fr conftest.*; exit 1) + @echo "#include <omp.h>" > conftest.cpp; echo "int main() { int n = omp_get_num_threads(); return 0; }" >> conftest.cpp; $(CXX) $(CFLAGS) -o conftest.out conftest.cpp || \ + (echo "$(CXX) does not support OpenMP - see INSTALL in top-level directory for more information"; rm -fr conftest.*; exit 1) + @echo "program test; use omp_lib; integer n; n = OMP_GET_NUM_THREADS(); stop 0; end program" > conftest.f90; $(SFC) $(FFLAGS) -o conftest.out conftest.f90 || \ + (echo "$(SFC) does not support OpenMP - see INSTALL in top-level directory for more information"; rm -fr conftest.*; exit 1) + @echo "program test; use omp_lib; integer n; n = OMP_GET_NUM_THREADS(); stop 
0; end program" > conftest.f90; $(FC) $(FFLAGS) -o conftest.out conftest.f90 || \ + (echo "$(FC) does not support OpenMP - see INSTALL in top-level directory for more information"; rm -fr conftest.*; exit 1) + @rm -fr conftest.* +endif + + +mpas_main: compiler_test ifeq "$(AUTOCLEAN)" "true" $(RM) .mpas_core_* endif @@ -508,14 +712,18 @@ endif if [ -e src/$(EXE_NAME) ]; then mv src/$(EXE_NAME) .; fi ( cd src/core_$(CORE); $(MAKE) ROOT_DIR="$(PWD)" post_build ) @echo "*******************************************************************************" + @echo $(PRECISION_MESSAGE) @echo $(DEBUG_MESSAGE) @echo $(PARALLEL_MESSAGE) @echo $(PAPI_MESSAGE) @echo $(TAU_MESSAGE) + @echo $(OPENMP_MESSAGE) ifeq "$(AUTOCLEAN)" "true" @echo $(AUTOCLEAN_MESSAGE) endif @echo $(GEN_F90_MESSAGE) + @echo $(TIMER_MESSAGE) + @echo $(PIO_MESSAGE) @echo "*******************************************************************************" clean: cd src; $(MAKE) clean RM="$(RM)" CORE="$(CORE)" @@ -586,6 +794,13 @@ errmsg: @echo " TAU=true - builds version using TAU hooks for profiling. Default is off." @echo " AUTOCLEAN=true - forces a clean of infrastructure prior to build new core." @echo " GEN_F90=true - Generates intermediate .f90 files through CPP, and builds with them." + @echo " TIMER_LIB=opt - Selects the timer library interface to be used for profiling the model. Options are:" + @echo " TIMER_LIB=native - Uses native built-in timers in MPAS" + @echo " TIMER_LIB=gptl - Uses gptl for the timer interface instead of the native interface" + @echo " TIMER_LIB=tau - Uses TAU for the timer interface instead of the native interface" + @echo " OPENMP=true - builds and links with OpenMP flags. Default is to not use OpenMP." + @echo " USE_PIO2=true - links with the PIO 2 library. Default is to use the PIO 1.x library." + @echo " PRECISION=single - builds with default single-precision real kind. Default is to use double-precision." 
@echo "" @echo "Ensure that NETCDF, PNETCDF, PIO, and PAPI (if USE_PAPI=true) are environment variables" @echo "that point to the absolute paths for the libraries." diff --git a/README.md b/README.md index 2d292a698c..6484abe1e6 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -MPAS-v4.0 +MPAS-v5.0 ==== The Model for Prediction Across Scales (MPAS) is a collaborative project for diff --git a/src/Makefile b/src/Makefile index 549553d638..cc0cc020d9 100644 --- a/src/Makefile +++ b/src/Makefile @@ -25,7 +25,7 @@ drver: $(AUTOCLEAN_DEPS) externals frame ops dycore endif build_tools: externals - (cd tools; $(MAKE) CPPFLAGS="$(CPPFLAGS)" CC="$(SCC)" ) + (cd tools; $(MAKE) CPPFLAGS="$(CPPFLAGS)" CC="$(SCC)" CFLAGS="$(CFLAGS)") frame: $(AUTOCLEAN_DEPS) externals ( cd framework; $(MAKE) CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)" all ) diff --git a/src/Makefile.in.ACME b/src/Makefile.in.ACME index a25971b524..3e5f6821a9 100644 --- a/src/Makefile.in.ACME +++ b/src/Makefile.in.ACME @@ -1,3 +1,18 @@ +# Duplicate logic from Tools/Makefile to set compile_threaded +compile_threaded = false +ifeq ($(strip $(SMP)),TRUE) + compile_threaded = true + THREADDIR = threads +else + ifeq ($(strip $(BUILD_THREADED)),TRUE) + compile_threaded = true + THREADDIR = threads + else + THREADDIR = nothreads + endif +endif +# End duplicated logic + include $(CASEROOT)/Macros ifneq ($(wildcard core_$(CORE)/build_options.mk), ) # Check for build_options.mk @@ -26,15 +41,16 @@ RM = rm -f CPP = cpp -P -traditional FC=$(MPIFC) CC=$(MPICC) +CXX=$(MPICXX) NETCDF=$(NETCDF_PATH) PNETCDF=$(PNETCDF_PATH) PIO=$(EXEROOT)/pio FILE_OFFSET = -DOFFSET64BIT -CFLAGS += -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -FFLAGS += -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -CPPFLAGS += $(CPPDEFS) $(MODEL_FORMULATION) $(FILE_OFFSET) $(ZOLTAN_DEFINE) -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -D_MPI -DMPAS_NAMELIST_SUFFIX=$(NAMELIST_SUFFIX) 
-DMPAS_EXE_NAME=$(EXE_NAME) -CPPINCLUDES += -I$(EXEROOT)/$(COMPONENT)/source/inc -I$(SHAREDPATH)/include -I$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/csm_share -I$(NETCDF)/include -I$(PIO) -I$(PNETCDF)/include -FCINCLUDES += -I$(EXEROOT)/$(COMPONENT)/source/inc -I$(SHAREDPATH)/include -I$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/csm_share -I$(NETCDF)/include -I$(PIO) -I$(PNETCDF)/include +override CFLAGS += -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -DMPAS_PERF_MOD_TIMERS +override FFLAGS += -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -DMPAS_PERF_MOD_TIMERS +override CPPFLAGS += $(CPPDEFS) $(MODEL_FORMULATION) $(FILE_OFFSET) $(ZOLTAN_DEFINE) -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -D_MPI -DMPAS_NAMELIST_SUFFIX=$(NAMELIST_SUFFIX) -DMPAS_EXE_NAME=$(EXE_NAME) -DMPAS_PERF_MOD_TIMERS +override CPPINCLUDES += -I$(EXEROOT)/$(COMPONENT)/source/inc -I$(SHAREDPATH)/include -I$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/csm_share -I$(NETCDF)/include -I$(PIO) -I$(PNETCDF)/include +override FCINCLUDES += -I$(EXEROOT)/$(COMPONENT)/source/inc -I$(SHAREDPATH)/include -I$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/csm_share -I$(NETCDF)/include -I$(PIO) -I$(PNETCDF)/include LIBS += -L$(PIO) -L$(PNETCDF)/lib -L$(NETCDF)/lib -L$(LIBROOT) -L$(SHAREDPATH)/lib -lpio -lpnetcdf -lnetcdf ifneq (,$(findstring FORTRANUNDERSCORE, $(CPPFLAGS))) @@ -47,11 +63,15 @@ ifeq ($(DEBUG), TRUE) override CPPFLAGS += -DMPAS_DEBUG endif +ifeq ($(compile_threaded), true) + override CPPFLAGS += -DMPAS_OPENMP +endif + all: @echo $(CPPINCLUDES) @echo $(FCINCLUDES) ( $(MAKE) mpas RM="$(RM)" CPP="$(CPP)" NETCDF="$(NETCDF)" PNETCDF="$(PNETCDF)" \ - PIO="$(PIO)" FC="$(FC)" CC="$(CC)" SFC="$(SFC)" SCC="$(SCC)" \ + PIO="$(PIO)" FC="$(FC)" CC="$(CC)" CXX="$(CXX)" SFC="$(SFC)" SCC="$(SCC)" \ CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)" FCINCLUDES="$(FCINCLUDES)" \ FFLAGS="$(FFLAGS)" 
CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" ) @@ -63,7 +83,7 @@ mpas: externals frame ops dycore drver ar ru lib$(COMPONENT).a $(DRIVER)/*.o externals: - ( cd external; $(MAKE) FC="$(FC)" SFC="$(SFC)" CC="$(CC)" SCC="$(SCC)" FFLAGS="$(FFLAGS)" CFLAGS="$(CFLAGS)" CPP="$(CPP)" NETCDF="$(NETCDF)" CORE="$(CORE)" ezxml-lib ) + ( cd external; $(MAKE) FC="$(FC)" SFC="$(SFC)" CC="$(CC)" CXX="$(CXX)" SCC="$(SCC)" FFLAGS="$(FFLAGS)" CFLAGS="$(CFLAGS)" CPP="$(CPP)" NETCDF="$(NETCDF)" CORE="$(CORE)" ezxml-lib ) drver: externals frame ops dycore ( cd $(DRIVER); $(MAKE) CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)" FREEFLAGS="$(FREEFLAGS)" all ) diff --git a/src/Makefile.in.CESM b/src/Makefile.in.CESM index a25971b524..3e5f6821a9 100644 --- a/src/Makefile.in.CESM +++ b/src/Makefile.in.CESM @@ -1,3 +1,18 @@ +# Duplicate logic from Tools/Makefile to set compile_threaded +compile_threaded = false +ifeq ($(strip $(SMP)),TRUE) + compile_threaded = true + THREADDIR = threads +else + ifeq ($(strip $(BUILD_THREADED)),TRUE) + compile_threaded = true + THREADDIR = threads + else + THREADDIR = nothreads + endif +endif +# End duplicated logic + include $(CASEROOT)/Macros ifneq ($(wildcard core_$(CORE)/build_options.mk), ) # Check for build_options.mk @@ -26,15 +41,16 @@ RM = rm -f CPP = cpp -P -traditional FC=$(MPIFC) CC=$(MPICC) +CXX=$(MPICXX) NETCDF=$(NETCDF_PATH) PNETCDF=$(PNETCDF_PATH) PIO=$(EXEROOT)/pio FILE_OFFSET = -DOFFSET64BIT -CFLAGS += -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -FFLAGS += -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -CPPFLAGS += $(CPPDEFS) $(MODEL_FORMULATION) $(FILE_OFFSET) $(ZOLTAN_DEFINE) -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -D_MPI -DMPAS_NAMELIST_SUFFIX=$(NAMELIST_SUFFIX) -DMPAS_EXE_NAME=$(EXE_NAME) -CPPINCLUDES += -I$(EXEROOT)/$(COMPONENT)/source/inc -I$(SHAREDPATH)/include -I$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/csm_share -I$(NETCDF)/include -I$(PIO) 
-I$(PNETCDF)/include -FCINCLUDES += -I$(EXEROOT)/$(COMPONENT)/source/inc -I$(SHAREDPATH)/include -I$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/csm_share -I$(NETCDF)/include -I$(PIO) -I$(PNETCDF)/include +override CFLAGS += -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -DMPAS_PERF_MOD_TIMERS +override FFLAGS += -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -DMPAS_PERF_MOD_TIMERS +override CPPFLAGS += $(CPPDEFS) $(MODEL_FORMULATION) $(FILE_OFFSET) $(ZOLTAN_DEFINE) -DMPAS_NO_LOG_REDIRECT -DMPAS_NO_ESMF_INIT -DMPAS_ESM_SHR_CONST -D_MPI -DMPAS_NAMELIST_SUFFIX=$(NAMELIST_SUFFIX) -DMPAS_EXE_NAME=$(EXE_NAME) -DMPAS_PERF_MOD_TIMERS +override CPPINCLUDES += -I$(EXEROOT)/$(COMPONENT)/source/inc -I$(SHAREDPATH)/include -I$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/csm_share -I$(NETCDF)/include -I$(PIO) -I$(PNETCDF)/include +override FCINCLUDES += -I$(EXEROOT)/$(COMPONENT)/source/inc -I$(SHAREDPATH)/include -I$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/csm_share -I$(NETCDF)/include -I$(PIO) -I$(PNETCDF)/include LIBS += -L$(PIO) -L$(PNETCDF)/lib -L$(NETCDF)/lib -L$(LIBROOT) -L$(SHAREDPATH)/lib -lpio -lpnetcdf -lnetcdf ifneq (,$(findstring FORTRANUNDERSCORE, $(CPPFLAGS))) @@ -47,11 +63,15 @@ ifeq ($(DEBUG), TRUE) override CPPFLAGS += -DMPAS_DEBUG endif +ifeq ($(compile_threaded), true) + override CPPFLAGS += -DMPAS_OPENMP +endif + all: @echo $(CPPINCLUDES) @echo $(FCINCLUDES) ( $(MAKE) mpas RM="$(RM)" CPP="$(CPP)" NETCDF="$(NETCDF)" PNETCDF="$(PNETCDF)" \ - PIO="$(PIO)" FC="$(FC)" CC="$(CC)" SFC="$(SFC)" SCC="$(SCC)" \ + PIO="$(PIO)" FC="$(FC)" CC="$(CC)" CXX="$(CXX)" SFC="$(SFC)" SCC="$(SCC)" \ CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)" FCINCLUDES="$(FCINCLUDES)" \ FFLAGS="$(FFLAGS)" CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" ) @@ -63,7 +83,7 @@ mpas: externals frame ops dycore drver ar ru lib$(COMPONENT).a $(DRIVER)/*.o externals: - ( cd external; $(MAKE) FC="$(FC)" SFC="$(SFC)" CC="$(CC)" 
SCC="$(SCC)" FFLAGS="$(FFLAGS)" CFLAGS="$(CFLAGS)" CPP="$(CPP)" NETCDF="$(NETCDF)" CORE="$(CORE)" ezxml-lib ) + ( cd external; $(MAKE) FC="$(FC)" SFC="$(SFC)" CC="$(CC)" CXX="$(CXX)" SCC="$(SCC)" FFLAGS="$(FFLAGS)" CFLAGS="$(CFLAGS)" CPP="$(CPP)" NETCDF="$(NETCDF)" CORE="$(CORE)" ezxml-lib ) drver: externals frame ops dycore ( cd $(DRIVER); $(MAKE) CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)" FREEFLAGS="$(FREEFLAGS)" all ) diff --git a/src/core_atmosphere/Makefile b/src/core_atmosphere/Makefile index 99482b6327..ca73840fc5 100644 --- a/src/core_atmosphere/Makefile +++ b/src/core_atmosphere/Makefile @@ -5,9 +5,10 @@ PHYSICS=-DDO_PHYSICS OBJS = mpas_atm_core.o \ mpas_atm_core_interface.o \ - mpas_atm_interp_diagnostics.o + mpas_atm_dimensions.o \ + mpas_atm_threading.o -all: physcore dycore atmcore +all: physcore dycore diagcore atmcore utilities core_reg: $(CPP) $(CPPFLAGS) $(CPPINCLUDES) Registry.xml > Registry_processed.xml @@ -27,25 +28,35 @@ post_build: cp default_inputs/* $(ROOT_DIR)/default_inputs/. ( cd $(ROOT_DIR)/default_inputs; for FILE in `ls -1`; do if [ ! -e ../$$FILE ]; then cp $$FILE ../.; fi; done ) -physcore: +physcore: mpas_atm_dimensions.o ( cd physics; $(MAKE) all ) ( mkdir libphys; cd libphys; ar -x ../physics/libphys.a ) ( cd ../..; ln -sf ./src/core_atmosphere/physics/physics_wrf/files/*TBL .) ( cd ../..; ln -sf ./src/core_atmosphere/physics/physics_wrf/files/*DATA* .) 
-dycore: +dycore: mpas_atm_dimensions.o ( cd dynamics; $(MAKE) all PHYSICS="$(PHYSICS)" ) -atmcore: physcore dycore $(OBJS) - ar -ru libdycore.a $(OBJS) dynamics/*.o libphys/*.o +diagcore: physcore dycore + ( cd diagnostics; $(MAKE) all ) + +utilities: physcore + ( cd utils; $(MAKE) all ) + +atmcore: physcore dycore diagcore $(OBJS) + ar -ru libdycore.a $(OBJS) dynamics/*.o libphys/*.o diagnostics/*.o mpas_atm_core_interface.o: mpas_atm_core.o -mpas_atm_core.o: dycore mpas_atm_interp_diagnostics.o +mpas_atm_core.o: dycore mpas_atm_threading.o + +mpas_atm_dimensions.o: clean: ( cd physics; $(MAKE) clean ) ( cd dynamics; $(MAKE) clean ) + ( cd diagnostics; $(MAKE) clean ) + ( cd utils; $(MAKE) clean ) ( cd ../..; rm -f *TBL ) ( cd ../..; rm -f *DATA* ) $(RM) -r libphys @@ -60,7 +71,7 @@ clean: $(RM) $@ $*.mod ifeq "$(GEN_F90)" "true" $(CPP) $(CPPFLAGS) $(PHYSICS) $(CPPINCLUDES) $< > $*.f90 - $(FC) $(FFLAGS) -c $*.f90 $(FCINCLUDES) -I../framework -I../operators -I./physics -I./dynamics -I./physics/physics_wrf -I../external/esmf_time_f90 + $(FC) $(FFLAGS) -c $*.f90 $(FCINCLUDES) -I../framework -I../operators -I./physics -I./dynamics -I./diagnostics -I./physics/physics_wrf -I../external/esmf_time_f90 else - $(FC) $(CPPFLAGS) $(PHYSICS) $(FFLAGS) -c $*.F $(CPPINCLUDES) $(FCINCLUDES) -I../framework -I../operators -I./physics -I./dynamics -I./physics/physics_wrf -I../external/esmf_time_f90 + $(FC) $(CPPFLAGS) $(PHYSICS) $(FFLAGS) -c $*.F $(CPPINCLUDES) $(FCINCLUDES) -I../framework -I../operators -I./physics -I./dynamics -I./diagnostics -I./physics/physics_wrf -I../external/esmf_time_f90 endif diff --git a/src/core_atmosphere/Registry.xml b/src/core_atmosphere/Registry.xml index a908908ed5..253970d1b0 100644 --- a/src/core_atmosphere/Registry.xml +++ b/src/core_atmosphere/Registry.xml @@ -1,31 +1,51 @@ - + - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + @@ -34,73 +54,312 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + - - - + + + + + - - - - + + + + + + + - - + + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -162,7 +421,7 @@ - + @@ -193,7 +452,6 @@ - @@ -207,6 +465,11 @@ + + + + + @@ -221,6 +484,7 @@ + - - - + - - @@ -301,6 +561,13 @@ + + + + + + + @@ -312,7 +579,6 @@ - @@ -329,7 +595,6 @@ - @@ -338,11 +603,11 @@ - + @@ -351,6 +616,18 @@ + + + + + + + + + + + + @@ -429,7 +706,7 @@ - + @@ -454,6 +731,14 @@ + + + + + + + + @@ -461,6 +746,7 @@ + @@ -469,6 +755,7 @@ + @@ -508,6 +795,13 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -597,7 +912,6 @@ - @@ -620,26 +934,99 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -662,133 +1061,346 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + - - - + + + - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + - + + + + - - - + + + - - - - - + + + + + + + + + - - + + + - - - + + + + + - + - + - - - - - + + + + + + + + + + - - - - - - + + + + + + + + + + + + + + + - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + @@ -796,146 +1408,273 @@ - - - - - - - - + + + + + + + + + + + + + + + - + - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + - - - - - + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + - - - - - - + + + + + + + + + + + + + + + - @@ -944,397 +1683,872 @@ - - - - + + + + + + + - - - + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - + - - - - + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - + - - - - + + + + + + + + + - - - - - - + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - + + + + + + + + + - - - - + + + - - - - + - - + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + description="effective radius of cloud ice crystals calculated in RRTMG radiation"/> @@ -1342,79 +2556,139 @@ - - - + + + + + - - + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + @@ -1422,48 +2696,88 @@ - - - - - - - - + + + + + + + + - - - + + - - + - - - - - - - + + + + + + + + + + + - - - - - - - - - - - - + + + + + + + + + + + + + + + + @@ -1472,104 +2786,182 @@ - - + + + + - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#include "diagnostics/Registry_diagnostics.xml" + diff --git a/src/core_atmosphere/diagnostics/Makefile b/src/core_atmosphere/diagnostics/Makefile new file mode 100644 index 0000000000..9d83d39c6b --- /dev/null +++ b/src/core_atmosphere/diagnostics/Makefile @@ -0,0 +1,44 @@ +.SUFFIXES: .F .o + +# +# Add new diagnostic modules to DIAGNOSTIC_MODULES +# +DIAGNOSTIC_MODULES = \ + mpas_atm_diagnostic_template.o \ + isobaric_diagnostics.o \ + convective_diagnostics.o \ + pv_diagnostics.o \ + soundings.o \ + +isobaric_diagnostics.o: mpas_atm_diagnostics_utils.o + +convective_diagnostics.o: mpas_atm_diagnostics_utils.o + +pv_diagnostics.o: mpas_atm_diagnostics_utils.o + +soundings.o: + + +################### Generally no need to modify below here ################### + + +OBJS = mpas_atm_diagnostics_manager.o mpas_atm_diagnostics_utils.o + +all: $(DIAGNOSTIC_MODULS) $(OBJS) + +mpas_atm_diagnostics_manager.o: mpas_atm_diagnostics_utils.o $(DIAGNOSTIC_MODULES) + + +clean: + $(RM) *.o *.mod *.f90 + @# Some Intel compilers generate *.i files; clean them up, too + $(RM) *.i + +.F.o: + $(RM) $@ $*.mod +ifeq "$(GEN_F90)" "true" + $(CPP) $(CPPFLAGS) $(PHYSICS) $(CPPINCLUDES) $< > $*.f90 + $(FC) $(FFLAGS) -c $*.f90 $(FCINCLUDES) -I../../framework -I../../operators -I../dynamics -I../physics -I../physics/physics_wrf -I../../external/esmf_time_f90 +else + $(FC) $(CPPFLAGS) $(PHYSICS) $(FFLAGS) -c $*.F 
$(CPPINCLUDES) $(FCINCLUDES) -I../../framework -I../../operators -I../dynamics -I../physics -I../physics/physics_wrf -I../../external/esmf_time_f90 +endif diff --git a/src/core_atmosphere/diagnostics/README b/src/core_atmosphere/diagnostics/README new file mode 100644 index 0000000000..e7ed654859 --- /dev/null +++ b/src/core_atmosphere/diagnostics/README @@ -0,0 +1,22 @@ +This document summarizes the steps that are required to add a new diagnostic +to MPAS-Atmosphere. The "template" files may be used as a guide for what is +generally required to implement a diagnostic. + +1) Define namelist options, dimensions, packages, and fields needed by + the diagnostic in a new Registry_.xml file. + + Add a #include statement for this new Registry file in + Registry_diagnostics.xml. + +2) Create a new module for the diagnostic; the "mpas_atm_diagnostic_template.F" + module file may be used as a template. + +3) Add calls to the diagnostic's "setup", "update", "compute", "reset", and + "cleanup" routines in the main diagnostic driver. Note that some diagnostics + may not require all of these routines, in which case they can simply be + omitted. + +4) Add the object file for the new diagnostic module to the definition of + DIAGNOSTIC_MODULES in the Makefile, and add a dependency on the new module + for the diagnostic driver. If the diagnostic has any other dependencies + within the diagnostics/ subdirectory, specify those as well in the Makefile. 
diff --git a/src/core_atmosphere/diagnostics/Registry_convective.xml b/src/core_atmosphere/diagnostics/Registry_convective.xml new file mode 100644 index 0000000000..59ad1c2f58 --- /dev/null +++ b/src/core_atmosphere/diagnostics/Registry_convective.xml @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/core_atmosphere/diagnostics/Registry_diagnostics.xml b/src/core_atmosphere/diagnostics/Registry_diagnostics.xml new file mode 100644 index 0000000000..8d2b815842 --- /dev/null +++ b/src/core_atmosphere/diagnostics/Registry_diagnostics.xml @@ -0,0 +1,21 @@ + + + + +#include "Registry_template.xml" + + +#include "Registry_isobaric.xml" + + +#include "Registry_convective.xml" + + +#include "Registry_pv.xml" + + +#include "Registry_soundings.xml" + + + + diff --git a/src/core_atmosphere/diagnostics/Registry_isobaric.xml b/src/core_atmosphere/diagnostics/Registry_isobaric.xml new file mode 100644 index 0000000000..daa758706b --- /dev/null +++ b/src/core_atmosphere/diagnostics/Registry_isobaric.xml @@ -0,0 +1,178 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/core_atmosphere/diagnostics/Registry_pv.xml b/src/core_atmosphere/diagnostics/Registry_pv.xml new file mode 100644 index 0000000000..fdf5d3b674 --- /dev/null +++ b/src/core_atmosphere/diagnostics/Registry_pv.xml @@ -0,0 +1,66 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/core_atmosphere/diagnostics/Registry_soundings.xml b/src/core_atmosphere/diagnostics/Registry_soundings.xml new file mode 100644 index 0000000000..a04efb6b03 --- /dev/null +++ b/src/core_atmosphere/diagnostics/Registry_soundings.xml @@ -0,0 +1,12 @@ + + + + + + + + + diff --git 
a/src/core_atmosphere/diagnostics/Registry_template.xml b/src/core_atmosphere/diagnostics/Registry_template.xml new file mode 100644 index 0000000000..c50af5ac11 --- /dev/null +++ b/src/core_atmosphere/diagnostics/Registry_template.xml @@ -0,0 +1,5 @@ + + + + + diff --git a/src/core_atmosphere/diagnostics/convective_diagnostics.F b/src/core_atmosphere/diagnostics/convective_diagnostics.F new file mode 100644 index 0000000000..dd5f8eed6c --- /dev/null +++ b/src/core_atmosphere/diagnostics/convective_diagnostics.F @@ -0,0 +1,1101 @@ +! Copyright (c) 2016, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +module convective_diagnostics + + use mpas_derived_types, only : MPAS_pool_type, MPAS_clock_type + use mpas_kind_types, only : RKIND + + type (MPAS_pool_type), pointer :: mesh + type (MPAS_pool_type), pointer :: state + type (MPAS_pool_type), pointer :: diag + + type (MPAS_clock_type), pointer :: clock + + public :: convective_diagnostics_setup, & + convective_diagnostics_update, & + convective_diagnostics_compute, & + convective_diagnostics_reset + + private + + ! + ! For any fields where we need the min/max/mean, it is helpful to determine + ! in the setup routine whether these fields will actually appear in any + ! output streams; if so, we need to handle the field in the update/reset + ! routines, but if not, we can save some computation + ! + logical :: is_needed_updraft_helicity + logical :: is_needed_w_max + logical :: is_needed_lml_wsp_max + + + contains + + + !----------------------------------------------------------------------- + ! routine convective_diagnostics_setup + ! 
+ !> \brief Set-up the convective diagnostics module + !> \author Michael Duda + !> \date 14 October 2016 + !> \details + !> To avoid later work in dereferencing pointers to various pools, + !> this routine saves pool pointers for use by + !> the convective_diagnostics_compute routine. + ! + !----------------------------------------------------------------------- + subroutine convective_diagnostics_setup(all_pools, simulation_clock) + + use mpas_derived_types, only : MPAS_pool_type, MPAS_clock_type, MPAS_STREAM_OUTPUT, MPAS_STREAM_INPUT, & + MPAS_STREAM_INPUT_OUTPUT + use mpas_pool_routines, only : mpas_pool_get_subpool + use mpas_atm_diagnostics_utils, only : mpas_stream_inclusion_count + + implicit none + + type (MPAS_pool_type), pointer :: all_pools + type (MPAS_clock_type), pointer :: simulation_clock + + call mpas_pool_get_subpool(all_pools, 'mesh', mesh) + call mpas_pool_get_subpool(all_pools, 'state', state) + call mpas_pool_get_subpool(all_pools, 'diag', diag) + + clock => simulation_clock + + is_needed_updraft_helicity = .false. + is_needed_w_max = .false. + is_needed_lml_wsp_max = .false. + + if (mpas_stream_inclusion_count('updraft_helicity_max', direction=MPAS_STREAM_OUTPUT) > 0) then + is_needed_updraft_helicity = .true. + end if + if (mpas_stream_inclusion_count('w_velocity_max', direction=MPAS_STREAM_OUTPUT) > 0) then + is_needed_w_max = .true. + end if + if (mpas_stream_inclusion_count('wind_speed_level1_max', direction=MPAS_STREAM_OUTPUT) > 0) then + is_needed_lml_wsp_max = .true. + end if + + end subroutine convective_diagnostics_setup + + + !----------------------------------------------------------------------- + ! routine convective_diagnostics_update + ! + !> \brief Updates the maximum pointwise values for convective diagnostics + !> \author Michael Duda + !> \date 18 October 2016 + !> \details + !> Updates the maximum pointwise values for updraft helicity, w velocity, + !> and lowest-model-level wind speed. + ! 
+ !----------------------------------------------------------------------- + subroutine convective_diagnostics_update() + + use mpas_pool_routines, only : mpas_pool_get_dimension, mpas_pool_get_array + + implicit none + +! - subroutine to compute max values of convective diagnostics over some time period +! - called at the end of every timestep +! - values must be zeroed out after output, and this determines the period for the max. +! +! WCS, March 2015, for the SPC spring experiment forecasts + + integer :: iCell, k, nVertLevelsP1, i, j + integer, pointer :: nCells, nVertLevels, nCellsSolve, nVertexDegree, nVertices + integer, pointer :: moist_start, moist_end + integer, dimension(:,:), pointer :: cellsOnVertex + real (kind=RKIND), dimension(:,:,:), pointer :: scalars + real (kind=RKIND), dimension(:,:), pointer :: cqu + + real (kind=RKIND), allocatable, dimension(:,:) :: updraft_helicity + real (kind=RKIND), allocatable, dimension(:) :: z_agl + real (kind=RKIND), dimension(:,:), pointer :: kiteAreasOnVertex, vorticity, w, zgrid + real (kind=RKIND), dimension(:), pointer :: areaCell, updraft_helicity_max, w_velocity_max + real (kind=RKIND), dimension(:), pointer :: wind_speed_level1_max + real (kind=RKIND), dimension(:,:), pointer :: uzonal, umeridional + real (kind=RKIND) :: uph + + if (is_needed_updraft_helicity .or. is_needed_w_max .or. 
is_needed_lml_wsp_max) then + + call mpas_pool_get_dimension(mesh, 'nCells', nCells) + call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) + call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(mesh, 'nVertices', nVertices) + call mpas_pool_get_dimension(mesh, 'vertexDegree', nVertexDegree) + call mpas_pool_get_array(mesh, 'areaCell', areaCell) + call mpas_pool_get_array(mesh, 'kiteAreasOnVertex', kiteAreasOnVertex) + call mpas_pool_get_array(mesh, 'cellsOnVertex', cellsOnVertex) + call mpas_pool_get_array(mesh, 'zgrid', zgrid) + + call mpas_pool_get_array(diag, 'vorticity', vorticity) + call mpas_pool_get_array(diag, 'updraft_helicity_max', updraft_helicity_max) + call mpas_pool_get_array(diag, 'w_velocity_max', w_velocity_max) + call mpas_pool_get_array(diag, 'wind_speed_level1_max', wind_speed_level1_max) + call mpas_pool_get_array(diag, 'uReconstructZonal', uzonal) + call mpas_pool_get_array(diag, 'uReconstructMeridional', umeridional) + + call mpas_pool_get_array(state, 'w', w, 1) + + nVertLevelsP1 = nVertLevels + 1 + + allocate(z_agl(nVertLevelsP1)) +! +! updraft helicity, first compute vorticity at cell center, then multiply by w +! *** where does cellsOnVertex point if cell is outside block and block halo? -> cell nCell+1 +! + if (is_needed_w_max) then + do iCell=1, nCellsSolve + w_velocity_max(iCell) = max( w_velocity_max(iCell), maxval(w(1:nVertLevels,iCell))) + end do + end if + + if (is_needed_lml_wsp_max) then + do iCell=1, nCellsSolve + wind_speed_level1_max(iCell) = max(wind_speed_level1_max(iCell), sqrt(uzonal(1,iCell)**2+umeridional(1,iCell)**2)) + end do + end if + + if (is_needed_updraft_helicity) then + allocate(updraft_helicity(nVertLevels,nCells+1)) + updraft_helicity(:,:) = 0. 
+ do i=1,nVertices + do j=1,nvertexDegree + iCell = cellsOnVertex(j,i) + updraft_helicity(1:nVertLevels,iCell) = updraft_helicity(1:nVertLevels,iCell) + kiteAreasOnVertex(j,i)*vorticity(1:nVertLevels,i) + end do + end do + do iCell=1,nCellsSolve + do k=1,nVertLevels + updraft_helicity(k,iCell) = max(0.,0.5*(w(k,iCell)+w(k+1,iCell))) & + * max(0.,updraft_helicity(k,iCell)/areaCell(iCell)) + end do + end do +! +! compute diagnostics +! + do iCell=1,nCellsSolve + + ! compute above ground level (AGL) heights + z_agl(1:nVertLevelsP1) = zgrid(1:nVertLevelsP1,iCell) - zgrid(1,iCell) + uph = integral_zstaggered(updraft_helicity(1:nVertLevels,iCell),z_agl,2000.,5000.,nVertLevels,nVertLevelsP1) + updraft_helicity_max(iCell) = max( updraft_helicity_max(iCell),uph) + + end do + deallocate(updraft_helicity) + end if + + deallocate(z_agl) + end if + + end subroutine convective_diagnostics_update + + + !----------------------------------------------------------------------- + ! routine convective_diagnostics_compute + ! + !> \brief Computes convective diagnostic + !> \author Michael Duda + !> \date 14 October 2016 + !> \details + !> This routine computes several diagnostics used in Spring Experiment + !> runs and originally added by WCS in March 2015. + !> The following fields are computed by this routine: + !> cape + !> cin + !> lcl + !> lfc + !> srh_0_1km + !> srh_0_3km + !> uzonal_surface + !> uzonal_1km + !> uzonal_6km + !> umeridional_surface + !> umeridional_1km + !> umeridional_6km + !> temperature_surface + !> dewpoint_surface + ! + !----------------------------------------------------------------------- + subroutine convective_diagnostics_compute() + + use mpas_atm_diagnostics_utils, only : MPAS_field_will_be_written + use mpas_constants, only : rvord + use mpas_pool_routines, only : mpas_pool_get_dimension, mpas_pool_get_array + + implicit none + + integer :: iCell, k + integer, pointer :: nCells, nCellsSolve, nVertLevels, nVertLevelsP1 + + ! 
Fields that are computed in this routine + real (kind=RKIND), dimension(:), pointer :: cape + real (kind=RKIND), dimension(:), pointer :: cin + real (kind=RKIND), dimension(:), pointer :: lcl + real (kind=RKIND), dimension(:), pointer :: lfc + real (kind=RKIND), dimension(:), pointer :: srh_0_1km + real (kind=RKIND), dimension(:), pointer :: srh_0_3km + real (kind=RKIND), dimension(:), pointer :: uzonal_surface + real (kind=RKIND), dimension(:), pointer :: uzonal_1km + real (kind=RKIND), dimension(:), pointer :: uzonal_6km + real (kind=RKIND), dimension(:), pointer :: umeridional_surface + real (kind=RKIND), dimension(:), pointer :: umeridional_1km + real (kind=RKIND), dimension(:), pointer :: umeridional_6km + real (kind=RKIND), dimension(:), pointer :: temperature_surface + real (kind=RKIND), dimension(:), pointer :: dewpoint_surface + + ! Other fields used in the computation of convective diagnostics + ! defined above + real (kind=RKIND), dimension(:,:), pointer :: height + real (kind=RKIND), dimension(:,:), pointer :: uzonal + real (kind=RKIND), dimension(:,:), pointer :: umeridional + real (kind=RKIND), dimension(:,:), pointer :: relhum + real (kind=RKIND), dimension(:,:), pointer :: exner + real (kind=RKIND), dimension(:,:), pointer :: theta_m + real (kind=RKIND), dimension(:,:), pointer :: pressure_p + real (kind=RKIND), dimension(:,:), pointer :: pressure_base + real (kind=RKIND), dimension(:,:,:), pointer :: scalars + integer, pointer :: index_qv + + real (kind=RKIND), parameter :: dev_motion = 7.5, z_bunker_bot = 0., z_bunker_top = 6000. 
+ real (kind=RKIND) :: u_storm, v_storm, u_srh_bot, v_srh_bot, u_srh_top, v_srh_top + real (kind=RKIND) :: u_mean, v_mean, u_shear, v_shear, shear_magnitude + real (kind=RKIND) :: b_term, cape_out, cin_out + real (kind=RKIND), dimension(:), allocatable :: dudz, dvdz, zp + real (kind=RKIND), dimension(:), allocatable :: zrel, srh + real (kind=RKIND), dimension(:), allocatable :: p_in, t_in, td_in + + real (kind=RKIND), dimension(:,:), allocatable :: temperature, dewpoint + + real (kind=RKIND) :: evp + + logical :: need_cape, need_cin, need_lcl, need_lfc, need_srh_01km, need_srh_03km, need_uzonal_sfc, need_uzonal_1km, & + need_uzonal_6km, need_umerid_sfc, need_umerid_1km, need_umerid_6km, need_tsfc, need_tdsfc + logical :: need_any_diags + + need_any_diags = .false. + + need_cape = MPAS_field_will_be_written('cape') + need_any_diags = need_any_diags .or. need_cape + need_cin = MPAS_field_will_be_written('cin') + need_any_diags = need_any_diags .or. need_cin + need_lcl = MPAS_field_will_be_written('lcl') + need_any_diags = need_any_diags .or. need_lcl + need_lfc = MPAS_field_will_be_written('lfc') + need_any_diags = need_any_diags .or. need_lfc + need_srh_01km = MPAS_field_will_be_written('srh_0_1km') + need_any_diags = need_any_diags .or. need_srh_01km + need_srh_03km = MPAS_field_will_be_written('srh_0_3km') + need_any_diags = need_any_diags .or. need_srh_03km + need_uzonal_sfc = MPAS_field_will_be_written('uzonal_surface') + need_any_diags = need_any_diags .or. need_uzonal_sfc + need_uzonal_1km = MPAS_field_will_be_written('uzonal_1km') + need_any_diags = need_any_diags .or. need_uzonal_1km + need_uzonal_6km = MPAS_field_will_be_written('uzonal_6km') + need_any_diags = need_any_diags .or. need_uzonal_6km + need_umerid_sfc = MPAS_field_will_be_written('umeridional_surface') + need_any_diags = need_any_diags .or. need_umerid_sfc + need_umerid_1km = MPAS_field_will_be_written('umeridional_1km') + need_any_diags = need_any_diags .or. 
need_umerid_1km + need_umerid_6km = MPAS_field_will_be_written('umeridional_6km') + need_any_diags = need_any_diags .or. need_umerid_6km + need_tsfc = MPAS_field_will_be_written('temperature_surface') + need_any_diags = need_any_diags .or. need_tsfc + need_tdsfc = MPAS_field_will_be_written('dewpoint_surface') + need_any_diags = need_any_diags .or. need_tdsfc + + if (need_any_diags) then + + call mpas_pool_get_dimension(mesh, 'nCells', nCells) + call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) + call mpas_pool_get_dimension(mesh, 'nVertLevelsP1', nVertLevelsP1) + + call mpas_pool_get_array(diag, 'cape', cape) + call mpas_pool_get_array(diag, 'cin', cin) + call mpas_pool_get_array(diag, 'lcl', lcl) + call mpas_pool_get_array(diag, 'lfc', lfc) + call mpas_pool_get_array(diag, 'srh_0_1km', srh_0_1km) + call mpas_pool_get_array(diag, 'srh_0_3km', srh_0_3km) + call mpas_pool_get_array(diag, 'uzonal_surface', uzonal_surface) + call mpas_pool_get_array(diag, 'uzonal_1km', uzonal_1km) + call mpas_pool_get_array(diag, 'uzonal_6km', uzonal_6km) + call mpas_pool_get_array(diag, 'umeridional_surface', umeridional_surface) + call mpas_pool_get_array(diag, 'umeridional_1km', umeridional_1km) + call mpas_pool_get_array(diag, 'umeridional_6km', umeridional_6km) + call mpas_pool_get_array(diag, 'temperature_surface', temperature_surface) + call mpas_pool_get_array(diag, 'dewpoint_surface', dewpoint_surface) + + call mpas_pool_get_array(mesh, 'zgrid', height) + call mpas_pool_get_array(diag, 'uReconstructMeridional', umeridional) + call mpas_pool_get_array(diag, 'uReconstructZonal', uzonal) + call mpas_pool_get_array(state, 'theta_m', theta_m, 1) + call mpas_pool_get_array(state, 'scalars', scalars, 1) + call mpas_pool_get_array(diag, 'exner', exner) + call mpas_pool_get_array(diag, 'relhum', relhum) + call mpas_pool_get_array(diag, 'pressure_base', pressure_base) + call mpas_pool_get_array(diag, 
'pressure_p', pressure_p) + + call mpas_pool_get_dimension(state, 'index_qv', index_qv) + + allocate(temperature(nVertLevels,nCells)) + allocate(dewpoint(nVertLevels,nCells)) + + allocate(dudz(nVertLevels)) + allocate(dvdz(nVertLevels)) + allocate(zp(nVertLevels)) + allocate(zrel(nVertLevels+1)) + allocate(srh(nVertLevels+1)) + + allocate(p_in(nVertLevels)) + allocate(t_in(nVertLevels)) + allocate(td_in(nVertLevels)) + + do iCell = 1,nCells + do k = 1,nVertLevels + temperature(k,iCell) = (theta_m(k,iCell)/(1._RKIND+rvord*scalars(index_qv,k,iCell)))*exner(k,iCell) + + ! Vapor pressure + evp = 0.01_RKIND * (pressure_base(k,iCell) + pressure_p(k,iCell)) & + * scalars(index_qv,k,iCell) / (scalars(index_qv,k,iCell) + 0.622_RKIND) + evp = max(evp, 1.0e-8_RKIND) + + ! Dewpoint temperature following Bolton (1980) + dewpoint(k,iCell) = (243.5_RKIND * log(evp/6.112_RKIND)) / (17.67_RKIND - log(evp/6.112_RKIND)) + dewpoint(k,iCell) = dewpoint(k,iCell) + 273.15 + enddo + enddo + + + ! first the shear values. 
We will use lowest model level velocity for surface velocity + do iCell=1,nCellsSolve + + zp(1:nVertLevels) = 0.5*(height(1:nVertLevels,iCell)+height(2:nVertlevels+1,iCell)) - height(1,iCell) + zrel(1:nVertLevels+1) = height(1:nVertLevels+1,iCell) - height(1,iCell) + + uzonal_surface(iCell) = uzonal(1,iCell) + umeridional_surface(iCell) = umeridional(1,iCell) + temperature_surface(iCell) = temperature(1,iCell) + dewpoint_surface(iCell) = dewpoint(1,iCell) + if (need_uzonal_1km) then + uzonal_1km(iCell) = column_height_value(uzonal(1:nVertLevels,iCell), zp, 1000., nVertLevels) + end if + if (need_umerid_1km) then + umeridional_1km(iCell) = column_height_value(umeridional(1:nVertLevels,iCell), zp, 1000., nVertLevels) + end if + if (need_uzonal_6km) then + uzonal_6km(iCell) = column_height_value(uzonal(1:nVertLevels,iCell), zp, 6000., nVertLevels) + end if + if (need_umerid_6km) then + umeridional_6km(iCell) = column_height_value(umeridional(1:nVertLevels,iCell), zp, 6000., nVertLevels) + end if + + ! storm-relative helicity + ! first, calculate storm motion, using Bunkers formula for right-moving storms + + if (need_srh_01km .or. need_srh_03km) then + u_srh_bot = uzonal(1,iCell) + v_srh_bot = umeridional(1,iCell) + if(z_bunker_bot .gt. 
zp(1)) then + u_srh_bot = column_height_value( uzonal(1:nVertLevels,iCell), zp, z_bunker_bot, nVertLevels) + v_srh_bot = column_height_value( umeridional(1:nVertLevels,iCell), zp, z_bunker_bot, nVertLevels) + end if + u_srh_top = column_height_value( uzonal(1:nVertLevels,iCell), zp, z_bunker_top, nVertLevels) + v_srh_top = column_height_value( umeridional(1:nVertLevels,iCell), zp, z_bunker_top, nVertLevels) + u_shear = u_srh_top - u_srh_bot + v_shear = v_srh_top - v_srh_bot + u_mean = integral_zstaggered(uzonal(1:nVertLevels,iCell),zrel,z_bunker_bot,z_bunker_top,nVertLevels,nVertLevelsP1)/(z_bunker_top-z_bunker_bot) + v_mean = integral_zstaggered(umeridional(1:nVertLevels,iCell),zrel,z_bunker_bot,z_bunker_top,nVertLevels,nVertLevelsP1)/(z_bunker_top-z_bunker_bot) + shear_magnitude = max(0.0001,sqrt(u_shear**2 + v_shear**2)) + u_storm = u_mean + dev_motion * v_shear/shear_magnitude + v_storm = v_mean - dev_motion * u_shear/shear_magnitude + + ! calculate horizontal vorticity + + do k=2, nVertLevels-1 + dudz(k) = (uzonal(k,iCell) -uzonal(k-1,iCell) )/(0.5*(height(k+1,iCell)-height(k-1,iCell))) + dvdz(k) = (umeridional(k,iCell)-umeridional(k-1,iCell))/(0.5*(height(k+1,iCell)-height(k-1,iCell))) + end do + dudz(1) = dudz(2) + dvdz(1) = dvdz(2) + dudz(nVertLevels) = dudz(nVertLevels-1) + dvdz(nVertLevels) = dvdz(nVertLevels-1) + + do k=2,nVertLevels + srh(k) = - (0.5*(uzonal(k,iCell) + uzonal(k-1,iCell) )-u_storm)*dvdz(k) & + + (0.5*(umeridional(k,iCell) + umeridional(k-1,iCell) )-v_storm)*dudz(k) + end do + srh(1) = - (uzonal(1,iCell) - u_storm)*dvdz(1) & + + (umeridional(1,iCell)- v_storm)*dudz(1) + srh(nVertLevelsP1) = srh(nVertLevels) + + do k=1, nVertLevels+1 + srh(k) = max(0.,srh(k)) ! discounting negative SRH + end do + + if (need_srh_01km) then + srh_0_1km(iCell) = integral_zpoint(srh, zrel, 0., 1000., nVertLevelsP1) + end if + if (need_srh_03km) then + srh_0_3km(iCell) = integral_zpoint(srh, zrel, 0., 3000., nVertLevelsP1) + end if + end if + + end do + + ! 
calculate cape and cin + if (need_cape .or. need_cin) then + do iCell=1, nCellsSolve + p_in(1:nVertLevels) = (pressure_p(1:nVertLevels,iCell) + pressure_base(1:nVertLevels,iCell)) / 100.0_RKIND + t_in(1:nVertLevels) = temperature(1:nVertLevels,iCell) - 273.15 + td_in(1:nVertLevels) = dewpoint(1:nVertLevels,iCell) - 273.15 + + ! do k=1,nVertLevels + ! relhum(k,iCell) = max(1.e-08,min(1.,relhum(k,iCell))) + ! td_in(k) = 243.04*(log(relhum(k,iCell))+((17.625*t_in(k))/(243.04+t_in(k)))) & + ! /(17.625-log(relhum(k,iCell))-((17.625*t_in(k))/(243.04+t_in(k)))) + ! end do + + call getcape( nVertLevels, p_in, t_in, td_in, cape_out, cin_out ) + + cape(iCell) = cape_out + cin(iCell) = cin_out + + end do + end if + + deallocate(temperature) + deallocate(dewpoint) + + deallocate(dudz) + deallocate(dvdz) + deallocate(zp) + deallocate(zrel) + deallocate(srh) + deallocate(p_in) + deallocate(t_in) + deallocate(td_in) + end if + + end subroutine convective_diagnostics_compute + + + !----------------------------------------------------------------------- + ! routine convective_diagnostics_reset + ! + !> \brief Reset maximum values for convective diagnostics + !> \author + !> \date + !> \details + !> Resets the maximum pointwise values for updraft helicity, w velocity, + !> and lowest-model-level wind speed. + ! + !----------------------------------------------------------------------- + subroutine convective_diagnostics_reset() + + use mpas_atm_diagnostics_utils, only : MPAS_field_will_be_written + use mpas_pool_routines, only : mpas_pool_get_array + + implicit none + + real (kind=RKIND), dimension(:), pointer :: updraft_helicity_max + real (kind=RKIND), dimension(:), pointer :: w_velocity_max + real (kind=RKIND), dimension(:), pointer :: wind_speed_level1_max + + if (MPAS_field_will_be_written('updraft_helicity_max')) then + call mpas_pool_get_array(diag, 'updraft_helicity_max', updraft_helicity_max) + updraft_helicity_max(:) = 0. 
+ end if + if (MPAS_field_will_be_written('w_velocity_max')) then + call mpas_pool_get_array(diag, 'w_velocity_max', w_velocity_max) + w_velocity_max(:) = 0. + end if + if (MPAS_field_will_be_written('wind_speed_level1_max')) then + call mpas_pool_get_array(diag, 'wind_speed_level1_max', wind_speed_level1_max) + wind_speed_level1_max(:) = 0. + end if + + end subroutine convective_diagnostics_reset + + + real (kind=RKIND) function column_height_value( column_values, z, z_interp, n ) + implicit none + integer n + real (kind=RKIND) :: column_values(n), z(n), z_interp, wz, wzp1 + integer :: kz, k +! we assume height increases monotonically with n + kz = 1 + do k=1,n + if(z(k) <= z_interp) kz = k + end do + kz = min(kz,n-1) + + wz = (z(kz+1)-z_interp)/(z(kz+1)-z(kz)) + wzp1 = 1. - wz + column_height_value = wz*column_values(kz) + wzp1*column_values(kz+1) + + end function column_height_value + +!--------------------------- + + real (kind=RKIND) function integral_zstaggered( column_values, z, zbot, ztop, n, np1 ) + implicit none + integer n, np1 + real (kind=RKIND) :: column_values(n), z(np1), zbot, ztop + real (kind=RKIND) :: zb, zt + + integer :: k + +! integral from z_bot to z_top, assume cell-average values (first-order integration) +! z increases monotonically + + integral_zstaggered = 0. + do k=1,n + zb = max(z(k), zbot) + zt = min(z(k+1), ztop) + integral_zstaggered = integral_zstaggered + column_values(k)*max(0.,(zt-zb)) + end do + end function integral_zstaggered + +!--------------------------------- + + real (kind=RKIND) function integral_zpoint( column_values, z, zbot, ztop, n ) + implicit none + integer n + real :: column_values(n), z(n), zbot, ztop + real :: zb, zt, dz, zr_midpoint, midpoint_value + + integer :: k + +! integral from z_bot to z_top, assume point values (second-order integration) +! z increases monotonically + + integral_zpoint = 0. 
+ do k=1,n-1 + zb = max(z(k), zbot) + zt = min(z(k+1), ztop) + dz = max(0.,zt-zb) + zr_midpoint = (0.5*(zt+zb) - z(k))/(z(k+1)-z(k)) + midpoint_value = column_values(k) + (column_values(k+1)-column_values(k))*zr_midpoint + integral_zpoint = integral_zpoint + dz*midpoint_value + end do + end function integral_zpoint + +!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- + + subroutine getcape( nk , p_in , t_in , td_in, cape , cin ) + implicit none + + integer, intent(in) :: nk + real (kind=RKIND), dimension(nk), intent(in) :: p_in,t_in,td_in + real (kind=RKIND), intent(out) :: cape,cin + +!----------------------------------------------------------------------- +! +! getcape - a fortran90 subroutine to calculate Convective Available +! Potential Energy (CAPE) from a sounding. +! +! Version 1.02 Last modified: 10 October 2008 +! +! Author: George H. Bryan +! Mesoscale and Microscale Meteorology Division +! National Center for Atmospheric Research +! Boulder, Colorado, USA +! gbryan@ucar.edu +! +! Disclaimer: This code is made available WITHOUT WARRANTY. +! +! References: Bolton (1980, MWR, p. 1046) (constants and definitions) +! Bryan and Fritsch (2004, MWR, p. 2421) (ice processes) +! +!----------------------------------------------------------------------- +! +! Input: nk - number of levels in the sounding (integer) +! +! p_in - one-dimensional array of pressure (mb) (real) +! +! t_in - one-dimensional array of temperature (C) (real) +! +! td_in - one-dimensional array of dewpoint temperature (C) (real) +! +! Output: cape - Convective Available Potential Energy (J/kg) (real) +! +! cin - Convective Inhibition (J/kg) (real) +! +!----------------------------------------------------------------------- +! User options: + + real (kind=RKIND), parameter :: pinc = 100.0 ! Pressure increment (Pa) + ! 
(smaller number yields more accurate + ! results,larger number makes code + ! go faster) + + integer, parameter :: source = 2 ! Source parcel: + ! 1 = surface + ! 2 = most unstable (max theta-e) + ! 3 = mixed-layer (specify ml_depth) + + real (kind=RKIND), parameter :: ml_depth = 200.0 ! depth (m) of mixed layer + ! for source=3 + + integer, parameter :: adiabat = 1 ! Formulation of moist adiabat: + ! 1 = pseudoadiabatic, liquid only + ! 2 = reversible, liquid only + ! 3 = pseudoadiabatic, with ice + ! 4 = reversible, with ice + +!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- +! No need to modify anything below here: +!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- + + logical :: doit,ice,cloud,not_converged + integer :: k,kmax,n,nloop,i,orec + real (kind=RKIND), dimension(nk) :: p,t,td,pi,q,th,thv,z,pt,pb,pc,pn,ptv + + real (kind=RKIND) :: the,maxthe,parea,narea,lfc + real (kind=RKIND) :: th1,p1,t1,qv1,ql1,qi1,b1,pi1,thv1,qt,dp,dz,ps,frac + real (kind=RKIND) :: th2,p2,t2,qv2,ql2,qi2,b2,pi2,thv2 + real (kind=RKIND) :: thlast,fliq,fice,tbar,qvbar,qlbar,qibar,lhv,lhs,lhf,rm,cpm + real*8 :: avgth,avgqv +! 
real (kind=RKIND) :: getqvs,getqvi,getthe + +!----------------------------------------------------------------------- + + real (kind=RKIND), parameter :: g = 9.81 + real (kind=RKIND), parameter :: p00 = 100000.0 + real (kind=RKIND), parameter :: cp = 1005.7 + real (kind=RKIND), parameter :: rd = 287.04 + real (kind=RKIND), parameter :: rv = 461.5 + real (kind=RKIND), parameter :: xlv = 2501000.0 + real (kind=RKIND), parameter :: xls = 2836017.0 + real (kind=RKIND), parameter :: t0 = 273.15 + real (kind=RKIND), parameter :: cpv = 1875.0 + real (kind=RKIND), parameter :: cpl = 4190.0 + real (kind=RKIND), parameter :: cpi = 2118.636 + real (kind=RKIND), parameter :: lv1 = xlv+(cpl-cpv)*t0 + real (kind=RKIND), parameter :: lv2 = cpl-cpv + real (kind=RKIND), parameter :: ls1 = xls+(cpi-cpv)*t0 + real (kind=RKIND), parameter :: ls2 = cpi-cpv + + real (kind=RKIND), parameter :: rp00 = 1.0/p00 + real (kind=RKIND), parameter :: eps = rd/rv + real (kind=RKIND), parameter :: reps = rv/rd + real (kind=RKIND), parameter :: rddcp = rd/cp + real (kind=RKIND), parameter :: cpdrd = cp/rd + real (kind=RKIND), parameter :: cpdg = cp/g + + real (kind=RKIND), parameter :: converge = 0.0002 + + integer, parameter :: debug_level = 0 + +!----------------------------------------------------------------------- + +!---- convert p,t,td to mks units; get pi,q,th,thv ----! + + do k=1,nk + p(k) = 100.0*p_in(k) + t(k) = 273.15+t_in(k) + td(k) = 273.15+td_in(k) + pi(k) = (p(k)*rp00)**rddcp + q(k) = getqvs(p(k),td(k)) + th(k) = t(k)/pi(k) + thv(k) = th(k)*(1.0+reps*q(k))/(1.0+q(k)) + enddo + +!---- get height using the hydrostatic equation ----! + + z(1) = 0.0 + do k=2,nk + dz = -cpdg*0.5*(thv(k)+thv(k-1))*(pi(k)-pi(k-1)) + z(k) = z(k-1) + dz + enddo + +!---- find source parcel ----! + + IF(source.eq.1)THEN + ! use surface parcel + kmax = 1 + + ELSEIF(source.eq.2)THEN + ! use most unstable parcel (max theta-e) + + IF(p(1).lt.50000.0)THEN + ! first report is above 500 mb ... 
just use the first level reported + kmax = 1 + maxthe = getthe(p(1),t(1),td(1),q(1)) + ELSE + ! find max thetae below 500 mb + maxthe = 0.0 + do k=1,nk + if(p(k).ge.50000.0)then + the = getthe(p(k),t(k),td(k),q(k)) + if( the.gt.maxthe )then + maxthe = the + kmax = k + endif + endif + enddo + ENDIF + if(debug_level.ge.100) print *,' kmax,maxthe = ',kmax,maxthe + + ELSEIF(source.eq.3)THEN + ! use mixed layer + + IF( (z(2)-z(1)).gt.ml_depth )THEN + ! the second level is above the mixed-layer depth: just use the + ! lowest level + + avgth = th(1) + avgqv = q(1) + kmax = 1 + + ELSEIF( z(nk).lt.ml_depth )THEN + ! the top-most level is within the mixed layer: just use the + ! upper-most level + + avgth = th(nk) + avgqv = q(nk) + kmax = nk + + ELSE + ! calculate the mixed-layer properties: + + avgth = 0.0 + avgqv = 0.0 + k = 2 + if(debug_level.ge.100) print *,' ml_depth = ',ml_depth + if(debug_level.ge.100) print *,' k,z,th,q:' + if(debug_level.ge.100) print *,1,z(1),th(1),q(1) + + do while( (z(k).le.ml_depth) .and. (k.le.nk) ) + + if(debug_level.ge.100) print *,k,z(k),th(k),q(k) + + avgth = avgth + 0.5*(z(k)-z(k-1))*(th(k)+th(k-1)) + avgqv = avgqv + 0.5*(z(k)-z(k-1))*(q(k)+q(k-1)) + + k = k + 1 + + enddo + + th2 = th(k-1)+(th(k)-th(k-1))*(ml_depth-z(k-1))/(z(k)-z(k-1)) + qv2 = q(k-1)+( q(k)- q(k-1))*(ml_depth-z(k-1))/(z(k)-z(k-1)) + + if(debug_level.ge.100) print *,999,ml_depth,th2,qv2 + + avgth = avgth + 0.5*(ml_depth-z(k-1))*(th2+th(k-1)) + avgqv = avgqv + 0.5*(ml_depth-z(k-1))*(qv2+q(k-1)) + + if(debug_level.ge.100) print *,k,z(k),th(k),q(k) + + avgth = avgth/ml_depth + avgqv = avgqv/ml_depth + + kmax = 1 + + ENDIF + + if(debug_level.ge.100) print *,avgth,avgqv + + ELSE + +! print * +! print *,' Unknown value for source' +! print * +! print *,' source = ',source +! print * +! stop + write(0,*) 'getcape: unknown value for source' + return + + ENDIF + +!---- define parcel properties at initial location ----! 
+ narea = 0.0 + + if( (source.eq.1).or.(source.eq.2) )then + k = kmax + th2 = th(kmax) + pi2 = pi(kmax) + p2 = p(kmax) + t2 = t(kmax) + thv2 = thv(kmax) + qv2 = q(kmax) + b2 = 0.0 + elseif( source.eq.3 )then + k = kmax + th2 = avgth + qv2 = avgqv + thv2 = th2*(1.0+reps*qv2)/(1.0+qv2) + pi2 = pi(kmax) + p2 = p(kmax) + t2 = th2*pi2 + b2 = g*( thv2-thv(kmax) )/thv(kmax) + endif + + ql2 = 0.0 + qi2 = 0.0 + qt = qv2 + + cape = 0.0 + cin = 0.0 + lfc = 0.0 + + doit = .true. + cloud = .false. + if(adiabat.eq.1.or.adiabat.eq.2)then + ice = .false. + else + ice = .true. + endif + + the = getthe(p2,t2,t2,qv2) + if(debug_level.ge.100) print *,' the = ',the + +!---- begin ascent of parcel ----! + + if(debug_level.ge.100)then + print *,' Start loop:' + print *,' p2,th2,qv2 = ',p2,th2,qv2 + endif + + do while( doit .and. (k.lt.nk) ) + + k = k+1 + b1 = b2 + + dp = p(k-1)-p(k) + + if( dp.lt.pinc )then + nloop = 1 + else + nloop = 1 + int( dp/pinc ) + dp = dp/float(nloop) + endif + + do n=1,nloop + + p1 = p2 + t1 = t2 + pi1 = pi2 + th1 = th2 + qv1 = qv2 + ql1 = ql2 + qi1 = qi2 + thv1 = thv2 + + p2 = p2 - dp + pi2 = (p2*rp00)**rddcp + + thlast = th1 + i = 0 + not_converged = .true. + + do while( not_converged ) + i = i + 1 + t2 = thlast*pi2 + if(ice)then + fliq = max(min((t2-233.15)/(273.15-233.15),1.0),0.0) + fice = 1.0-fliq + else + fliq = 1.0 + fice = 0.0 + endif + qv2 = min( qt , fliq*getqvs(p2,t2) + fice*getqvi(p2,t2) ) + qi2 = max( fice*(qt-qv2) , 0.0 ) + ql2 = max( qt-qv2-qi2 , 0.0 ) + + tbar = 0.5*(t1+t2) + qvbar = 0.5*(qv1+qv2) + qlbar = 0.5*(ql1+ql2) + qibar = 0.5*(qi1+qi2) + + lhv = lv1-lv2*tbar + lhs = ls1-ls2*tbar + lhf = lhs-lhv + + rm=rd+rv*qvbar + cpm=cp+cpv*qvbar+cpl*qlbar+cpi*qibar + th2=th1*exp( lhv*(ql2-ql1)/(cpm*tbar) & + +lhs*(qi2-qi1)/(cpm*tbar) & + +(rm/cpm-rd/cp)*alog(p2/p1) ) + + if(i .gt. 90 .and. debug_level .gt. 0) print *,i,th2,thlast,th2-thlast + if(i .gt. 100)then +! print * +! print *,' Error: lack of convergence' +! print * +! print *,' ... 
stopping iteration ' +! print * +! stop 1001 + if (debug_level .gt. 0) then + write(0,*) 'getcape: lack of convergence' + end if + return + endif + if( abs(th2-thlast).gt.converge )then + thlast=thlast+0.3*(th2-thlast) + else + not_converged = .false. + endif + enddo + + ! Latest pressure increment is complete. Calculate some + ! important stuff: + + if( ql2.ge.1.0e-10 ) cloud = .true. + + IF(adiabat.eq.1.or.adiabat.eq.3)THEN + ! pseudoadiabat + qt = qv2 + ql2 = 0.0 + qi2 = 0.0 + ELSEIF(adiabat.le.0.or.adiabat.ge.5)THEN +! print * +! print *,' Undefined adiabat' +! print * +! stop 10000 + write(0,*) 'getcape: Undefined adiabat' + return + ENDIF + + enddo + + thv2 = th2*(1.0+reps*qv2)/(1.0+qv2+ql2+qi2) + b2 = g*( thv2-thv(k) )/thv(k) + dz = -cpdg*0.5*(thv(k)+thv(k-1))*(pi(k)-pi(k-1)) + + the = getthe(p2,t2,t2,qv2) + + ! Get contributions to CAPE and CIN: + + if( (b2.ge.0.0) .and. (b1.lt.0.0) )then + ! first trip into positive area + ps = p(k-1)+(p(k)-p(k-1))*(0.0-b1)/(b2-b1) + frac = b2/(b2-b1) + parea = 0.5*b2*dz*frac + narea = narea-0.5*b1*dz*(1.0-frac) + if(debug_level.ge.200)then + print *,' b1,b2 = ',b1,b2 + print *,' p1,ps,p2 = ',p(k-1),ps,p(k) + print *,' frac = ',frac + print *,' parea = ',parea + print *,' narea = ',narea + endif + cin = cin + narea + narea = 0.0 + elseif( (b2.lt.0.0) .and. (b1.gt.0.0) )then + ! first trip into neg area + ps = p(k-1)+(p(k)-p(k-1))*(0.0-b1)/(b2-b1) + frac = b1/(b1-b2) + parea = 0.5*b1*dz*frac + narea = -0.5*b2*dz*(1.0-frac) + if(debug_level.ge.200)then + print *,' b1,b2 = ',b1,b2 + print *,' p1,ps,p2 = ',p(k-1),ps,p(k) + print *,' frac = ',frac + print *,' parea = ',parea + print *,' narea = ',narea + endif + elseif( b2.lt.0.0 )then + ! still collecting negative buoyancy + parea = 0.0 + narea = narea-0.5*dz*(b1+b2) + else + ! 
still collecting positive buoyancy + parea = 0.5*dz*(b1+b2) + narea = 0.0 + endif + + cape = cape + max(0.0,parea) + + if(debug_level.ge.200)then + write(6,102) p2,b1,b2,cape,cin,cloud +102 format(5(f13.4),2x,l1) + endif + + if( (p(k).le.10000.0).and.(b2.lt.0.0) )then + ! stop if b < 0 and p < 100 mb + doit = .false. + endif + + enddo + +!---- All done ----! + + return + end subroutine getcape + +!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- + + real (kind=RKIND) function getqvs(p,t) + implicit none + + real (kind=RKIND) :: p,t,es + + real (kind=RKIND), parameter :: eps = 287.04/461.5 + + es = 611.2*exp(17.67*(t-273.15)/(t-29.65)) + getqvs = eps*es/(p-es) + + return + end function getqvs + +!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- + + real (kind=RKIND) function getqvi(p,t) + implicit none + + real (kind=RKIND) :: p,t,es + + real (kind=RKIND), parameter :: eps = 287.04/461.5 + + es = 611.2*exp(21.8745584*(t-273.15)/(t-7.66)) + getqvi = eps*es/(p-es) + + return + end function getqvi + +!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- + + real (kind=RKIND) function getthe(p,t,td,q) + implicit none + + real (kind=RKIND) :: p,t,td,q + real (kind=RKIND) :: tlcl + + if( (td-t).ge.-0.1 )then + tlcl = t + else + tlcl = 56.0 + ( (td-56.0)**(-1) + 0.00125*alog(t/td) )**(-1) + endif + + getthe=t*( (100000.0/p)**(0.2854*(1.0-0.28*q)) ) & + *exp( ((3376.0/tlcl)-2.54)*q*(1.0+0.81*q) ) + + return + end function getthe + 
+!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- + +end module convective_diagnostics diff --git a/src/core_atmosphere/diagnostics/isobaric_diagnostics.F b/src/core_atmosphere/diagnostics/isobaric_diagnostics.F new file mode 100644 index 0000000000..7ce0ba7183 --- /dev/null +++ b/src/core_atmosphere/diagnostics/isobaric_diagnostics.F @@ -0,0 +1,1257 @@ +! Copyright (c) 2016, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +module isobaric_diagnostics + + use mpas_dmpar + use mpas_kind_types + use mpas_derived_types + use mpas_pool_routines + use mpas_constants + + type (MPAS_pool_type), pointer :: mesh + type (MPAS_pool_type), pointer :: state + type (MPAS_pool_type), pointer :: diag + + type (MPAS_clock_type), pointer :: clock + + public :: isobaric_diagnostics_setup, & + isobaric_diagnostics_compute + + private + + logical :: need_mslp, & + need_relhum_200, need_relhum_250, need_relhum_500, need_relhum_700, need_relhum_850, need_relhum_925, & + need_dewpoint_200, need_dewpoint_250, need_dewpoint_500, need_dewpoint_700, need_dewpoint_850, need_dewpoint_925, & + need_temp_200, need_temp_250, need_temp_500, need_temp_700, need_temp_850, need_temp_925, & + need_height_200, need_height_250, need_height_500, need_height_700, need_height_850, need_height_925, & + need_uzonal_200, need_uzonal_250, need_uzonal_500, need_uzonal_700, need_uzonal_850, need_uzonal_925, & + need_umeridional_200, need_umeridional_250, need_umeridional_500, need_umeridional_700, need_umeridional_850, need_umeridional_925, & + 
need_w_200, need_w_250, need_w_500, need_w_700, need_w_850, need_w_925, & + need_vorticity_200, need_vorticity_250, need_vorticity_500, need_vorticity_700, need_vorticity_850, need_vorticity_925, & + need_t_isobaric, need_z_isobaric, need_meanT_500_300 + logical :: need_temp, need_relhum, need_dewpoint, need_w, need_uzonal, need_umeridional, need_vorticity, need_height + + + contains + + + !----------------------------------------------------------------------- + ! routine isobaric_diagnostics_setup + ! + !> \brief Set up the isobaric diagnostics module + !> \author Michael Duda + !> \date 21 October 2016 + !> \details + !> This routine sets up the isobaric diagnostics module, principally by + !> saving pointers to pools that are used in the computation of diagnostics. + ! + !----------------------------------------------------------------------- + subroutine isobaric_diagnostics_setup(all_pools, simulation_clock) + + use mpas_derived_types, only : MPAS_pool_type, MPAS_clock_type + use mpas_pool_routines, only : mpas_pool_get_subpool + + implicit none + + type (MPAS_pool_type), pointer :: all_pools + type (MPAS_clock_type), pointer :: simulation_clock + + clock => simulation_clock + + call mpas_pool_get_subpool(all_pools, 'mesh', mesh) + call mpas_pool_get_subpool(all_pools, 'state', state) + call mpas_pool_get_subpool(all_pools, 'diag', diag) + + end subroutine isobaric_diagnostics_setup + + + !----------------------------------------------------------------------- + ! routine isobaric_diagnostics_compute + ! + !> \brief Compute isobaric diagnostic before model output is written + !> \author Michael Duda + !> \date 21 October 2016 + !> \details + !> Compute isobaric diagnostic before model output is written. Code called + !> from here was previously in mpas_atm_interp_diagnostics.F. + ! 
+ !----------------------------------------------------------------------- + subroutine isobaric_diagnostics_compute() + + use mpas_atm_diagnostics_utils, only : MPAS_field_will_be_written + + implicit none + + logical :: need_any_diags + + need_any_diags = .false. + + need_temp = .false. + need_dewpoint = .false. + need_relhum = .false. + need_w = .false. + need_uzonal = .false. + need_umeridional = .false. + need_vorticity = .false. + need_height = .false. + + need_mslp = MPAS_field_will_be_written('mslp') + need_any_diags = need_any_diags .or. need_mslp + need_relhum_200 = MPAS_field_will_be_written('relhum_200hPa') + need_relhum = need_relhum .or. need_relhum_200 + need_any_diags = need_any_diags .or. need_relhum_200 + need_relhum_250 = MPAS_field_will_be_written('relhum_250hPa') + need_relhum = need_relhum .or. need_relhum_250 + need_any_diags = need_any_diags .or. need_relhum_250 + need_relhum_500 = MPAS_field_will_be_written('relhum_500hPa') + need_relhum = need_relhum .or. need_relhum_500 + need_any_diags = need_any_diags .or. need_relhum_500 + need_relhum_700 = MPAS_field_will_be_written('relhum_700hPa') + need_relhum = need_relhum .or. need_relhum_700 + need_any_diags = need_any_diags .or. need_relhum_700 + need_relhum_850 = MPAS_field_will_be_written('relhum_850hPa') + need_relhum = need_relhum .or. need_relhum_850 + need_any_diags = need_any_diags .or. need_relhum_850 + need_relhum_925 = MPAS_field_will_be_written('relhum_925hPa') + need_relhum = need_relhum .or. need_relhum_925 + need_any_diags = need_any_diags .or. need_relhum_925 + need_dewpoint_200 = MPAS_field_will_be_written('dewpoint_200hPa') + need_dewpoint = need_dewpoint .or. need_dewpoint_200 + need_any_diags = need_any_diags .or. need_dewpoint_200 + need_dewpoint_250 = MPAS_field_will_be_written('dewpoint_250hPa') + need_dewpoint = need_dewpoint .or. need_dewpoint_250 + need_any_diags = need_any_diags .or. 
need_dewpoint_250 + need_dewpoint_500 = MPAS_field_will_be_written('dewpoint_500hPa') + need_dewpoint = need_dewpoint .or. need_dewpoint_500 + need_any_diags = need_any_diags .or. need_dewpoint_500 + need_dewpoint_700 = MPAS_field_will_be_written('dewpoint_700hPa') + need_dewpoint = need_dewpoint .or. need_dewpoint_700 + need_any_diags = need_any_diags .or. need_dewpoint_700 + need_dewpoint_850 = MPAS_field_will_be_written('dewpoint_850hPa') + need_dewpoint = need_dewpoint .or. need_dewpoint_850 + need_any_diags = need_any_diags .or. need_dewpoint_850 + need_dewpoint_925 = MPAS_field_will_be_written('dewpoint_925hPa') + need_dewpoint = need_dewpoint .or. need_dewpoint_925 + need_any_diags = need_any_diags .or. need_dewpoint_925 + need_temp_200 = MPAS_field_will_be_written('temperature_200hPa') + need_temp = need_temp .or. need_temp_200 + need_any_diags = need_any_diags .or. need_temp_200 + need_temp_250 = MPAS_field_will_be_written('temperature_250hPa') + need_temp = need_temp .or. need_temp_250 + need_any_diags = need_any_diags .or. need_temp_250 + need_temp_500 = MPAS_field_will_be_written('temperature_500hPa') + need_temp = need_temp .or. need_temp_500 + need_any_diags = need_any_diags .or. need_temp_500 + need_temp_700 = MPAS_field_will_be_written('temperature_700hPa') + need_temp = need_temp .or. need_temp_700 + need_any_diags = need_any_diags .or. need_temp_700 + need_temp_850 = MPAS_field_will_be_written('temperature_850hPa') + need_temp = need_temp .or. need_temp_850 + need_any_diags = need_any_diags .or. need_temp_850 + need_temp_925 = MPAS_field_will_be_written('temperature_925hPa') + need_temp = need_temp .or. need_temp_925 + need_any_diags = need_any_diags .or. need_temp_925 + need_height_200 = MPAS_field_will_be_written('height_200hPa') + need_height = need_height .or. need_height_200 + need_any_diags = need_any_diags .or. need_height_200 + need_height_250 = MPAS_field_will_be_written('height_250hPa') + need_height = need_height .or. 
need_height_250 + need_any_diags = need_any_diags .or. need_height_250 + need_height_500 = MPAS_field_will_be_written('height_500hPa') + need_height = need_height .or. need_height_500 + need_any_diags = need_any_diags .or. need_height_500 + need_height_700 = MPAS_field_will_be_written('height_700hPa') + need_height = need_height .or. need_height_700 + need_any_diags = need_any_diags .or. need_height_700 + need_height_850 = MPAS_field_will_be_written('height_850hPa') + need_height = need_height .or. need_height_850 + need_any_diags = need_any_diags .or. need_height_850 + need_height_925 = MPAS_field_will_be_written('height_925hPa') + need_height = need_height .or. need_height_925 + need_any_diags = need_any_diags .or. need_height_925 + need_uzonal_200 = MPAS_field_will_be_written('uzonal_200hPa') + need_uzonal = need_uzonal .or. need_uzonal_200 + need_any_diags = need_any_diags .or. need_uzonal_200 + need_uzonal_250 = MPAS_field_will_be_written('uzonal_250hPa') + need_uzonal = need_uzonal .or. need_uzonal_250 + need_any_diags = need_any_diags .or. need_uzonal_250 + need_uzonal_500 = MPAS_field_will_be_written('uzonal_500hPa') + need_uzonal = need_uzonal .or. need_uzonal_500 + need_any_diags = need_any_diags .or. need_uzonal_500 + need_uzonal_700 = MPAS_field_will_be_written('uzonal_700hPa') + need_uzonal = need_uzonal .or. need_uzonal_700 + need_any_diags = need_any_diags .or. need_uzonal_700 + need_uzonal_850 = MPAS_field_will_be_written('uzonal_850hPa') + need_uzonal = need_uzonal .or. need_uzonal_850 + need_any_diags = need_any_diags .or. need_uzonal_850 + need_uzonal_925 = MPAS_field_will_be_written('uzonal_925hPa') + need_uzonal = need_uzonal .or. need_uzonal_925 + need_any_diags = need_any_diags .or. need_uzonal_925 + need_umeridional_200 = MPAS_field_will_be_written('umeridional_200hPa') + need_umeridional = need_umeridional .or. need_umeridional_200 + need_any_diags = need_any_diags .or. 
need_umeridional_200 + need_umeridional_250 = MPAS_field_will_be_written('umeridional_250hPa') + need_umeridional = need_umeridional .or. need_umeridional_250 + need_any_diags = need_any_diags .or. need_umeridional_250 + need_umeridional_500 = MPAS_field_will_be_written('umeridional_500hPa') + need_umeridional = need_umeridional .or. need_umeridional_500 + need_any_diags = need_any_diags .or. need_umeridional_500 + need_umeridional_700 = MPAS_field_will_be_written('umeridional_700hPa') + need_umeridional = need_umeridional .or. need_umeridional_700 + need_any_diags = need_any_diags .or. need_umeridional_700 + need_umeridional_850 = MPAS_field_will_be_written('umeridional_850hPa') + need_umeridional = need_umeridional .or. need_umeridional_850 + need_any_diags = need_any_diags .or. need_umeridional_850 + need_umeridional_925 = MPAS_field_will_be_written('umeridional_925hPa') + need_umeridional = need_umeridional .or. need_umeridional_925 + need_any_diags = need_any_diags .or. need_umeridional_925 + need_w_200 = MPAS_field_will_be_written('w_200hPa') + need_w = need_w .or. need_w_200 + need_any_diags = need_any_diags .or. need_w_200 + need_w_250 = MPAS_field_will_be_written('w_250hPa') + need_w = need_w .or. need_w_250 + need_any_diags = need_any_diags .or. need_w_250 + need_w_500 = MPAS_field_will_be_written('w_500hPa') + need_w = need_w .or. need_w_500 + need_any_diags = need_any_diags .or. need_w_500 + need_w_700 = MPAS_field_will_be_written('w_700hPa') + need_w = need_w .or. need_w_700 + need_any_diags = need_any_diags .or. need_w_700 + need_w_850 = MPAS_field_will_be_written('w_850hPa') + need_w = need_w .or. need_w_850 + need_any_diags = need_any_diags .or. need_w_850 + need_w_925 = MPAS_field_will_be_written('w_925hPa') + need_w = need_w .or. need_w_925 + need_any_diags = need_any_diags .or. need_w_925 + need_vorticity_200 = MPAS_field_will_be_written('vorticity_200hPa') + need_vorticity = need_vorticity .or. 
need_vorticity_200 + need_any_diags = need_any_diags .or. need_vorticity_200 + need_vorticity_250 = MPAS_field_will_be_written('vorticity_250hPa') + need_vorticity = need_vorticity .or. need_vorticity_250 + need_any_diags = need_any_diags .or. need_vorticity_250 + need_vorticity_500 = MPAS_field_will_be_written('vorticity_500hPa') + need_vorticity = need_vorticity .or. need_vorticity_500 + need_any_diags = need_any_diags .or. need_vorticity_500 + need_vorticity_700 = MPAS_field_will_be_written('vorticity_700hPa') + need_vorticity = need_vorticity .or. need_vorticity_700 + need_any_diags = need_any_diags .or. need_vorticity_700 + need_vorticity_850 = MPAS_field_will_be_written('vorticity_850hPa') + need_vorticity = need_vorticity .or. need_vorticity_850 + need_any_diags = need_any_diags .or. need_vorticity_850 + need_vorticity_925 = MPAS_field_will_be_written('vorticity_925hPa') + need_vorticity = need_vorticity .or. need_vorticity_925 + need_any_diags = need_any_diags .or. need_vorticity_925 + need_t_isobaric = MPAS_field_will_be_written('t_isobaric') + need_any_diags = need_any_diags .or. need_t_isobaric + need_z_isobaric = MPAS_field_will_be_written('z_isobaric') + need_any_diags = need_any_diags .or. need_z_isobaric + need_meanT_500_300 = MPAS_field_will_be_written('meanT_500_300') + need_any_diags = need_any_diags .or. need_meanT_500_300 + + if (need_any_diags) then + call interp_diagnostics(mesh, state, 1, diag) + end if + + end subroutine isobaric_diagnostics_compute + + + !================================================================================================== + subroutine interp_diagnostics(mesh, state, time_lev, diag) + !================================================================================================== + + !input arguments: + type (mpas_pool_type), intent(in) :: mesh + type (mpas_pool_type), intent(in) :: state + integer, intent(in) :: time_lev ! 
which time level to use from state + + !inout arguments: + type (mpas_pool_type), intent(inout) :: diag + + !local variables: + integer :: iCell,iVert,iVertD,k,kk + integer, pointer :: nCells, nCellsSolve, nVertLevels, nVertices, vertexDegree, nIsoLevelsT, nIsoLevelsZ + integer :: nVertLevelsP1 + integer, pointer :: index_qv, num_scalars + integer, dimension(:,:), pointer :: cellsOnVertex + + type (field2DReal), pointer:: pressure_p_field + + real (kind=RKIND), dimension(:), pointer :: areaTriangle + real (kind=RKIND), dimension(:,:), pointer :: kiteAreasOnVertex + + real (kind=RKIND), dimension(:,:), pointer :: exner, height + real (kind=RKIND), dimension(:,:), pointer :: pressure_b, pressure_p + real (kind=RKIND), dimension(:,:), pointer :: relhum, theta_m, vorticity + real (kind=RKIND), dimension(:,:), pointer :: umeridional, uzonal, vvel + real (kind=RKIND), dimension(:,:,:), pointer :: scalars + + real (kind=RKIND), dimension(:), pointer :: t_iso_levels + real (kind=RKIND), dimension(:), pointer :: z_iso_levels + real (kind=RKIND), dimension(:,:), pointer :: t_isobaric + real (kind=RKIND), dimension(:,:), pointer :: z_isobaric + real (kind=RKIND), dimension(:), pointer :: meanT_500_300 + + real (kind=RKIND), dimension(:), pointer :: temperature_200hPa + real (kind=RKIND), dimension(:), pointer :: temperature_250hPa + real (kind=RKIND), dimension(:), pointer :: temperature_500hPa + real (kind=RKIND), dimension(:), pointer :: temperature_700hPa + real (kind=RKIND), dimension(:), pointer :: temperature_850hPa + real (kind=RKIND), dimension(:), pointer :: temperature_925hPa + + real (kind=RKIND), dimension(:), pointer :: relhum_200hPa + real (kind=RKIND), dimension(:), pointer :: relhum_250hPa + real (kind=RKIND), dimension(:), pointer :: relhum_500hPa + real (kind=RKIND), dimension(:), pointer :: relhum_700hPa + real (kind=RKIND), dimension(:), pointer :: relhum_850hPa + real (kind=RKIND), dimension(:), pointer :: relhum_925hPa + + real (kind=RKIND), 
dimension(:), pointer :: dewpoint_200hPa + real (kind=RKIND), dimension(:), pointer :: dewpoint_250hPa + real (kind=RKIND), dimension(:), pointer :: dewpoint_500hPa + real (kind=RKIND), dimension(:), pointer :: dewpoint_700hPa + real (kind=RKIND), dimension(:), pointer :: dewpoint_850hPa + real (kind=RKIND), dimension(:), pointer :: dewpoint_925hPa + + real (kind=RKIND), dimension(:), pointer :: uzonal_200hPa + real (kind=RKIND), dimension(:), pointer :: uzonal_250hPa + real (kind=RKIND), dimension(:), pointer :: uzonal_500hPa + real (kind=RKIND), dimension(:), pointer :: uzonal_700hPa + real (kind=RKIND), dimension(:), pointer :: uzonal_850hPa + real (kind=RKIND), dimension(:), pointer :: uzonal_925hPa + + real (kind=RKIND), dimension(:), pointer :: umeridional_200hPa + real (kind=RKIND), dimension(:), pointer :: umeridional_250hPa + real (kind=RKIND), dimension(:), pointer :: umeridional_500hPa + real (kind=RKIND), dimension(:), pointer :: umeridional_700hPa + real (kind=RKIND), dimension(:), pointer :: umeridional_850hPa + real (kind=RKIND), dimension(:), pointer :: umeridional_925hPa + + real (kind=RKIND), dimension(:), pointer :: height_200hPa + real (kind=RKIND), dimension(:), pointer :: height_250hPa + real (kind=RKIND), dimension(:), pointer :: height_500hPa + real (kind=RKIND), dimension(:), pointer :: height_700hPa + real (kind=RKIND), dimension(:), pointer :: height_850hPa + real (kind=RKIND), dimension(:), pointer :: height_925hPa + + real (kind=RKIND), dimension(:), pointer :: w_200hPa + real (kind=RKIND), dimension(:), pointer :: w_250hPa + real (kind=RKIND), dimension(:), pointer :: w_500hPa + real (kind=RKIND), dimension(:), pointer :: w_700hPa + real (kind=RKIND), dimension(:), pointer :: w_850hPa + real (kind=RKIND), dimension(:), pointer :: w_925hPa + + real (kind=RKIND), dimension(:), pointer :: vorticity_200hPa + real (kind=RKIND), dimension(:), pointer :: vorticity_250hPa + real (kind=RKIND), dimension(:), pointer :: vorticity_500hPa + real 
(kind=RKIND), dimension(:), pointer :: vorticity_700hPa + real (kind=RKIND), dimension(:), pointer :: vorticity_850hPa + real (kind=RKIND), dimension(:), pointer :: vorticity_925hPa + + real (kind=RKIND) :: evp + + !-------------------- + + real (kind=RKIND), dimension(:), pointer :: mslp + + real (kind=RKIND), dimension(:,:), allocatable :: pressure, pressureCp1, pressure2, pressure_v, temperature + real (kind=RKIND), dimension(:,:), allocatable :: dewpoint + + !local interpolated fields: + integer :: nIntP + real (kind=RKIND) :: w1,w2,z0,z1,z2 + real (kind=RKIND), dimension(:,:), allocatable :: field_in,press_in + real (kind=RKIND), dimension(:,:), allocatable :: field_interp,press_interp + + !-------------------------------------------------------------------------------------------------- + + ! write(0,*) + ! write(0,*) '--- enter subroutine interp_diagnostics:' + + call mpas_pool_get_dimension(mesh, 'nCells', nCells) + call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) + call mpas_pool_get_dimension(mesh, 'nVertices', nVertices) + call mpas_pool_get_dimension(mesh, 'vertexDegree', vertexDegree) + call mpas_pool_get_dimension(mesh, 'nIsoLevelsT', nIsoLevelsT) + call mpas_pool_get_dimension(mesh, 'nIsoLevelsZ', nIsoLevelsZ) + call mpas_pool_get_dimension(state, 'index_qv', index_qv) + call mpas_pool_get_dimension(state, 'num_scalars', num_scalars) + + nVertLevelsP1 = nVertLevels + 1 + + call mpas_pool_get_array(mesh, 'cellsOnVertex', cellsOnVertex) + call mpas_pool_get_array(mesh, 'areaTriangle', areaTriangle) + call mpas_pool_get_array(mesh, 'kiteAreasOnVertex', kiteAreasOnVertex) + + call mpas_pool_get_array(mesh, 'zgrid', height) + call mpas_pool_get_array(state, 'w', vvel, time_lev) + call mpas_pool_get_array(state, 'theta_m', theta_m, time_lev) + call mpas_pool_get_array(state, 'scalars', scalars, time_lev) + + call mpas_pool_get_field(diag, 'pressure_p', pressure_p_field) + call 
mpas_dmpar_exch_halo_field(pressure_p_field) + + call mpas_pool_get_array(diag, 'exner', exner) + call mpas_pool_get_array(diag, 'pressure_base', pressure_b) + call mpas_pool_get_array(diag, 'pressure_p', pressure_p) + call mpas_pool_get_array(diag, 'vorticity', vorticity) + call mpas_pool_get_array(diag, 'uReconstructMeridional', umeridional) + call mpas_pool_get_array(diag, 'uReconstructZonal', uzonal) + call mpas_pool_get_array(diag, 'relhum', relhum) + + call mpas_pool_get_array(diag, 't_iso_levels', t_iso_levels) + call mpas_pool_get_array(diag, 'z_iso_levels', z_iso_levels) + call mpas_pool_get_array(diag, 't_isobaric', t_isobaric) + call mpas_pool_get_array(diag, 'z_isobaric', z_isobaric) + call mpas_pool_get_array(diag, 'meanT_500_300', meanT_500_300) + + call mpas_pool_get_array(diag, 'temperature_200hPa', temperature_200hPa) + call mpas_pool_get_array(diag, 'temperature_250hPa', temperature_250hPa) + call mpas_pool_get_array(diag, 'temperature_500hPa', temperature_500hPa) + call mpas_pool_get_array(diag, 'temperature_700hPa', temperature_700hPa) + call mpas_pool_get_array(diag, 'temperature_850hPa', temperature_850hPa) + call mpas_pool_get_array(diag, 'temperature_925hPa', temperature_925hPa) + + call mpas_pool_get_array(diag, 'relhum_200hPa', relhum_200hPa) + call mpas_pool_get_array(diag, 'relhum_250hPa', relhum_250hPa) + call mpas_pool_get_array(diag, 'relhum_500hPa', relhum_500hPa) + call mpas_pool_get_array(diag, 'relhum_700hPa', relhum_700hPa) + call mpas_pool_get_array(diag, 'relhum_850hPa', relhum_850hPa) + call mpas_pool_get_array(diag, 'relhum_925hPa', relhum_925hPa) + + call mpas_pool_get_array(diag, 'dewpoint_200hPa', dewpoint_200hPa) + call mpas_pool_get_array(diag, 'dewpoint_250hPa', dewpoint_250hPa) + call mpas_pool_get_array(diag, 'dewpoint_500hPa', dewpoint_500hPa) + call mpas_pool_get_array(diag, 'dewpoint_700hPa', dewpoint_700hPa) + call mpas_pool_get_array(diag, 'dewpoint_850hPa', dewpoint_850hPa) + call mpas_pool_get_array(diag, 
'dewpoint_925hPa', dewpoint_925hPa) + + call mpas_pool_get_array(diag, 'uzonal_200hPa', uzonal_200hPa) + call mpas_pool_get_array(diag, 'uzonal_250hPa', uzonal_250hPa) + call mpas_pool_get_array(diag, 'uzonal_500hPa', uzonal_500hPa) + call mpas_pool_get_array(diag, 'uzonal_700hPa', uzonal_700hPa) + call mpas_pool_get_array(diag, 'uzonal_850hPa', uzonal_850hPa) + call mpas_pool_get_array(diag, 'uzonal_925hPa', uzonal_925hPa) + + call mpas_pool_get_array(diag, 'umeridional_200hPa', umeridional_200hPa) + call mpas_pool_get_array(diag, 'umeridional_250hPa', umeridional_250hPa) + call mpas_pool_get_array(diag, 'umeridional_500hPa', umeridional_500hPa) + call mpas_pool_get_array(diag, 'umeridional_700hPa', umeridional_700hPa) + call mpas_pool_get_array(diag, 'umeridional_850hPa', umeridional_850hPa) + call mpas_pool_get_array(diag, 'umeridional_925hPa', umeridional_925hPa) + + call mpas_pool_get_array(diag, 'height_200hPa', height_200hPa) + call mpas_pool_get_array(diag, 'height_250hPa', height_250hPa) + call mpas_pool_get_array(diag, 'height_500hPa', height_500hPa) + call mpas_pool_get_array(diag, 'height_700hPa', height_700hPa) + call mpas_pool_get_array(diag, 'height_850hPa', height_850hPa) + call mpas_pool_get_array(diag, 'height_925hPa', height_925hPa) + + call mpas_pool_get_array(diag, 'w_200hPa', w_200hPa) + call mpas_pool_get_array(diag, 'w_250hPa', w_250hPa) + call mpas_pool_get_array(diag, 'w_500hPa', w_500hPa) + call mpas_pool_get_array(diag, 'w_700hPa', w_700hPa) + call mpas_pool_get_array(diag, 'w_850hPa', w_850hPa) + call mpas_pool_get_array(diag, 'w_925hPa', w_925hPa) + + call mpas_pool_get_array(diag, 'vorticity_200hPa', vorticity_200hPa) + call mpas_pool_get_array(diag, 'vorticity_250hPa', vorticity_250hPa) + call mpas_pool_get_array(diag, 'vorticity_500hPa', vorticity_500hPa) + call mpas_pool_get_array(diag, 'vorticity_700hPa', vorticity_700hPa) + call mpas_pool_get_array(diag, 'vorticity_850hPa', vorticity_850hPa) + call mpas_pool_get_array(diag, 
'vorticity_925hPa', vorticity_925hPa) + + call mpas_pool_get_array(diag, 'mslp', mslp) + + if(.not.allocated(pressure) ) allocate(pressure(nVertLevels,nCells) ) + if(.not.allocated(pressureCp1) ) allocate(pressureCp1(nVertLevels,nCells+1) ) + if(.not.allocated(pressure2) ) allocate(pressure2(nVertLevelsP1,nCells) ) + if(.not.allocated(pressure_v) ) allocate(pressure_v(nVertLevels,nVertices) ) + if(.not.allocated(temperature) ) allocate(temperature(nVertLevels,nCells) ) + if(.not.allocated(dewpoint) ) allocate(dewpoint(nVertLevels,nCells) ) + + if (need_t_isobaric) then + t_iso_levels(1) = 30000.0 + t_iso_levels(2) = 35000.0 + t_iso_levels(3) = 40000.0 + t_iso_levels(4) = 45000.0 + t_iso_levels(5) = 50000.0 + end if + + if (need_z_isobaric) then + z_iso_levels(1) = 30000.0 + z_iso_levels(2) = 35000.0 + z_iso_levels(3) = 40000.0 + z_iso_levels(4) = 45000.0 + z_iso_levels(5) = 50000.0 + z_iso_levels(6) = 55000.0 + z_iso_levels(7) = 60000.0 + z_iso_levels(8) = 65000.0 + z_iso_levels(9) = 70000.0 + z_iso_levels(10) = 75000.0 + z_iso_levels(11) = 80000.0 + z_iso_levels(12) = 85000.0 + z_iso_levels(13) = 90000.0 + end if + + !calculation of total pressure at cell centers (at mass points): + do iCell = 1, nCells + do k = 1, nVertLevels + pressure(k,iCell) = (pressure_p(k,iCell) + pressure_b(k,iCell)) / 100._RKIND + pressureCp1(k,iCell) = pressure(k,iCell) + enddo + enddo + do iCell = nCells+1, nCells+1 + do k = 1, nVertLevels + pressureCp1(k,iCell) = (pressure_p(k,iCell) + pressure_b(k,iCell)) / 100._RKIND + enddo + enddo + + !calculation of total pressure at cell centers (at vertical velocity points): + k = nVertLevelsP1 + do iCell = 1, nCells + z0 = height(k,iCell) + z1 = 0.5*(height(k,iCell)+height(k-1,iCell)) + z2 = 0.5*(height(k-1,iCell)+height(k-2,iCell)) + w1 = (z0-z2)/(z1-z2) + w2 = 1.-w1 + !use log of pressure to avoid occurrences of negative top-of-the-model pressure. 
+ pressure2(k,iCell) = exp(w1*log(pressure(k-1,iCell))+w2*log(pressure(k-2,iCell))) + enddo + do k = 2, nVertLevels + do iCell = 1, nCells + w1 = (height(k,iCell)-height(k-1,iCell)) / (height(k+1,iCell)-height(k-1,iCell)) + w2 = (height(k+1,iCell)-height(k,iCell)) / (height(k+1,iCell)-height(k-1,iCell)) + pressure2(k,iCell) = w1*pressure(k,iCell) + w2*pressure(k-1,iCell) + enddo + enddo + k = 1 + do iCell = 1, nCells + z0 = height(k,iCell) + z1 = 0.5*(height(k,iCell)+height(k+1,iCell)) + z2 = 0.5*(height(k+1,iCell)+height(k+2,iCell)) + w1 = (z0-z2)/(z1-z2) + w2 = 1.-w1 + pressure2(k,iCell) = w1*pressure(k,iCell)+w2*pressure(k+1,iCell) + enddo + + !calculation of total pressure at cell vertices (at mass points): + do iVert = 1, nVertices + pressure_v(:,iVert) = 0._RKIND + + do k = 1, nVertLevels + do iVertD = 1, vertexDegree + pressure_v(k,iVert) = pressure_v(k,iVert) & + + kiteAreasOnVertex(iVertD,iVert)*pressureCp1(k,cellsOnVertex(iVertD,iVert)) + enddo + pressure_v(k,iVert) = pressure_v(k,iVert) / areaTriangle(iVert) + enddo + enddo + + if (NEED_TEMP .or. NEED_RELHUM .or. NEED_DEWPOINT) then + !calculation of temperature at cell centers: + do iCell = 1,nCells + do k = 1,nVertLevels + temperature(k,iCell) = (theta_m(k,iCell)/(1._RKIND+rvord*scalars(index_qv,k,iCell)))*exner(k,iCell) + + ! Vapor pressure (NB: pressure here is already in hPa) + evp = pressure(k,iCell) * scalars(index_qv,k,iCell) / (scalars(index_qv,k,iCell) + 0.622_RKIND) + evp = max(evp, 1.0e-8_RKIND) + + ! 
Dewpoint temperature following Bolton (1980) + dewpoint(k,iCell) = (243.5_RKIND * log(evp/6.112_RKIND)) / (17.67_RKIND - log(evp/6.112_RKIND)) + dewpoint(k,iCell) = dewpoint(k,iCell) + 273.15 + enddo + enddo + end if + + !interpolation to fixed pressure levels for fields located at cells centers and at mass points: + nIntP = 6 + if(.not.allocated(field_interp)) allocate(field_interp(nCells,nIntP) ) + if(.not.allocated(press_interp)) allocate(press_interp(nCells,nIntP) ) + do iCell = 1, nCells + press_interp(iCell,1) = 200.0_RKIND + press_interp(iCell,2) = 250.0_RKIND + press_interp(iCell,3) = 500.0_RKIND + press_interp(iCell,4) = 700.0_RKIND + press_interp(iCell,5) = 850.0_RKIND + press_interp(iCell,6) = 925.0_RKIND + enddo + + if(.not.allocated(press_in)) allocate(press_in(nCells,nVertLevels)) + do iCell = 1, nCells + do k = 1, nVertLevels + kk = nVertLevels+1-k + press_in(iCell,kk) = pressure(k,iCell) + enddo + enddo + + if(.not.allocated(field_in)) allocate(field_in(nCells,nVertLevels)) + + if (NEED_TEMP) then + !... temperature: + do iCell = 1, nCells + do k = 1, nVertLevels + kk = nVertLevels+1-k + field_in(iCell,kk) = temperature(k,iCell) + enddo + enddo + call interp_tofixed_pressure(nCells,nVertLevels,nIntP,press_in,field_in,press_interp,field_interp) + temperature_200hPa(1:nCells) = field_interp(1:nCells,1) + temperature_250hPa(1:nCells) = field_interp(1:nCells,2) + temperature_500hPa(1:nCells) = field_interp(1:nCells,3) + temperature_700hPa(1:nCells) = field_interp(1:nCells,4) + temperature_850hPa(1:nCells) = field_interp(1:nCells,5) + temperature_925hPa(1:nCells) = field_interp(1:nCells,6) + ! write(0,*) '--- end interpolate temperature:' + end if + + + if (NEED_RELHUM) then + !... 
relative humidity: + do iCell = 1, nCells + do k = 1, nVertLevels + kk = nVertLevels+1-k + field_in(iCell,kk) = relhum(k,iCell) + enddo + enddo + call interp_tofixed_pressure(nCells,nVertLevels,nIntP,press_in,field_in,press_interp,field_interp) + relhum_200hPa(1:nCells) = field_interp(1:nCells,1) + relhum_250hPa(1:nCells) = field_interp(1:nCells,2) + relhum_500hPa(1:nCells) = field_interp(1:nCells,3) + relhum_700hPa(1:nCells) = field_interp(1:nCells,4) + relhum_850hPa(1:nCells) = field_interp(1:nCells,5) + relhum_925hPa(1:nCells) = field_interp(1:nCells,6) + ! write(0,*) '--- end interpolate relative humidity:' + end if + + if (NEED_DEWPOINT) then + !... dewpoint + do iCell = 1, nCells + do k = 1, nVertLevels + kk = nVertLevels+1-k + field_in(iCell,kk) = dewpoint(k,iCell) + enddo + enddo + call interp_tofixed_pressure(nCells,nVertLevels,nIntP,press_in,field_in,press_interp,field_interp) + dewpoint_200hPa(1:nCells) = field_interp(1:nCells,1) + dewpoint_250hPa(1:nCells) = field_interp(1:nCells,2) + dewpoint_500hPa(1:nCells) = field_interp(1:nCells,3) + dewpoint_700hPa(1:nCells) = field_interp(1:nCells,4) + dewpoint_850hPa(1:nCells) = field_interp(1:nCells,5) + dewpoint_925hPa(1:nCells) = field_interp(1:nCells,6) + ! write(0,*) '--- end interpolate relative humidity:' + end if + + if (NEED_UZONAL) then + !... u zonal wind: + do iCell = 1, nCells + do k = 1, nVertLevels + kk = nVertLevels+1-k + field_in(iCell,kk) = uzonal(k,iCell) + enddo + enddo + call interp_tofixed_pressure(nCells,nVertLevels,nIntP,press_in,field_in,press_interp,field_interp) + uzonal_200hPa(1:nCells) = field_interp(1:nCells,1) + uzonal_250hPa(1:nCells) = field_interp(1:nCells,2) + uzonal_500hPa(1:nCells) = field_interp(1:nCells,3) + uzonal_700hPa(1:nCells) = field_interp(1:nCells,4) + uzonal_850hPa(1:nCells) = field_interp(1:nCells,5) + uzonal_925hPa(1:nCells) = field_interp(1:nCells,6) + ! write(0,*) '--- end interpolate zonal wind:' + end if + + if (NEED_UMERIDIONAL) then + !... 
u meridional wind: + do iCell = 1, nCells + do k = 1, nVertLevels + kk = nVertLevels+1-k + field_in(iCell,kk) = umeridional(k,iCell) + enddo + enddo + call interp_tofixed_pressure(nCells,nVertLevels,nIntP,press_in,field_in,press_interp,field_interp) + umeridional_200hPa(1:nCells) = field_interp(1:nCells,1) + umeridional_250hPa(1:nCells) = field_interp(1:nCells,2) + umeridional_500hPa(1:nCells) = field_interp(1:nCells,3) + umeridional_700hPa(1:nCells) = field_interp(1:nCells,4) + umeridional_850hPa(1:nCells) = field_interp(1:nCells,5) + umeridional_925hPa(1:nCells) = field_interp(1:nCells,6) + ! write(0,*) '--- end interpolate meridional wind:' + end if + + if(allocated(field_in)) deallocate(field_in) + if(allocated(press_in)) deallocate(press_in) + + if (NEED_W .or. NEED_HEIGHT) then + !interpolation to fixed pressure levels for fields located at cells centers and at vertical + !velocity points: + if(.not.allocated(press_in)) allocate(press_in(nCells,nVertLevelsP1)) + do iCell = 1, nCells + do k = 1, nVertLevelsP1 + kk = nVertLevelsP1+1-k + press_in(iCell,kk) = pressure2(k,iCell) + enddo + enddo + + if(.not.allocated(field_in)) allocate(field_in(nCells,nVertLevelsP1)) + !... height: + do iCell = 1, nCells + do k = 1, nVertLevelsP1 + kk = nVertLevelsP1+1-k + field_in(iCell,kk) = height(k,iCell) + enddo + enddo + call interp_tofixed_pressure(nCells,nVertLevelsP1,nIntP,press_in,field_in,press_interp,field_interp) + height_200hPa(1:nCells) = field_interp(1:nCells,1) + height_250hPa(1:nCells) = field_interp(1:nCells,2) + height_500hPa(1:nCells) = field_interp(1:nCells,3) + height_700hPa(1:nCells) = field_interp(1:nCells,4) + height_850hPa(1:nCells) = field_interp(1:nCells,5) + height_925hPa(1:nCells) = field_interp(1:nCells,6) + ! write(0,*) '--- end interpolate height:' + + !... 
vertical velocity + do iCell = 1, nCells + do k = 1, nVertLevelsP1 + kk = nVertLevelsP1+1-k + field_in(iCell,kk) = vvel(k,iCell) + enddo + enddo + call interp_tofixed_pressure(nCells,nVertLevelsP1,nIntP,press_in,field_in,press_interp,field_interp) + w_200hPa(1:nCells) = field_interp(1:nCells,1) + w_250hPa(1:nCells) = field_interp(1:nCells,2) + w_500hPa(1:nCells) = field_interp(1:nCells,3) + w_700hPa(1:nCells) = field_interp(1:nCells,4) + w_850hPa(1:nCells) = field_interp(1:nCells,5) + w_925hPa(1:nCells) = field_interp(1:nCells,6) + + if(allocated(field_in)) deallocate(field_in) + if(allocated(press_in)) deallocate(press_in) + ! write(0,*) '--- end interpolate vertical velocity:' + end if + + if(allocated(field_interp)) deallocate(field_interp) + if(allocated(press_interp)) deallocate(press_interp) + + if (NEED_VORTICITY) then + !interpolation to fixed pressure levels for fields located at cell vertices and at mass points: + nIntP = 6 + if(.not.allocated(field_interp)) allocate(field_interp(nVertices,nIntP) ) + if(.not.allocated(press_interp)) allocate(press_interp(nVertices,nIntP) ) + do iVert = 1, nVertices + press_interp(iVert,1) = 200.0_RKIND + press_interp(iVert,2) = 250.0_RKIND + press_interp(iVert,3) = 500.0_RKIND + press_interp(iVert,4) = 700.0_RKIND + press_interp(iVert,5) = 850.0_RKIND + press_interp(iVert,6) = 925.0_RKIND + enddo + + if(.not.allocated(press_in)) allocate(press_in(nVertices,nVertLevels)) + do iVert = 1, nVertices + do k = 1, nVertLevels + kk = nVertLevels+1-k + press_in(iVert,kk) = pressure_v(k,iVert) + enddo + enddo + + if(.not.allocated(field_in)) allocate(field_in(nVertices,nVertLevels)) + !... 
relative vorticity: + do iVert = 1, nVertices + do k = 1, nVertLevels + kk = nVertLevels+1-k + field_in(iVert,kk) = vorticity(k,iVert) + enddo + enddo + call interp_tofixed_pressure(nVertices,nVertLevels,nIntP,press_in,field_in,press_interp,field_interp) + vorticity_200hPa(1:nVertices) = field_interp(1:nVertices,1) + vorticity_250hPa(1:nVertices) = field_interp(1:nVertices,2) + vorticity_500hPa(1:nVertices) = field_interp(1:nVertices,3) + vorticity_700hPa(1:nVertices) = field_interp(1:nVertices,4) + vorticity_850hPa(1:nVertices) = field_interp(1:nVertices,5) + vorticity_925hPa(1:nVertices) = field_interp(1:nVertices,6) + ! write(0,*) '--- end interpolate relative vorticity:' + + if(allocated(field_interp)) deallocate(field_interp) + if(allocated(press_interp)) deallocate(press_interp) + + if(allocated(field_in )) deallocate(field_in) + if(allocated(press_in )) deallocate(press_in) + end if + + if(allocated(pressureCp1) ) deallocate(pressureCp1 ) + if(allocated(pressure_v) ) deallocate(pressure_v ) + + if (need_mslp) then + !... compute SLP (requires temp, height, pressure, qvapor) + call compute_slp(nCells, nVertLevels, num_scalars, temperature, height, pressure, index_qv, scalars, mslp) + mslp(:) = mslp(:) * 100.0 ! Convert from hPa to Pa + !... alternative way + !do iCell = 1, nCells + ! mslp(iCell) = diag % surface_pressure % array(iCell) + 11.38*height(1,iCell) + ! mslp(iCell) = mslp(iCell)/100. + !enddo + end if + + + !!!!!!!!!!! Additional temperature levels for vortex tracking !!!!!!!!!!! + if (need_t_isobaric .or. need_meanT_500_300) then + + allocate(field_in(nCells, nVertLevels)) + allocate(press_in(nCells, nVertLevels)) + allocate(field_interp(nCells, nIsoLevelsT)) + allocate(press_interp(nCells, nIsoLevelsT)) + + do k=1,nIsoLevelsT + press_interp(:,k) = t_iso_levels(k) + end do + + ! 
Additional temperature levels for vortex tracking + do iCell=1,nCells + do k=1,nVertLevels + kk = nVertLevels+1-k + field_in(iCell,kk) = temperature(k,iCell) + end do + end do + + do iCell=1,nCells + do k=1,nVertLevels + kk = nVertLevels+1-k + press_in(iCell,kk) = pressure(k,iCell) * 100.0 + end do + end do + + if (need_t_isobaric) then + call interp_tofixed_pressure(nCells, nVertLevels, nIsoLevelsT, press_in, field_in, press_interp, field_interp) + + do k=1,nIsoLevelsT + t_isobaric(k,1:nCells) = field_interp(1:nCells,k) + end do + end if + + + !!!!!!!!!!! Calculate mean temperature in 500 hPa - 300 hPa layer !!!!!!!!!!! + + if (need_meanT_500_300) then + call compute_layer_mean(meanT_500_300, 50000.0, 30000.0, field_in, press_in) + end if + + + deallocate(field_in) + deallocate(field_interp) + deallocate(press_in) + deallocate(press_interp) + end if + + + !!!!!!!!!!! Additional height levels for vortex tracking !!!!!!!!!!! + if (need_z_isobaric) then + allocate(field_in(nCells, nVertLevelsP1)) + allocate(press_in(nCells, nVertLevelsP1)) + allocate(field_interp(nCells, nIsoLevelsZ)) + allocate(press_interp(nCells, nIsoLevelsZ)) + + do k=1,nIsoLevelsZ + press_interp(:,k) = z_iso_levels(k) + end do + + do iCell=1,nCells + do k=1,nVertLevelsP1 + kk = nVertLevelsP1+1-k + field_in(iCell,kk) = height(k,iCell) + end do + end do + + do iCell=1,nCells + do k=1,nVertLevelsP1 + kk = nVertLevelsP1+1-k + press_in(iCell,kk) = pressure2(k,iCell) * 100.0 + end do + end do + + call interp_tofixed_pressure(nCells, nVertLevelsP1, nIsoLevelsZ, press_in, field_in, press_interp, field_interp) + + do k=1,nIsoLevelsZ + z_isobaric(k,1:nCells) = field_interp(1:nCells,k) + end do + + deallocate(field_in) + deallocate(field_interp) + deallocate(press_in) + deallocate(press_interp) + end if + + if(allocated(temperature) ) deallocate(temperature ) + if(allocated(pressure2) ) deallocate(pressure2 ) + if(allocated(pressure) ) deallocate(pressure ) + if(allocated(dewpoint) ) deallocate(dewpoint ) 
 !formats:
! 201 format(i5,4(1x,e15.8))

 end subroutine interp_diagnostics


 !==================================================================================================
 subroutine interp_tofixed_pressure(ncol,nlev_in,nlev_out,pres_in,field_in,pres_out,field_out)
 !==================================================================================================
 !Vertically interpolates each column of a field from its native pressure levels to a set of
 !fixed output pressure levels.  Pressure is assumed to increase with the vertical index in both
 !pres_in and pres_out (index 1 = top of the column), as arranged by the callers above.

 !input arguments:
 integer,intent(in):: ncol,nlev_in,nlev_out   !number of columns; input and output level counts.

 real(kind=RKIND),intent(in),dimension(ncol,nlev_in) :: pres_in,field_in   !input pressures and field.
 real(kind=RKIND),intent(in),dimension(ncol,nlev_out):: pres_out           !target output pressures.

 !output arguments:
 real(kind=RKIND),intent(out),dimension(ncol,nlev_out):: field_out         !field at pres_out levels.

 !local variables:
! integer:: i1,i2,icol,k,kk
 integer:: icol,k,kk
 integer:: kkstart,kount
 integer,dimension(ncol):: kupper   !per-column index of the input level just above the target.

 real(kind=RKIND):: dpl,dpu         !pressure distance to the bracketing level below / above.

 !--------------------------------------------------------------------------------------------------

 !formats:
! 201 format(i5,8(1x,e15.8))

 !write(0,*)
 !write(0,*) '--- enter subroutine interp_tofixed_pressure:'
 !write(0,*) '... ncol = ',ncol
 !write(0,*) '... nlev_in = ',nlev_in
 !write(0,*) '... nlev_out = ',nlev_out
 !i1=1 ; i2=ncol
 !do k = 1, nlev_in
 ! write(0,201) k,pres_in(i1,k),field_in(i1,k),pres_in(i2,k),field_in(i2,k)
 !enddo
 !write(0,*)

 !start every column's bracketing search at the top of the column:
 do icol = 1, ncol
    kupper(icol) = 1
 enddo

 do k = 1, nlev_out

    !resume the downward search no deeper than the shallowest bracket found so far,
    !since output levels are processed top-down (min0 is the specific INTEGER MIN):
    kkstart = nlev_in
    do icol = 1, ncol
       kkstart = min0(kkstart,kupper(icol))
    enddo
    kount = 0

    !find, for every column, the pair of adjacent input levels that brackets pres_out:
    do kk = kkstart, nlev_in-1
       do icol = 1, ncol
          if(pres_out(icol,k).gt.pres_in(icol,kk).and.pres_out(icol,k).le.pres_in(icol,kk+1)) then
             kupper(icol) = kk
             kount = kount + 1
!            write(0,201) kupper(icol),pres_out(icol,k),pres_in(icol,kk),pres_in(icol,kk+1)
          endif
       enddo

       !fast path: once every column is bracketed, interpolate linearly in pressure and
       !jump past the out-of-range handling (goto 35 ends this k iteration early):
       if(kount.eq.ncol) then
          do icol = 1, ncol
             dpu = pres_out(icol,k) - pres_in(icol,kupper(icol))
             dpl = pres_in(icol,kupper(icol)+1) - pres_out(icol,k)
             field_out(icol,k) = (field_in(icol,kupper(icol))*dpl &
                                + field_in(icol,kupper(icol)+1)*dpu)/(dpl + dpu)
          end do
          goto 35
       end if
    enddo

    !at least one column has a target pressure outside its input range:
    do icol = 1, ncol
       if(pres_out(icol,k) .lt. pres_in(icol,1)) then
          !above the topmost input level: scale the top value proportionally with pressure,
          !so the extrapolated field tends to zero with pressure.
          field_out(icol,k) = field_in(icol,1)*pres_out(icol,k)/pres_in(icol,1)
       elseif(pres_out(icol,k) .gt. pres_in(icol,nlev_in)) then
          !below the lowest input level: hold the lowest-level value constant.
          field_out(icol,k) = field_in(icol,nlev_in)
       else
          !in range: same linear-in-pressure interpolation as the fast path above.
          dpu = pres_out(icol,k) - pres_in(icol,kupper(icol))
          dpl = pres_in(icol,kupper(icol)+1) - pres_out(icol,k)
          field_out(icol,k) = (field_in(icol,kupper(icol))*dpl &
                             + field_in(icol,kupper(icol)+1)*dpu)/(dpl + dpu)
       endif
    enddo

 35 continue
!   write(0,201) kupper(i1),pres_out(i1,k),pres_in(i1,kupper(i1)),pres_in(i1,kupper(i1)+1), &
!                field_out(i1,k),field_in(i1,kupper(i1)),field_in(i1,kupper(i1)+1)
!   write(0,201) kupper(i2),pres_out(i2,k),pres_in(i2,kupper(i2)),pres_in(i2,kupper(i2)+1), &
!                field_out(i2,k),field_in(i2,kupper(i2)),field_in(i2,kupper(i2)+1)

 enddo

 end subroutine interp_tofixed_pressure


 !Reduces surface pressure to mean sea-level pressure using an MM5-style algorithm
 !(see the mm5_test correction below).  Writes slp(:) = 0.0 and returns early if a
 !suitable reference level cannot be found for any column.
 subroutine compute_slp(ncol,nlev_in,nscalars,t,height,p,index_qv,scalars,slp)

 implicit none

 !input arguments:
 integer, intent(in) :: ncol, nlev_in, nscalars

 !p: in mb
 !t: in K
 !scalars: in kg/kg
 !height: in m
 real(kind=RKIND), intent(in), dimension(nlev_in,ncol) :: p,t
 real(kind=RKIND), intent(in), dimension(nlev_in+1,ncol) :: height   !w-level (interface) heights.
 integer, intent(in) :: index_qv                                     !water-vapor index into scalars.
 real(kind=RKIND), intent(in), dimension(nscalars,nlev_in,ncol) :: scalars

 !output arguments:
 real(kind=RKIND), intent(out), dimension(ncol) :: slp   !sea-level pressure, same units as p.

 !local variables:
 integer :: icol, k, kcount
 integer :: klo, khi

 real(kind=RKIND) :: gamma, rr, grav
 parameter (rr=287.0, grav=9.80616, gamma=0.0065)   !dry gas constant, gravity, std. lapse rate.

 real(kind=RKIND) :: tc, pconst
 parameter (tc=273.16+17.5, pconst=100.)   !"too hot" threshold (K); reference depth (same units as p).

 logical mm5_test
 parameter (mm5_test=.true.)   !enable the traditional MM5 hot-surface correction.

 integer, dimension(:), allocatable :: level
 real(kind=RKIND), dimension(:), allocatable :: t_surf, t_msl
 real(kind=RKIND) :: plo , phi , tlo, thi , zlo , zhi
 real(kind=RKIND) :: p_at_pconst , t_at_pconst , z_at_pconst, z_half_lowest

 logical :: l1, l2, l3, found

 ! Find least zeta level that is PCONST Pa above the surface. We later use this
 ! level to extrapolate a surface pressure and temperature, which is supposed
 ! to reduce the effect of the diurnal heating cycle in the pressure field.
 ! NOTE(review): the argument comments above say p is in mb (hPa); with pconst=100.
 ! the reference level is ~100 hPa above the surface, not 100 Pa — confirm units.

 if (.not.allocated(level)) allocate(level(ncol))
 if (.not.allocated(t_surf)) allocate(t_surf(ncol))
 if (.not.allocated(t_msl)) allocate(t_msl(ncol))

 do icol = 1 , ncol
    level(icol) = -1

    !walk up the column until pressure has dropped by pconst relative to the surface:
    k = 1
    found = .false.
    do while ( (.not. found) .and. (k.le.nlev_in))
       if ( p(k,icol) .lt. p(1,icol)-pconst ) then
          level(icol) = k
          found = .true.
       end if
       k = k+1
    end do

    if ( level(icol) .eq. -1 ) then
       write(0,*) 'Troubles finding level ', pconst,' above ground.'
       write(0,*) 'Problems first occur at (',icol,')'
       write(0,*) 'Surface pressure = ',p(1,icol),' hPa.'
       write(0,*) '*** MSLP field will not be computed'
       !give up for the whole field, not just this column:
       slp(:) = 0.0
       return
    end if

 end do

 ! Get temperature PCONST hPa above surface. Use this to extrapolate
 ! the temperature at the surface and down to sea level.

 do icol = 1 , ncol

    !trapping levels around the reference level; khi is capped at nlev_in-1 so that
    !khi+1 remains a valid level index below.
    klo = max ( level(icol) - 1 , 1 )
    khi = min ( klo + 1 , nlev_in - 1 )

    if ( klo .eq. khi ) then
       write(0,*) 'Trapping levels are weird.'
       write(0,*) 'icol = ',icol
       write(0,*) 'klo = ',klo,', khi = ',khi, ': and they should not be equal.'
       write(0,*) '*** MSLP field will not be computed'
       slp(:) = 0.0
       return
    end if

    plo = p(klo,icol)
    phi = p(khi,icol)
    !virtual temperatures at the trapping levels (0.608 = Rv/Rd - 1):
    tlo = t(klo,icol) * (1. + 0.608 * scalars(index_qv,klo,icol))
    thi = t(khi,icol) * (1. + 0.608 * scalars(index_qv,khi,icol))
    !mass-point heights as midpoints of the bounding interface heights:
    zlo = 0.5*(height(klo,icol)+height(klo+1,icol))
    zhi = 0.5*(height(khi,icol)+height(khi+1,icol))

    !temperature and height at the reference pressure, weighted in log-pressure.
    !NOTE(review): the weight is a PRODUCT of logs, log(p_at_pconst/phi)*log(plo/phi),
    !matching the traditional MM5/WRF formulation — confirm against the reference code.
    p_at_pconst = p(1,icol) - pconst
    t_at_pconst = thi-(thi-tlo)*log(p_at_pconst/phi)*log(plo/phi)
    z_at_pconst = zhi-(zhi-zlo)*log(p_at_pconst/phi)*log(plo/phi)

    !extrapolate to the surface along a dry adiabat in pressure, and to sea level
    !with the standard lapse rate:
    t_surf(icol) = t_at_pconst*(p(1,icol)/p_at_pconst)**(gamma*rr/grav)
    t_msl(icol) = t_at_pconst+gamma*z_at_pconst
!   if (icol.eq.500) then
!      write(0,*) plo,phi,tlo,thi,zlo,zhi,p_at_pconst,t_at_pconst,z_at_pconst
!      write(0,*) t_surf(icol),t_msl(icol),level(icol),klo,khi
!      write(0,*) height(klo,icol),height(khi,icol),height(khi+1,icol)
!   endif

 end do

 ! If we follow a traditional computation, there is a correction to the sea level
 ! temperature if both the surface and sea level temperatures are *too* hot.

 if ( mm5_test ) then
    kcount = 0
    do icol = 1 , ncol
       l1 = t_msl(icol) .lt. tc    !sea-level temperature below threshold?
       l2 = t_surf(icol) .le. tc   !surface temperature at/below threshold?
       l3 = .not. l1               !sea-level temperature at/above threshold?
       if ( l2 .and. l3 ) then
          !cool surface but hot sea level: clamp to the threshold.
          t_msl(icol) = tc
       else
          !otherwise apply the quadratic hot-surface correction.
          t_msl(icol) = tc - 0.005*(t_surf(icol)-tc)**2
          kcount = kcount+1
       end if
    end do
!   write(0,*) 'These number of points had t_msl adjusted ', kcount
 end if

 do icol = 1 , ncol
    z_half_lowest=0.5*(height(1,icol)+height(2,icol))
    !hypsometric reduction to sea level using the mean of surface and sea-level
    !virtual temperatures (hence the factor 2 in the numerator):
    slp(icol) = p(1,icol) * exp((2.*grav*z_half_lowest)/ &
                                (rr*(t_msl(icol)+t_surf(icol))))
 end do

 if (allocated(level)) deallocate(level)
 if (allocated(t_surf)) deallocate(t_surf)
 if (allocated(t_msl)) deallocate(t_msl)

 end subroutine compute_slp


 !***********************************************************************
 !
 ! routine compute_layer_mean
 !
 !> \brief Computes the mean of a field in the specified layer.
 !> \author Michael Duda
 !> \date 3 July 2014
 !> \details
 !> Given a 2d pressure field, press_in(nCells,nVertLevels), with pressure
 !> increasing with vertical index, and a 2d field,
 !> field_in(nCells,nVertLevels) with levels in the same order, this routine
 !> will compute the mean of the field for each column between pressures
 !> p1 and p2.
 !
 !-----------------------------------------------------------------------
 subroutine compute_layer_mean(layerMean, p1, p2, field_in, press_in)

 implicit none

 real(kind=RKIND), dimension(:), intent(out) :: layerMean   !per-column layer mean of field_in.
 real(kind=RKIND), intent(in) :: p1, p2                     !bounding pressures, either order.
 real(kind=RKIND), dimension(:,:), intent(in) :: field_in
 real(kind=RKIND), dimension(:,:), intent(in) :: press_in   !same units as p1/p2.

 integer :: nCells, nVertLevels
 integer :: iCell, k
 integer :: k_bot, k_top                !trapping-level indices for p_bot / p_top (-1 = not found).
 real(kind=RKIND) :: p_bot, p_top
 real(kind=RKIND) :: wtop_p, wtop_m     !interpolation weights at the layer top.
 real(kind=RKIND) :: wbot_p, wbot_m     !interpolation weights at the layer bottom.
 real(kind=RKIND) :: wtotal, w          !accumulated / per-sublayer pressure thickness.
 real(kind=RKIND) :: temp


 !
 ! Get dimensions of input arrays
 !
 nCells = size(field_in, 1)
 nVertLevels = size(field_in, 2)


 !
 ! Check that pressure is increasing with index (only the first column is checked)
 !
 if (press_in(1,1) > press_in(1,nVertLevels)) then
    write(stderrUnit,*) 'Error in compute_layer_mean: pressure should increase with index'
    layerMean(:) = 0.0
    return
 end if


 !
 ! Set the pressure at the top and bottom of the layer
 !
 if (p1 < p2) then
    p_top = p1
    p_bot = p2
 else
    p_top = p2
    p_bot = p1
 end if


 !
 ! For each column, compute the mean value of the field between p_bot and
 ! p_top, with the field weighted by delta-p in each layer
 !
 do iCell=1,nCells
    k_bot = -1
    k_top = -1

    ! Search for trapping levels: k_top is the index just above (or equal to)
    ! p_top, and k_bot is the index just above (or equal to) p_bot.
    do k=1,nVertLevels-1
       if (press_in(iCell,k) <= p_top .and. press_in(iCell,k+1) > p_top) then
          k_top = k
          !linear weights in pressure for the level below (_p) and above (_m) p_top:
          wtop_p = (p_top - press_in(iCell,k)) / (press_in(iCell,k+1) - press_in(iCell,k))
          wtop_m = (press_in(iCell,k+1) - p_top) / (press_in(iCell,k+1) - press_in(iCell,k))
       end if
       if (press_in(iCell,k) <= p_bot .and. press_in(iCell,k+1) > p_bot) then
          k_bot = k
          !note the swapped naming relative to the top: wbot_m weights level k_bot+1 below.
          wbot_m = (p_bot - press_in(iCell,k)) / (press_in(iCell,k+1) - press_in(iCell,k))
          wbot_p = (press_in(iCell,k+1) - p_bot) / (press_in(iCell,k+1) - press_in(iCell,k))
       end if
    end do

    if (k_top == -1 .or. k_bot == -1) then ! Layer intersects top or bottom boundary

       !layer not fully contained in the column: flag with zero rather than extrapolate.
       layerMean(iCell) = 0.0

    else if (k_top == k_bot) then ! Layer lies entirely within a single model layer

       !average of the field interpolated to p_top and to p_bot:
       layerMean(iCell) = wtop_m * field_in(iCell,k_top) + wtop_p * field_in(iCell,k_top+1)
       layerMean(iCell) = layerMean(iCell) + wbot_m * field_in(iCell,k_bot) + wbot_p * field_in(iCell,k_bot+1)
       layerMean(iCell) = 0.5 * layerMean(iCell)

    else

       !trapezoidal integration in pressure, sublayer by sublayer.
       ! First layer: from p_top down to press_in(iCell,k_top+1)
       wtotal = press_in(iCell,k_top+1) - p_top
       temp = wtop_m * field_in(iCell,k_top) + wtop_p * field_in(iCell,k_top+1)
       layerMean(iCell) = wtotal * 0.5 * (field_in(iCell,k_top+1) + temp)

       ! Middle layers
       do k=k_top+1,k_bot-1
          w = press_in(iCell,k+1) - press_in(iCell,k)
          wtotal = wtotal + w
          layerMean(iCell) = layerMean(iCell) + w * 0.5 * (field_in(iCell,k) + field_in(iCell,k+1))
       end do

       ! Last layer: from press_in(iCell,k_bot) down to p_bot
       w = p_bot - press_in(iCell,k_bot)
       wtotal = wtotal + w
       temp = wbot_m * field_in(iCell,k_bot) + wbot_p * field_in(iCell,k_bot+1)
       layerMean(iCell) = layerMean(iCell) + w * 0.5 * (field_in(iCell,k_bot) + temp)

       !normalize the pressure-weighted sum by the total integrated thickness:
       layerMean(iCell) = layerMean(iCell) / wtotal
    end if

 end do

 end subroutine compute_layer_mean

end module isobaric_diagnostics
diff --git a/src/core_atmosphere/diagnostics/mpas_atm_diagnostic_template.F b/src/core_atmosphere/diagnostics/mpas_atm_diagnostic_template.F
new file mode 100644
index 0000000000..85c4876633
--- /dev/null
+++ b/src/core_atmosphere/diagnostics/mpas_atm_diagnostic_template.F
@@ -0,0 +1,137 @@
! Copyright (c) 2016, Los Alamos National Security, LLC (LANS)
! and the University Corporation for Atmospheric Research (UCAR).
!
! Unless noted otherwise source code is licensed under the BSD license.
! Additional copyright and license information can be found in the LICENSE file
! distributed with this code, or at http://mpas-dev.github.com/license.html
!
! Template module for a single diagnostic: copy, rename, and fill in the five
! lifecycle routines (setup / update / compute / reset / cleanup).
module diagnostic_template

    use mpas_derived_types, only : MPAS_pool_type, MPAS_clock_type

    ! Module-level pointers saved at setup time for use by the other routines:
    type (MPAS_pool_type), pointer :: mesh
    type (MPAS_clock_type), pointer :: clock

    public :: diagnostic_template_setup, &
              diagnostic_template_update, &
              diagnostic_template_compute, &
              diagnostic_template_reset, &
              diagnostic_template_cleanup

    private


    contains


    !-----------------------------------------------------------------------
    !  routine diagnostic_template_setup
    !
    !> \brief Initialize the diagnostic
    !> \author
    !> \date
    !> \details
    !>  Initialize the diagnostic
    !
+ !----------------------------------------------------------------------- + subroutine diagnostic_template_setup(configs, all_pools, simulation_clock) + + use mpas_derived_types, only : MPAS_pool_type, MPAS_clock_type + use mpas_pool_routines, only : mpas_pool_get_subpool + + implicit none + + type (MPAS_pool_type), pointer :: configs + type (MPAS_pool_type), pointer :: all_pools + type (MPAS_clock_type), pointer :: simulation_clock + + ! Perform initialization, memory allocation, etc. + + ! Also, save pointers to any pools that will be used by this diagnostic + ! E.g., + call mpas_pool_get_subpool(all_pools, 'mesh', mesh) + + clock => simulation_clock + + end subroutine diagnostic_template_setup + + + !----------------------------------------------------------------------- + ! routine diagnostic_template_update + ! + !> \brief Handle diagnostic calculation needed after each timestep + !> \author + !> \date + !> \details + !> Handle diagnostic calculation needed after each timestep + ! + !----------------------------------------------------------------------- + subroutine diagnostic_template_update() + + implicit none + + ! Called at the end of every timestep + ! Update extrema, accumulations, etc. + + end subroutine diagnostic_template_update + + + !----------------------------------------------------------------------- + ! routine diagnostic_template_compute + ! + !> \brief Compute diagnostic before model output is written + !> \author + !> \date + !> \details + !> Compute diagnostic before model output is written + ! + !----------------------------------------------------------------------- + subroutine diagnostic_template_compute() + + implicit none + + ! Called immediately before diagnostics will be written + ! Compute the diagnostic + + end subroutine diagnostic_template_compute + + + !----------------------------------------------------------------------- + ! routine diagnostic_template_reset + ! 
+ !> \brief Reset diagnostic after it has been written + !> \author + !> \date + !> \details + !> Reset diagnostic after it has been written + ! + !----------------------------------------------------------------------- + subroutine diagnostic_template_reset() + + implicit none + + ! Called immediately after diagnostics have been written + ! Reset counters, accumulations, etc. + + end subroutine diagnostic_template_reset + + + !----------------------------------------------------------------------- + ! routine diagnostic_template_cleanup + ! + !> \brief Finalizes diagnostic + !> \author Michael Duda + !> \date 6 September 2016 + !> \details + !> Finalizes diagnostic + ! + !----------------------------------------------------------------------- + subroutine diagnostic_template_cleanup() + + implicit none + + ! Deallocate scratch arrays, etc. + + end subroutine diagnostic_template_cleanup + +end module diagnostic_template diff --git a/src/core_atmosphere/diagnostics/mpas_atm_diagnostics_manager.F b/src/core_atmosphere/diagnostics/mpas_atm_diagnostics_manager.F new file mode 100644 index 0000000000..15c5c57c67 --- /dev/null +++ b/src/core_atmosphere/diagnostics/mpas_atm_diagnostics_manager.F @@ -0,0 +1,173 @@ +! Copyright (c) 2016, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +module mpas_atm_diagnostics_manager + + use mpas_io_units, only : stderrUnit + + private + + public :: mpas_atm_diag_setup, & + mpas_atm_diag_update, & + mpas_atm_diag_compute, & + mpas_atm_diag_reset, & + mpas_atm_diag_cleanup + + contains + + + !----------------------------------------------------------------------- + ! routine MPAS_atm_diag_setup + ! 
+ !> \brief Initialize the diagnostics manager and all diagnostics + !> \author Michael Duda + !> \date 6 September 2016 + !> \details + !> Initialize the diagnostics manager and all diagnostics. + ! + !----------------------------------------------------------------------- + subroutine mpas_atm_diag_setup(stream_mgr, configs, structs, clock, dminfo) + + use mpas_atm_diagnostics_utils, only : mpas_atm_diag_utils_init + use mpas_derived_types, only : MPAS_streamManager_type, MPAS_pool_type, MPAS_clock_type, dm_info + use diagnostic_template, only : diagnostic_template_setup + use isobaric_diagnostics, only : isobaric_diagnostics_setup + use convective_diagnostics, only : convective_diagnostics_setup + use pv_diagnostics, only : pv_diagnostics_setup + use soundings, only : soundings_setup + + implicit none + + type (MPAS_streamManager_type), target, intent(inout) :: stream_mgr + type (MPAS_pool_type), pointer :: configs + type (MPAS_pool_type), pointer :: structs + type (MPAS_clock_type), pointer :: clock + type (dm_info), intent(in) :: dminfo + + + ! + ! Prepare the diagnostics utilities module for later use by diagnostics + ! + call mpas_atm_diag_utils_init(stream_mgr) + + call diagnostic_template_setup(configs, structs, clock) + call isobaric_diagnostics_setup(structs, clock) + call convective_diagnostics_setup(structs, clock) + call pv_diagnostics_setup(structs, clock) + call soundings_setup(configs, structs, clock, dminfo) + + end subroutine mpas_atm_diag_setup + + + !----------------------------------------------------------------------- + ! routine MPAS_atm_diag_update + ! + !> \brief Handle diagnostics accumulation at the end of each timestep + !> \author Michael Duda + !> \date 6 September 2016 + !> \details + !> MPAS_atm_diag_update. + ! 
+ !----------------------------------------------------------------------- + subroutine mpas_atm_diag_update() + + use diagnostic_template, only : diagnostic_template_update + use convective_diagnostics, only : convective_diagnostics_update + + implicit none + + + call diagnostic_template_update() + call convective_diagnostics_update() + + end subroutine mpas_atm_diag_update + + + !----------------------------------------------------------------------- + ! routine MPAS_atm_diag_compute + ! + !> \brief Compute diagnostics before they are written to output streams + !> \author Michael Duda + !> \date 6 September 2016 + !> \details + !> MPAS_atm_diag_compute. + ! + !----------------------------------------------------------------------- + subroutine mpas_atm_diag_compute() + + use diagnostic_template, only : diagnostic_template_compute + use isobaric_diagnostics, only : isobaric_diagnostics_compute + use convective_diagnostics, only : convective_diagnostics_compute + use pv_diagnostics, only : pv_diagnostics_compute + use soundings, only : soundings_compute + + implicit none + + + call diagnostic_template_compute() + call isobaric_diagnostics_compute() + call convective_diagnostics_compute() + call pv_diagnostics_compute() + call soundings_compute() + + end subroutine mpas_atm_diag_compute + + + !----------------------------------------------------------------------- + ! routine MPAS_atm_diag_reset + ! + !> \brief Resets a diagnostic after it has been computed and written + !> \author Michael Duda + !> \date 6 September 2016 + !> \details + !> MPAS_atm_diag_reset. + ! 
+ !----------------------------------------------------------------------- + subroutine mpas_atm_diag_reset() + + use diagnostic_template, only : diagnostic_template_reset + use convective_diagnostics, only : convective_diagnostics_reset + + implicit none + + + call diagnostic_template_reset() + call convective_diagnostics_reset() + + end subroutine mpas_atm_diag_reset + + + !----------------------------------------------------------------------- + ! routine MPAS_atm_diag_cleanup + ! + !> \brief Finalizes diagnostics manager and all diagnostics + !> \author Michael Duda + !> \date 6 September 2016 + !> \details + !> Finalizes diagnostics manager and all diagnostics. + ! + !----------------------------------------------------------------------- + subroutine mpas_atm_diag_cleanup() + + use mpas_atm_diagnostics_utils, only : mpas_atm_diag_utils_finalize + use diagnostic_template, only : diagnostic_template_cleanup + use soundings, only : soundings_cleanup + + implicit none + + + call diagnostic_template_cleanup() + call soundings_cleanup() + + ! + ! Take care of any needed cleanup in the diagnostics utility module + ! + call mpas_atm_diag_utils_finalize() + + end subroutine mpas_atm_diag_cleanup + +end module mpas_atm_diagnostics_manager diff --git a/src/core_atmosphere/diagnostics/mpas_atm_diagnostics_utils.F b/src/core_atmosphere/diagnostics/mpas_atm_diagnostics_utils.F new file mode 100644 index 0000000000..b29a93ebb0 --- /dev/null +++ b/src/core_atmosphere/diagnostics/mpas_atm_diagnostics_utils.F @@ -0,0 +1,235 @@ +! Copyright (c) 2016, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! 
+module mpas_atm_diagnostics_utils + + use mpas_derived_types, only : MPAS_streamManager_type + + private + + public :: mpas_atm_diag_utils_init, & + mpas_atm_diag_utils_finalize, & + mpas_field_will_be_written, & + mpas_stream_inclusion_count + + + type (MPAS_streamManager_type), pointer :: streamManager + + contains + + + !----------------------------------------------------------------------- + ! routine MPAS_atm_diag_utils_init + ! + !> \brief Prepares diagnostics utilities for use by diagnostics modules + !> \author Michael Duda + !> \date 12 October 2016 + !> \details + !> This routine prepares the diagnostics utilities for use by diagnostics + !> modules. + ! + !----------------------------------------------------------------------- + subroutine mpas_atm_diag_utils_init(stream_mgr) + + use mpas_derived_types, only : MPAS_streamManager_type + + implicit none + + type (MPAS_streamManager_type), target, intent(inout) :: stream_mgr + + streamManager => stream_mgr + + end subroutine mpas_atm_diag_utils_init + + + !----------------------------------------------------------------------- + ! routine MPAS_atm_diag_utils_finalize + ! + !> \brief Performs cleanup after diagnostics utilities will no longer be used + !> \author Michael Duda + !> \date 12 October 2016 + !> \details + !> This routine performs cleanup after diagnostics utilities will no + !> longer be used + ! + !----------------------------------------------------------------------- + subroutine mpas_atm_diag_utils_finalize() + + implicit none + + ! Nothing to do here at present... + + end subroutine mpas_atm_diag_utils_finalize + + + !----------------------------------------------------------------------- + ! routine MPAS_field_will_be_written + ! 
+ !> \brief Decide if a field will be written in next call to mpas_stream_mgr_write + !> \author Michael Duda + !> \date 12 October 2016 + !> \details + !> This function queries the stream manager to see whether there are any + !> streams that contain the field 'fieldName' and whose output alarms + !> are also ringing. If so, the function returns .true.. + !> The assumption is that, between the call to this function and the next + !> call to write all streams with mpas_stream_mgr_write(), the stream + !> (or streams) containing the named field will not have their alarms + !> externally reset. + ! + !----------------------------------------------------------------------- + logical function mpas_field_will_be_written(fieldName) + + use mpas_kind_types, only : StrKIND + use mpas_derived_types, only : MPAS_STREAM_OUTPUT, MPAS_STREAM_INPUT_OUTPUT + use mpas_stream_manager, only : mpas_stream_mgr_begin_iteration, mpas_stream_mgr_get_next_stream, & + MPAS_stream_mgr_ringing_alarms, mpas_stream_mgr_get_next_field + + implicit none + + character(len=*), intent(in) :: fieldName + + character (len=StrKIND) :: streamNameItr + character (len=StrKIND) :: fieldNameItr + integer :: streamDirection + logical :: streamActive + logical :: fieldActive + integer :: ierr + + mpas_field_will_be_written = .false. + + call mpas_stream_mgr_begin_iteration(streamManager) + do while (mpas_stream_mgr_get_next_stream(streamManager, streamID = streamNameItr, & + directionProperty = streamDirection, activeProperty = streamActive)) + + if (streamActive .and. ( streamDirection == MPAS_STREAM_OUTPUT .or. streamDirection == MPAS_STREAM_INPUT_OUTPUT )) then + + if (MPAS_stream_mgr_ringing_alarms(streamManager, streamID=streamNameItr, & + direction=MPAS_STREAM_OUTPUT, ierr=ierr)) then + + call mpas_stream_mgr_begin_iteration(streamManager, streamID=streamNameItr) + do while (mpas_stream_mgr_get_next_field(streamManager, streamNameItr, fieldNameItr, isActive=fieldActive)) + + if (fieldActive .and. 
(fieldNameItr == fieldName)) then + mpas_field_will_be_written = .true. + return + end if + + end do + end if + + end if + + end do + + end function mpas_field_will_be_written + + + !----------------------------------------------------------------------- + ! routine MPAS_stream_inclusion_count + ! + !> \brief Returns the number of streams containing the specified field + !> \author Michael Duda + !> \date 18 October 2016 + !> \details + !> This function queries the stream manager to determine how many streams + !> contain the specified field. The optional argument 'direction' can be + !> used to limit the count to only input streams, only output streams, or + !> both input and output streams. By default, the function only counts + !> streams that have a non-'none' input/output interval, but even streams + !> with input_interval or output_interval equal to 'none' can be considered + !> by setting the optional argument 'includeInactive' to .true.. + ! + !----------------------------------------------------------------------- + integer function mpas_stream_inclusion_count(fieldName, direction, includeInactive) + + use mpas_kind_types, only : StrKIND + use mpas_derived_types, only : MPAS_STREAM_INPUT, MPAS_STREAM_OUTPUT, MPAS_STREAM_INPUT_OUTPUT, & + MPAS_STREAM_PROPERTY_RECORD_INTV + use mpas_stream_manager, only : mpas_stream_mgr_begin_iteration, mpas_stream_mgr_get_next_stream, & + MPAS_stream_mgr_ringing_alarms, mpas_stream_mgr_get_next_field, & + mpas_stream_mgr_get_property + + implicit none + + character(len=*), intent(in) :: fieldName + integer, intent(in), optional :: direction + logical, intent(in), optional :: includeInactive + + character (len=StrKIND) :: streamNameItr + character (len=StrKIND) :: fieldNameItr + character (len=StrKIND) :: recordIntervalIn + character (len=StrKIND) :: recordIntervalOut + integer :: streamDirection + logical :: streamActive + logical :: fieldActive + integer :: ierr + + integer :: local_direction + logical :: 
local_includeInactive + + + if (present(direction)) then + local_direction = direction + else + local_direction = MPAS_STREAM_INPUT_OUTPUT + end if + + if (present(includeInactive)) then + local_includeInactive = includeInactive + else + local_includeInactive = .false. + end if + + + mpas_stream_inclusion_count = 0 + + call mpas_stream_mgr_begin_iteration(streamManager) + STREAM_LOOP: do while (mpas_stream_mgr_get_next_stream(streamManager, streamID = streamNameItr, & + directionProperty = streamDirection, activeProperty = streamActive)) + + call MPAS_stream_mgr_get_property(streamManager, trim(streamNameItr), MPAS_STREAM_PROPERTY_RECORD_INTV, & + recordIntervalIn, direction=MPAS_STREAM_INPUT, ierr=ierr) + + call MPAS_stream_mgr_get_property(streamManager, trim(streamNameItr), MPAS_STREAM_PROPERTY_RECORD_INTV, & + recordIntervalOut, direction=MPAS_STREAM_OUTPUT, ierr=ierr) + + ! Determine whether this stream is "active" for the purposes of consideration here + if (.not. local_includeInactive) then + if (streamActive) then + streamActive = ((local_direction == MPAS_STREAM_INPUT .and. trim(recordIntervalIn) /= 'none') & + .or. (local_direction == MPAS_STREAM_OUTPUT .and. trim(recordIntervalOut) /= 'none') & + .or. (local_direction == MPAS_STREAM_INPUT_OUTPUT .and. ((trim(recordIntervalIn) /= 'none') & + .or. (trim(recordIntervalOut) /= 'none')))) + end if + else + streamActive = .true. + end if + + if (streamActive .and. ((local_direction == MPAS_STREAM_INPUT .and. streamDirection == MPAS_STREAM_INPUT) & + .or. (local_direction == MPAS_STREAM_OUTPUT .and. streamDirection == MPAS_STREAM_OUTPUT) & + .or. (local_direction == MPAS_STREAM_INPUT_OUTPUT) & + .or. (streamDirection == MPAS_STREAM_INPUT_OUTPUT))) then + + call mpas_stream_mgr_begin_iteration(streamManager, streamID=streamNameItr) + do while (mpas_stream_mgr_get_next_field(streamManager, streamNameItr, fieldNameItr, isActive=fieldActive)) + + if (fieldActive .and. 
(fieldNameItr == fieldName)) then + mpas_stream_inclusion_count = mpas_stream_inclusion_count + 1 + cycle STREAM_LOOP + end if + + end do + + end if + + end do STREAM_LOOP + + end function mpas_stream_inclusion_count + + +end module mpas_atm_diagnostics_utils diff --git a/src/core_atmosphere/diagnostics/pv_diagnostics.F b/src/core_atmosphere/diagnostics/pv_diagnostics.F new file mode 100644 index 0000000000..0d48c036ea --- /dev/null +++ b/src/core_atmosphere/diagnostics/pv_diagnostics.F @@ -0,0 +1,1615 @@ +! Copyright (c) 2016, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +module pv_diagnostics + + use mpas_derived_types, only : MPAS_pool_type, MPAS_clock_type + use mpas_kind_types, only : RKIND + + type (MPAS_pool_type), pointer :: mesh + type (MPAS_pool_type), pointer :: state + type (MPAS_pool_type), pointer :: diag + type (MPAS_pool_type), pointer :: tend + type (MPAS_pool_type), pointer :: tend_physics + + type (MPAS_clock_type), pointer :: clock + + public :: pv_diagnostics_setup, & + pv_diagnostics_compute + + private + + logical :: need_ertel_pv, need_u_pv, need_v_pv, need_theta_pv, need_vort_pv, need_iLev_DT, & + need_tend_lw, need_tend_sw, need_tend_bl, need_tend_cu, need_tend_mix, need_tend_mp, & + need_tend_diab, need_tend_fric, need_tend_diab_pv, need_tend_fric_pv, need_dtheta_mp + + + contains + + + !----------------------------------------------------------------------- + ! routine pv_diagnostics_setup + ! + !> \brief Initialize the diagnostic + !> \author + !> \date + !> \details + !> Initialize the diagnostic + ! 
+ !----------------------------------------------------------------------- + subroutine pv_diagnostics_setup(all_pools, simulation_clock) + + use mpas_derived_types, only : MPAS_pool_type, MPAS_clock_type, MPAS_STREAM_OUTPUT, MPAS_STREAM_INPUT, & + MPAS_STREAM_INPUT_OUTPUT + use mpas_derived_types, only : MPAS_pool_type, MPAS_clock_type + use mpas_pool_routines, only : mpas_pool_get_subpool + + implicit none + + type (MPAS_pool_type), pointer :: all_pools + type (MPAS_clock_type), pointer :: simulation_clock + + + call mpas_pool_get_subpool(all_pools, 'mesh', mesh) + call mpas_pool_get_subpool(all_pools, 'state', state) + call mpas_pool_get_subpool(all_pools, 'diag', diag) + call mpas_pool_get_subpool(all_pools, 'tend', tend) + call mpas_pool_get_subpool(all_pools, 'tend_physics', tend_physics) + + clock => simulation_clock + + end subroutine pv_diagnostics_setup + + + !----------------------------------------------------------------------- + ! routine pv_diagnostics_compute + ! + !> \brief Compute diagnostic before model output is written + !> \author + !> \date + !> \details + !> Compute diagnostic before model output is written + ! + !----------------------------------------------------------------------- + subroutine pv_diagnostics_compute() + + use mpas_atm_diagnostics_utils, only : MPAS_field_will_be_written + + implicit none + + logical :: need_any_diags, need_any_budget + + need_any_diags = .false. + need_any_budget = .false. + + + need_ertel_pv = MPAS_field_will_be_written('ertel_pv') + need_any_diags = need_any_diags .or. need_ertel_pv + need_u_pv = MPAS_field_will_be_written('u_pv') + need_any_diags = need_any_diags .or. need_u_pv + need_v_pv = MPAS_field_will_be_written('v_pv') + need_any_diags = need_any_diags .or. need_v_pv + need_theta_pv = MPAS_field_will_be_written('theta_pv') + need_any_diags = need_any_diags .or. need_theta_pv + need_vort_pv = MPAS_field_will_be_written('vort_pv') + need_any_diags = need_any_diags .or. 
need_vort_pv + need_iLev_DT = MPAS_field_will_be_written('iLev_DT') + need_any_diags = need_any_diags .or. need_iLev_DT + + need_tend_lw = MPAS_field_will_be_written('depv_dt_lw') + need_any_diags = need_any_diags .or. need_tend_lw + need_any_budget = need_any_budget .or. need_tend_lw + need_tend_sw = MPAS_field_will_be_written('depv_dt_sw') + need_any_diags = need_any_diags .or. need_tend_sw + need_any_budget = need_any_budget .or. need_tend_sw + need_tend_bl = MPAS_field_will_be_written('depv_dt_bl') + need_any_diags = need_any_diags .or. need_tend_bl + need_any_budget = need_any_budget .or. need_tend_bl + need_tend_cu = MPAS_field_will_be_written('depv_dt_cu') + need_any_diags = need_any_diags .or. need_tend_cu + need_any_budget = need_any_budget .or. need_tend_cu + need_tend_mix = MPAS_field_will_be_written('depv_dt_mix') + need_any_diags = need_any_diags .or. need_tend_mix + need_any_budget = need_any_budget .or. need_tend_mix + need_dtheta_mp = MPAS_field_will_be_written('dtheta_dt_mp') + need_any_diags = need_any_diags .or. need_dtheta_mp + need_any_budget = need_any_budget .or. need_dtheta_mp + need_tend_mp = MPAS_field_will_be_written('depv_dt_mp') + need_any_diags = need_any_diags .or. need_tend_mp + need_any_budget = need_any_budget .or. need_tend_mp + need_tend_diab = MPAS_field_will_be_written('depv_dt_diab') + need_any_diags = need_any_diags .or. need_tend_diab + need_any_budget = need_any_budget .or. need_tend_diab + need_tend_fric = MPAS_field_will_be_written('depv_dt_fric') + need_any_diags = need_any_diags .or. need_tend_fric + need_any_budget = need_any_budget .or. need_tend_fric + need_tend_diab_pv = MPAS_field_will_be_written('depv_dt_diab_pv') + need_any_diags = need_any_diags .or. need_tend_diab_pv + need_any_budget = need_any_budget .or. need_tend_diab_pv + need_tend_fric_pv = MPAS_field_will_be_written('depv_dt_fric_pv') + need_any_diags = need_any_diags .or. need_tend_fric_pv + need_any_budget = need_any_budget .or. 
need_tend_fric_pv + + if (need_any_diags) then + call atm_compute_pv_diagnostics(state, 1, diag, mesh) + end if + if (need_any_budget) then + call atm_compute_pvBudget_diagnostics(state, 1, diag, mesh, tend, tend_physics) + end if + + end subroutine pv_diagnostics_compute + + + real(kind=RKIND) function dotProduct(a, b, sz) + + implicit none + + real(kind=RKIND), dimension(:), intent(in) :: a, b + integer, intent(in) :: sz + + integer :: i + real(kind=RKIND) :: rsum + + rsum = 0.0_RKIND + + do i=1,sz + rsum = rsum + a(i)*b(i) + end do + + dotProduct = rsum + end function dotProduct + + integer function elementIndexInArray(val, array, sz) + + implicit none + + integer, intent(in) :: val + integer, dimension(:), intent(in) :: array + integer, intent(in) :: sz + + integer :: i, ind + ind = -1 + do i=1,sz + if (array(i)==val) then + ind = i + elementIndexInArray = ind !This returns, right? + exit !just in case :) + end if + end do + elementIndexInArray = ind + end function elementIndexInArray + + real(kind=RKIND) function formErtelPV(gradxu, gradtheta, density, unitX, unitY, unitZ) + + use mpas_constants, only : omega_e => omega + + implicit none + + real(kind=RKIND), dimension(3), intent(inout) :: gradxu + real(kind=RKIND), dimension(3), intent(in) :: gradtheta + real(kind=RKIND), intent(in) :: density + real(kind=RKIND), dimension(3), intent(in) :: unitX, unitY, unitZ + + real(kind=RKIND) :: epv, eVort + real(kind=RKIND), dimension(3) :: eVortDir, eVortComponents + + !earth vorticity is in +z-direction in global Cartesian space + eVort = 2.0 * omega_e + eVortDir(1) = 0.0_RKIND + eVortDir(2) = 0.0_RKIND + eVortDir(3) = eVort + + eVortComponents(1) = dotProduct(eVortDir, unitX,3) + eVortComponents(2) = dotProduct(eVortDir, unitY,3) + eVortComponents(3) = dotProduct(eVortDir, unitZ,3) + + gradxu(:) = gradxu(:) + eVortComponents(:) + + epv = dotProduct(gradxu, gradtheta,3) / density + + epv = epv * 1.0e6 !SI to PVUs + + formErtelPV = epv + end function formErtelPV + + 
subroutine local2FullVorticity(gradxu, unitX, unitY, unitZ) + !given gradxu, return gradxu+earthVort + + use mpas_constants, only : omega_e => omega + + implicit none + + real(kind=RKIND), dimension(3), intent(inout) :: gradxu + real(kind=RKIND), dimension(3), intent(in) :: unitX, unitY, unitZ + + real(kind=RKIND) :: eVort + real(kind=RKIND), dimension(3) :: eVortDir, eVortComponents + + !earth vorticity is in z-direction in global Cartesian space + eVort = 2.0 * omega_e + eVortDir(1) = 0.0_RKIND + eVortDir(2) = 0.0_RKIND + eVortDir(3) = eVort + + eVortComponents(1) = dotProduct(eVortDir, unitX,3) + eVortComponents(2) = dotProduct(eVortDir, unitY,3) + eVortComponents(3) = dotProduct(eVortDir, unitZ,3) + + gradxu(:) = gradxu(:) + eVortComponents(:) + end subroutine local2FullVorticity + + real(kind=RKIND) function calc_verticalVorticity_cell(c0, level, nVerticesOnCell, verticesOnCell, cellsOnVertex, & + kiteAreasOnVertex, areaCell, vVortVertex) + !area weighted average of vorticity at vertices to cell center for the specified cell + ! 
+ implicit none + + real(kind=RKIND), intent(in) :: areaCell + integer, intent(in) :: c0, level, nVerticesOnCell + integer, dimension(:,:), intent(in) :: verticesOnCell, cellsOnVertex + real(kind=RKIND), dimension(:,:), intent(in) :: kiteAreasOnVertex, vVortVertex + + real(kind=RKIND) :: vVortCell + integer :: i, iVertex, cellIndOnVertex + + vVortCell = 0.0_RKIND + do i = 1,nVerticesOnCell + iVertex = verticesOnCell(i,c0) + cellIndOnVertex = elementIndexInArray(c0, cellsOnVertex(:,iVertex), 3) + vVortCell = vVortCell + kiteAreasOnVertex(cellIndOnVertex, iVertex)*vVortVertex(level, iVertex)/areaCell + end do + + calc_verticalVorticity_cell = vVortCell + end function calc_verticalVorticity_cell + + subroutine coordinateSystem_cell(cellTangentPlane, localVerticalUnitVectors, c0, xyz) + + implicit none + + real(kind=RKIND), dimension(3,2,*), intent(in) :: cellTangentPlane + real(kind=RKIND), dimension(3,*), intent(in) :: localVerticalUnitVectors + integer, intent(in) :: c0 + real(kind=RKIND), dimension(3,3), intent(out) :: xyz + + integer :: i + + xyz(:,1) = cellTangentPlane(:,1,c0) !are these guaranteed unit vectors? 
+ xyz(:,2) = cellTangentPlane(:,2,c0) + xyz(:,3) = localVerticalUnitVectors(:,c0) + do i=1,2 + call normalizeVector(xyz(:,i), 3) + end do + end subroutine coordinateSystem_cell + + real(kind=RKIND) function fluxSign(c0, iEdge, cellsOnEdge) + + !For finite volume computations, we'll use a normal pointing out of the cell + implicit none + + integer, intent(in) :: c0 + integer, intent(in) :: iEdge + integer, dimension(:,:), intent(in) :: cellsOnEdge + + if (c0 == cellsOnEdge(1,iEdge)) then + fluxSign = 1.0_RKIND + else + fluxSign = -1.0_RKIND + end if + end function fluxSign + + real(kind=RKIND) function calc_heightCellCenter(c0, level, zgrid) + + implicit none + + integer, intent(in) :: c0, level + real(kind=RKIND), dimension(:,:), intent(in) :: zgrid + + calc_heightCellCenter = 0.5*(zgrid(level,c0)+zgrid(level+1,c0)) + end function calc_heightCellCenter + + real(kind=RKIND) function calc_heightVerticalEdge(c0, c1, level, zgrid) + + implicit none + + integer, intent(in) :: c0, c1, level + real(kind=RKIND), dimension(:,:), intent(in) :: zgrid + + real(kind=RKIND) :: hTop, hBottom + + hTop = .5*(zgrid(level+1,c0)+zgrid(level+1,c1)) + hBottom = .5*(zgrid(level,c0)+zgrid(level,c1)) + + calc_heightVerticalEdge = hTop-hBottom + end function calc_heightVerticalEdge + + subroutine normalizeVector(vals, sz) + !normalize a vector to unit magnitude + implicit none + + real (kind=RKIND), dimension(:), intent(inout) :: vals + integer, intent(in) :: sz + + integer :: i + real (kind=RKIND) :: mag + + mag = 0.0_RKIND !sqrt(sum(squares)) + do i=1,sz + mag = mag+vals(i)*vals(i) + end do + mag = sqrt(mag) + vals(:) = vals(:)/mag + end subroutine normalizeVector + + real(kind=RKIND) function calcVolumeCell(areaCell, nEdges, hEdge) + + implicit none + + integer, intent(in) :: nEdges + real(kind=RKIND), intent(in) :: areaCell + real(kind=RKIND), dimension(nEdges), intent(in) :: hEdge + + integer :: i + real(kind=RKIND) :: avgHt, vol + + avgHt = 0.0_RKIND + do i=1,nEdges + avgHt = avgHt + 
hEdge(i) + end do + avgHt = avgHt/nEdges + + vol = areaCell*avgHt + calcVolumeCell = vol + end function calcVolumeCell + + real(kind=RKIND) function calc_horizDeriv_fv(valEdges, nNbrs, dvEdge, dhEdge, & + normalEdge, unitDeriv, volumeCell) + !normals to edges point out of cell + implicit none + + integer, intent(in) :: nNbrs + real(kind=RKIND), dimension(:), intent(in) :: valEdges, dvEdge, dhEdge + real(kind=RKIND), dimension(3,nNbrs), intent(in) :: normalEdge + real(kind=RKIND), dimension(3), intent(in) :: unitDeriv + real(kind=RKIND), intent(in) :: volumeCell + + integer :: i + real(kind=RKIND) :: vale, rsum, areaFace + real(kind=RKIND), dimension(3) :: unitNormalEdge + + rsum = 0.0_RKIND + do i=1,nNbrs + vale = valEdges(i) !0.5 * (val0 + valNbrs(i)) + areaFace = dvEdge(i) * dhEdge(i) + unitNormalEdge(:) = normalEdge(:,i) + call normalizeVector(unitNormalEdge,3) + areaFace = areaFace*dotProduct(unitNormalEdge, unitDeriv,3) !* abs(dotProduct(unitNormalEdge, unitDeriv,3)) + rsum = rsum + vale * areaFace + end do + rsum = rsum / volumeCell + + calc_horizDeriv_fv = rsum + end function calc_horizDeriv_fv + + !cell centers are halfway between w faces + real(kind=RKIND) function calc_vertDeriv_center(val0, valp, valm, z0,zp,zm) + + implicit none + + real(kind=RKIND), intent(in) :: val0, valp, valm, z0,zp,zm !center, plus, minus + + real(kind=RKIND) :: dval_dzp, dval_dzm + + !Average 1 sided differences to below and above since not equally spaced pts + dval_dzp = calc_vertDeriv_one(valp, val0, zp-z0) + dval_dzm = calc_vertDeriv_one(val0, valm, z0-zm) + calc_vertDeriv_center = 0.5*(dval_dzp+dval_dzm) + + end function calc_vertDeriv_center + + real(kind=RKIND) function calc_vertDeriv_one(valp, valm, dz) + !1 sided finite difference + + implicit none + + real(kind=RKIND), intent(in) :: valp, valm, dz + + calc_vertDeriv_one = (valp - valm) / dz + + end function calc_vertDeriv_one + + subroutine floodFill_strato(mesh, diag, pvuVal, stratoPV) + !Searching down each column from 
TOA to find 2pvu surface is buggy with stratospheric wave breaking, + !since will find 2 pvu at a higher level than "tropopause". This looks to be worse as mesh gets finer and vertical vorticity jumps. + !Note that stratospheric blobs may persist for long times w/ slow mixing downstream of mountains or deep convection. + !A few quicker fixes (make sure <2pvu for a number of layers; search down from 10PVU instead of TOA) are hacky and not robust. + + !To alleviate the (hopefully) pockets of wave breaking, we can flood fill from a known + !stratosphere region (e.g., model top > 2pvu) and hopefully filter down around any trouble regions. + !The problem w/ using only the flood fill is that strong surface PV anomalies can connect to 2pvu, + !and the resulting "flood-filled 2 pvu" can have sizeable areas that are just at the surface while there is clearly a tropopause above (e.g., in a cross-section). + !To address large surface blobs, take the flood fill mask and try to go up from the surface to 10 pvu w/in column. If can, all stratosphere. Else, disconnect "surface blob". + + !The "output" is iLev_DT, which is the vertical index for the level >= pvuVal. If >nVertLevels, pvuVal above column. If <2, pvuVal below column. + !Communication between blocks during the flood fill may be needed to treat some edge cases appropriately. 

      use mpas_pool_routines, only : mpas_pool_get_dimension, mpas_pool_get_array

      implicit none

      type (mpas_pool_type), intent(in) :: mesh
      type (mpas_pool_type), intent(inout) :: diag
      real(kind=RKIND), intent(in) :: pvuVal, stratoPV

      integer :: iCell, k, nChanged, iNbr, iCellNbr
      integer, pointer :: nCells, nVertLevels
      integer, dimension(:), pointer :: nEdgesOnCell, iLev_DT
      integer, dimension(:,:), pointer :: cellsOnCell

      real(kind=RKIND) :: sgnHemi, sgn
      real(kind=RKIND),dimension(:),pointer:: latCell
      real(kind=RKIND), dimension(:,:), pointer :: ertel_pv

      !candInStrato: level satisfies the PV threshold but not yet connected;
      !inStrato: level confirmed connected to the model-top stratosphere seeds
      integer, dimension(:,:), allocatable :: candInStrato, inStrato

      call mpas_pool_get_dimension(mesh, 'nCells', nCells)
      call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels)
      call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell)
      call mpas_pool_get_array(mesh, 'cellsOnCell', cellsOnCell)
      call mpas_pool_get_array(mesh, 'latCell', latCell)

      call mpas_pool_get_array(diag, 'ertel_pv', ertel_pv)
      call mpas_pool_get_array(diag, 'iLev_DT', iLev_DT)

      !nCells+1 columns: presumably the extra column is the "garbage" halo cell — TODO confirm
      allocate(candInStrato(nVertLevels, nCells+1))
      allocate(inStrato(nVertLevels, nCells+1))
      candInStrato(:,:) = 0
      inStrato(:,:) = 0
      !store whether each level above DT to avoid repeating logic. we'll use candInStrato as a isVisited marker further below.
      do iCell=1,nCells
         sgnHemi = sign(1.0_RKIND, latCell(iCell)) !at the equator, sign(0)=0
         if (sgnHemi .EQ. 0.0) sgnHemi = 1.0_RKIND
         do k=1,nVertLevels
            !hemisphere-signed PV relative to the threshold (PV<0 in SH)
            sgn = ertel_pv(k,iCell)*sgnHemi-pvuVal
            if (sgn .GE. 0) candInStrato(k,iCell) = 1
         end do
      end do

      !seed flood fill with model top that's above DT.
      !can have model top below 2pvu (eg, tropics)
      nChanged = 0
      do iCell=1,nCells
         !NOTE(review): assumes nVertLevels > 5 so the seed window is valid — confirm
         do k=nVertLevels-5,nVertLevels
            if (candInStrato(k,iCell) .GT. 0) then
               inStrato(k,iCell) = 1
               candInStrato(k,iCell) = 0
               nChanged = nChanged+1
            end if
         end do
      end do

      !flood fill from the given seeds. since I don't know enough fortran,
      !we'll just brute force a continuing loop rather than queue.
      !Sweep repeatedly until a full pass makes no changes.
      do while(nChanged .GT. 0)
         nChanged = 0
         do iCell=1,nCells
            do k=nVertLevels,1,-1
               !update if candidate and neighbor in strato
               if (candInStrato(k,iCell) .GT. 0) then
                  !nbr above
                  if (k .LT. nVertLevels) then
                     if (inStrato(k+1,iCell) .GT. 0) then
                        inStrato(k,iCell) = 1
                        candInStrato(k,iCell) = 0
                        nChanged = nChanged+1
                        cycle
                     end if
                  end if

                  !side nbrs
                  do iNbr = 1, nEdgesOnCell(iCell)
                     iCellNbr = cellsOnCell(iNbr,iCell)
                     if (inStrato(k,iCellNbr) .GT. 0) then
                        inStrato(k,iCell) = 1
                        candInStrato(k,iCell) = 0
                        nChanged = nChanged+1
                        !NOTE(review): this 'cycle' advances the iNbr loop, not the k loop,
                        !so nChanged can be incremented more than once for the same (k,iCell).
                        !Harmless for termination (any positive nChanged triggers another sweep),
                        !but 'exit' was likely intended — confirm.
                        cycle
                     end if
                  end do

                  !nbr below
                  if (k .GT. 1) then
                     if (inStrato(k-1,iCell) .GT. 0) then
                        inStrato(k,iCell) = 1
                        candInStrato(k,iCell) = 0
                        nChanged = nChanged+1
                        cycle
                     end if
                  end if
               end if !candInStrato
            end do !levels
         end do !cells
      end do !while

      !Detach high surface PV blobs w/o vertical connection to "stratosphere"
      do iCell=1,nCells
         if (inStrato(1,iCell) .GT. 0) then
            !see how high up we can walk in the column
            do k=2,nVertLevels
               if (inStrato(k,iCell) .LT. 1) then
                  exit
               end if !k is highest connected level to sfc
               sgnHemi = sign(1.0_RKIND, latCell(iCell)) !at the equator, sign(0)=0
               if (sgnHemi .EQ. 0.0) sgnHemi = 1.0_RKIND
               !require the column to reach the stronger stratoPV threshold
               sgn = ertel_pv(k,iCell)*sgnHemi-stratoPV
               if (sgn .LT. 0) then !not actually connected to "stratosphere"
                  inStrato(1:k,iCell) = 0
               end if
            end do !k
         end if !inStrato at sfc
      end do !iCell

      !Fill iLev_DT with the lowest level above the tropopause (If DT above column, iLev>nVertLevels. If DT below column, iLev=0.
      nChanged = 0
      do iCell=1,nCells
         !find the lowest stratospheric level in this column
         do k=1,nVertLevels
            if (inStrato(k,iCell) .GT. 0) then
               nChanged = 1
               exit
            end if
         end do !k
         if (nChanged .GT. 0) then !found lowest level
            if (k .EQ. 1) then
               sgnHemi = sign(1.0_RKIND, latCell(iCell))
               sgn = ertel_pv(k,iCell)*sgnHemi-pvuVal
               if (sgn .GT. 0) then !whole column above DT
                  iLev_DT(iCell) = 0
               end if
               !NOTE(review): when k==1 and sgn<=0, iLev_DT is left unassigned here — confirm intended
            else
               iLev_DT(iCell) = k
            end if
         else !whole column below DT
            iLev_DT(iCell) = nVertLevels+2
         end if
         !NOTE(review): nChanged is not reset per cell; once any column finds a level,
         !subsequent columns reuse nChanged=1 — verify against original intent
      end do !iCell

   end subroutine floodFill_strato

   !> Flood-fills the troposphere (|PV| < pvuVal) from near-surface seeds and
   !> writes the lowest level above the troposphere into iLev_DT.
   subroutine floodFill_tropo(mesh, diag, pvuVal)
      !Searching down each column from TOA to find 2pvu surface is buggy with stratospheric wave breaking,
      !since will find 2 pvu at a higher level than "tropopause". This looks to be worse as mesh gets finer and vertical vorticity jumps.
      !Note that stratospheric blobs may persist for long times w/ slow mixing downstream of mountains or deep convection.
      !A few quicker fixes (make sure <2pvu for a number of layers; search down from 10PVU instead of TOA) are hacky and not robust.

      !Two flood fill options are to:
      ! (1) flood fill stratosphere (>2pvu) from stratosphere seeds near model top. Strong surface PV anomalies can connect to 2pvu,
      ! and the resulting "flood-filled 2 pvu" can have sizeable areas that are just at the surface while there is clearly a tropopause above (e.g., in a cross-section).
      ! To address large surface blobs, take the flood fill mask and try to go up from the surface to 10 pvu w/in column. If can, all stratosphere. Else, disconnect "surface blob".
      ! (2) flood fill troposphere (<2pvu) from troposphere seeds near surface.
      !Somewhat paradoxically, the bottom of the stratosphere is lower than the top of the troposphere.

      !The "output" is iLev_DT, which is the vertical index for the level >= pvuVal. If >nVertLevels, pvuVal above column. If <2, pvuVal below column.
      !Communication between blocks during the flood fill may be needed to treat some edge cases appropriately.

      use mpas_pool_routines, only : mpas_pool_get_dimension, mpas_pool_get_array

      implicit none

      type (mpas_pool_type), intent(in) :: mesh
      type (mpas_pool_type), intent(inout) :: diag
      real(kind=RKIND), intent(in) :: pvuVal

      integer :: iCell, k, nChanged, iNbr, iCellNbr, levInd
      integer, pointer :: nCells, nVertLevels
      integer, dimension(:), pointer :: nEdgesOnCell, iLev_DT
      integer, dimension(:,:), pointer :: cellsOnCell

      real(kind=RKIND) :: sgnHemi, sgn
      real(kind=RKIND),dimension(:),pointer:: latCell
      real(kind=RKIND), dimension(:,:), pointer :: ertel_pv

      integer, dimension(:,:), allocatable :: candInTropo, inTropo !whether in troposphere

      call mpas_pool_get_dimension(mesh, 'nCells', nCells)
      call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels)
      call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell)
      call mpas_pool_get_array(mesh, 'cellsOnCell', cellsOnCell)
      call mpas_pool_get_array(mesh, 'latCell', latCell)

      call mpas_pool_get_array(diag, 'ertel_pv', ertel_pv)
      !call mpas_pool_get_array(diag, 'iLev_DT_trop', iLev_DT)
      call mpas_pool_get_array(diag, 'iLev_DT', iLev_DT)

      allocate(candInTropo(nVertLevels, nCells+1))
      allocate(inTropo(nVertLevels, nCells+1))
      candInTropo(:,:) = 0
      inTropo(:,:) = 0
      !store whether each level above DT to avoid repeating logic. we'll use cand as a isVisited marker further below.
      do iCell=1,nCells
         sgnHemi = sign(1.0_RKIND, latCell(iCell)) !at the equator, sign(0)=0
         if (sgnHemi .EQ. 0.0) sgnHemi = 1.0_RKIND
         do k=1,nVertLevels
            !tropospheric candidate: hemisphere-signed PV below the threshold
            sgn = ertel_pv(k,iCell)*sgnHemi-pvuVal
            if (sgn .LT. 0) candInTropo(k,iCell) = 1
         end do
      end do

      !seed flood fill with near surface that's below DT (can have surface above 2pvu from pv anoms).
      !Note that this would be wrong if low PV "stratospheric" blobs are right above the surface
      nChanged = 0
      levInd = min(nVertLevels, 3) !seed from the lowest 3 levels (or fewer)
      do iCell=1,nCells
         do k=1,levInd
            if (candInTropo(k,iCell) .GT. 0) then
               inTropo(k,iCell) = 1
               candInTropo(k,iCell) = 0
               nChanged = nChanged+1
            end if
         end do
      end do

      !flood fill from the given seeds. since I don't know enough fortran,
      !we'll just brute force a continuing loop rather than queue.
      do while(nChanged .GT. 0)
         nChanged = 0
         do iCell=1,nCells
            do k=1,nVertLevels
               !update if candidate and neighbor in troposphere
               if (candInTropo(k,iCell) .GT. 0) then
                  !nbr below
                  if (k .GT. 1) then
                     if (inTropo(k-1,iCell) .GT. 0) then
                        inTropo(k,iCell) = 1
                        candInTropo(k,iCell) = 0
                        nChanged = nChanged+1
                        cycle
                     end if
                  end if

                  !side nbrs
                  do iNbr = 1, nEdgesOnCell(iCell)
                     iCellNbr = cellsOnCell(iNbr,iCell)
                     if (inTropo(k,iCellNbr) .GT. 0) then
                        inTropo(k,iCell) = 1
                        candInTropo(k,iCell) = 0
                        nChanged = nChanged+1
                        !NOTE(review): 'cycle' advances the iNbr loop rather than the k loop;
                        !can over-count nChanged (harmless for termination) — 'exit' likely intended
                        cycle
                     end if
                  end do

                  !nbr above
                  if (k .LT. nVertLevels) then
                     if (inTropo(k+1,iCell) .GT. 0) then
                        inTropo(k,iCell) = 1
                        candInTropo(k,iCell) = 0
                        nChanged = nChanged+1
                        cycle
                     end if
                  end if

               end if !candIn
            end do !levels
         end do !cells
         !here's where a communication would be needed for edge cases !!!
      end do !while

      !Fill iLev_DT with the lowest level above the tropopause (If DT above column, iLev>nVertLevels. If DT below column, iLev=0.
      do iCell=1,nCells
         nChanged = 0
         !search from the top down for the highest tropospheric level
         do k=nVertLevels,1,-1
            if (inTropo(k,iCell) .GT. 0) then
               nChanged = 1
               exit
            end if
         end do !k

         if (nChanged .GT. 0) then !found troposphere's highest level
            iLev_DT(iCell) = k+1 !level above troposphere (>nVertLevels if whole column below 2pvu; e.g., tropics)
         else !whole column above DT (e.g., arctic pv tower)
            iLev_DT(iCell) = 0
         end if
      end do !iCell

   end subroutine floodFill_tropo

   !> Interpolates theta, zonal/meridional wind, and vertical vorticity onto the
   !> pvuVal PV surface, storing results in the *_pv diagnostic arrays.
   subroutine interp_pv_diagnostics(mesh, diag, pvuVal, missingVal)
      !compute various fields on 2pvu surface using calculated PVU field
      !potential temperature, uZonal, uMeridional, vertical vorticity

      use mpas_pool_routines, only : mpas_pool_get_dimension, mpas_pool_get_array

      implicit none

      type (mpas_pool_type), intent(in) :: mesh
      type (mpas_pool_type), intent(inout) :: diag
      !NOTE(review): pvuVal and missingVal have no intent attribute — intent(in) presumably intended
      real(kind=RKIND) :: pvuVal, missingVal

      integer :: iCell, k
      integer, pointer :: nCells, nVertLevels
      integer, dimension(:), pointer :: nEdgesOnCell, iLev_DT
      integer, dimension(:,:), pointer :: cellsOnCell, cellsOnEdge, edgesOnCell, verticesOnCell, &
                                          cellsOnVertex

      real(kind=RKIND),dimension(:),pointer:: areaCell, latCell, u_pv, v_pv, theta_pv, vort_pv
      real(kind=RKIND),dimension(:,:),pointer:: uZonal, uMeridional, vorticity, theta, ertel_pv, &
                                                kiteAreasOnVertex
      real(kind=RKIND), dimension(:,:), allocatable :: vVort

      call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels)
      !note: only owned cells (nCellsSolve) are interpolated
      call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCells)

      call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell)
      call mpas_pool_get_array(mesh, 'cellsOnCell', cellsOnCell)
      call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge)
      call mpas_pool_get_array(mesh, 'verticesOnCell', verticesOnCell)
      call mpas_pool_get_array(mesh, 'kiteAreasOnVertex', kiteAreasOnVertex)
      call mpas_pool_get_array(mesh, 'cellsOnVertex', cellsOnVertex)
      call mpas_pool_get_array(mesh, 'areaCell', areaCell)
      call mpas_pool_get_array(mesh, 'latCell', latCell)

      call mpas_pool_get_array(diag, 'ertel_pv', ertel_pv)
      call mpas_pool_get_array(diag, 'theta', theta)
      call mpas_pool_get_array(diag, 'vorticity', vorticity)
      call mpas_pool_get_array(diag, 'uReconstructZonal', uZonal)
      call mpas_pool_get_array(diag, 'uReconstructMeridional', uMeridional)
      call mpas_pool_get_array(diag, 'u_pv', u_pv)
      call mpas_pool_get_array(diag, 'v_pv', v_pv)
      call mpas_pool_get_array(diag, 'theta_pv', theta_pv)
      call mpas_pool_get_array(diag, 'vort_pv', vort_pv)
      call mpas_pool_get_array(diag, 'iLev_DT', iLev_DT)

      !write(0,*) 'Interpolating u,v,theta,vort to pv '

      call interp_pv(nCells, nVertLevels, pvuVal, latCell, &
                     ertel_pv, uZonal, u_pv, missingVal, iLev_DT)
      call interp_pv(nCells, nVertLevels, pvuVal, latCell, &
                     ertel_pv, uMeridional, v_pv, missingVal, iLev_DT)
      call interp_pv(nCells, nVertLevels, pvuVal, latCell, &
                     ertel_pv, theta, theta_pv, missingVal, iLev_DT)

      !vertical vorticity must first be averaged from vertices to cell centers
      allocate(vVort(nVertLevels, nCells+1))
      do iCell=1,nCells
         do k=1,nVertLevels
            vVort(k,iCell) = calc_verticalVorticity_cell(iCell, k, nEdgesOnCell(iCell), verticesOnCell, cellsOnVertex, &
                                                         kiteAreasOnVertex, areaCell(iCell), vorticity)
         end do
      end do
      call interp_pv(nCells, nVertLevels, pvuVal, latCell, &
                     ertel_pv, vVort, vort_pv, missingVal, iLev_DT)
      deallocate(vVort)
      !write(0,*) 'Done interpolating '
   end subroutine interp_pv_diagnostics

   !> Interpolates the diabatic and frictional PV tendencies onto the pvuVal
   !> PV surface, storing results in depv_dt_diab_pv / depv_dt_fric_pv.
   subroutine interp_pvBudget_diagnostics(mesh, diag, pvuVal, missingVal)
      !compute various fields on 2pvu surface using calculated PVU field
      !tend_diab, tend_fric

      use mpas_pool_routines, only : mpas_pool_get_dimension, mpas_pool_get_array

      implicit none

      type (mpas_pool_type), intent(in) :: mesh
      type (mpas_pool_type), intent(inout) :: diag
      !NOTE(review): pvuVal and missingVal have no intent attribute — intent(in) presumably intended
      real(kind=RKIND) :: pvuVal, missingVal

      integer :: iCell, k
      integer, pointer :: nCells, nVertLevels
      integer, dimension(:), pointer :: iLev_DT

      real(kind=RKIND),dimension(:),pointer:: latCell, depv_dt_diab_pv, depv_dt_fric_pv
      real(kind=RKIND),dimension(:,:),pointer:: depv_dt_diab, depv_dt_fric, ertel_pv

      call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels)
      call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCells)

      call mpas_pool_get_array(mesh, 'latCell', latCell)

      call mpas_pool_get_array(diag, 'ertel_pv', ertel_pv)
      call mpas_pool_get_array(diag, 'depv_dt_diab', depv_dt_diab)
      call mpas_pool_get_array(diag, 'depv_dt_fric', depv_dt_fric)
      call mpas_pool_get_array(diag, 'depv_dt_diab_pv', depv_dt_diab_pv)
      call mpas_pool_get_array(diag, 'depv_dt_fric_pv', depv_dt_fric_pv)
      call mpas_pool_get_array(diag, 'iLev_DT', iLev_DT)

      !write(0,*) 'Interpolating u,v,theta,vort to pv '

      call interp_pv(nCells, nVertLevels, pvuVal, latCell, &
                     ertel_pv, depv_dt_diab, depv_dt_diab_pv, missingVal, iLev_DT)
      call interp_pv(nCells, nVertLevels, pvuVal, latCell, &
                     ertel_pv, depv_dt_fric, depv_dt_fric_pv, missingVal, iLev_DT)
      !write(0,*) 'Done interpolating '
   end subroutine interp_pvBudget_diagnostics

   !> Column-wise linear interpolation of field1 onto the surface where field0
   !> equals interpVal*sign(lat), using the pre-diagnosed level iLev_DT.
   !> field (out): interpolated value per cell; columns entirely above/below the
   !> surface fall back to the top/bottom level of field1 (missingVal is unused).
   subroutine interp_pv( nCells, nLevels, interpVal, &
                         latCell, field0, field1,field, &
                         missingVal, iLev_DT)

      implicit none
      !linear-in-PV interpolate columns of field1 to where field0 is interpVal*sign(lat)
      !using level above tropopause already diagnosed

      ! input

      integer :: nCells, nLevels
      integer, intent(in) :: iLev_DT(nCells)
      real(kind=RKIND) :: interpVal, missingVal
      real(kind=RKIND), intent(in) ::latCell(nCells)
      real(kind=RKIND), intent(in) :: field0(nLevels,nCells), field1(nLevels,nCells)
      real(kind=RKIND), intent(out) :: field(nCells)

      ! local

      !NOTE(review): indlNbr, vallNbr, sgnh, sgnlNbr are declared but never used below
      integer :: iCell, iLev, levInd, indlNbr
      real(kind=RKIND) :: valh, vall, vallNbr, sgnh, sgnl, sgnlNbr
      real(kind=RKIND) :: dv_dl, levFrac, valInterpCell, sgnHemi

      do iCell = 1, nCells
         !starting from top, trap val if values on opposite side
         levInd = -1 !what should happen with missing values?
         levFrac = 0.0
         sgnHemi = sign(1.0_RKIND, latCell(iCell)) !problem at the equator...is sign(0)=0?
         if (sgnHemi .EQ. 0.0) sgnHemi = 1.0
         valInterpCell = interpVal*sgnHemi

         iLev = iLev_DT(iCell)
         if (iLev .GT. nLevels) then
            !whole column on the low-PV side: sgnl<0 selects the top level below
            levInd = -1
            sgnl = -1.0
         else if (iLev .LT. 1) then
            !whole column on the high-PV side: sgnl>0 selects the surface level below
            levInd = -1
            sgnl = 1.0
         else
            !NOTE(review): iLev==1 would index field0(0,iCell) — assumes the flood-fill
            !routines never produce iLev_DT==1 (they emit 0 or >=2) — TODO confirm
            valh = field0(iLev,iCell)
            vall = field0(iLev-1,iCell)
            !sandwiched value. equal in case val0 is a vals[l].
            !get linear interpolation: val0 = vals[l]+dvals/dl * dl
            !Avoid divide by 0 by just assuming value is
            !halfway between...

            dv_dl = valh-vall;
            if (abs(dv_dl)<1.e-6) then
               levFrac = 0.5;
            else
               levFrac = (valInterpCell-vall)/dv_dl
            end if

            levInd = iLev-1
         end if !iLev in column

         !find value of field using index we just found
         if (levInd<0) then !didn't trap value
            if (sgnl>0.0) then !column above value, take surface
               field(iCell) = field1(1,iCell)
            else !column below value, take top
               !field(iCell) = missingVal
               field(iCell) = field1(nLevels,iCell)
            end if
         else
            valh = field1(levInd+1,iCell)
            vall = field1(levInd,iCell)

            dv_dl = valh-vall
            field(iCell) = vall+dv_dl*levFrac
         end if
      end do

   end subroutine interp_pv

   !> Computes the (curl of velocity) vector at a cell center via finite volume
   !> face integrals; optionally adds the planetary vorticity (addEarthVort>0).
   subroutine calc_gradxu_cell(gradxu, addEarthVort, &
        iCell, level, nVertLevels, nEdgesCell0, verticesOnCell, kiteAreasOnVertex, &
        cellsOnCell, edgesOnCell, cellsOnEdge, dvEdge, edgeNormalVectors, &
        cellsOnVertex, &
        cellTangentPlane, localVerticalUnitVectors, zgrid, areaCell0, &
        uReconstructX, uReconstructY, uReconstructZ, w,vorticity)
      implicit none

      real(kind=RKIND), dimension(3), intent(out) :: gradxu
      integer, intent(in) :: addEarthVort, iCell, level, nVertLevels, nEdgesCell0
      real(kind=RKIND), intent(in) :: areaCell0
      real(kind=RKIND), dimension(:), intent(in) :: dvEdge
      real(kind=RKIND), dimension(3,2,*), intent(in) :: cellTangentPlane
      real(kind=RKIND), dimension(3,*), intent(in) :: localVerticalUnitVectors, edgeNormalVectors
      real(kind=RKIND), dimension(:,:), intent(in) :: zgrid,uReconstructX, uReconstructY, uReconstructZ, &
           w, vorticity, kiteAreasOnVertex
      integer, dimension(:,:), intent(in) :: cellsOnCell, edgesOnCell, cellsOnEdge, verticesOnCell, cellsOnVertex

      integer :: i, iNbr, iEdge
      real(kind=RKIND) :: val0, valNbr, volumeCell, areaFactor, z0, zp, zm, valp, valm, dw_dx, dw_dy, du_dz, dv_dz
      real(kind=RKIND), dimension(3) :: unitDeriv, velCell0, velCellp, velCellm
      real(kind=RKIND), dimension(3,3) :: xyzLocal
      real(kind=RKIND), dimension(nEdgesCell0) :: valEdges, dvEdgeCell, dhEdge
      real(kind=RKIND), dimension(3,nEdgesCell0) :: normalEdgeCell

      !local coordinate system
      call coordinateSystem_cell(cellTangentPlane, localVerticalUnitVectors, iCell, xyzLocal)
      !normal vectors at voronoi polygon edges pointing out of cell
      do i=1,nEdgesCell0
         iNbr = cellsOnCell(i, iCell)
         !dhEdge(i) = calc_heightVerticalEdge(iCell, iNbr, level, zgrid) !vertical thickness of that face
         !if don't want to consider 3d cell since we haven't calculated the cell
         !volume well, set all thicknesses to be the same
         dhEdge(i) = 100.0_RKIND

         iEdge = edgesOnCell(i,iCell)
         dvEdgeCell(i) = dvEdge(iEdge)
         !fluxSign orients the stored edge normal so it points out of this cell
         val0 = fluxSign(iCell, iEdge, cellsOnEdge)
         normalEdgeCell(:,i) = edgeNormalVectors(:,iEdge)
         call normalizeVector(normalEdgeCell(:,i),3)
         normalEdgeCell(:,i) = normalEdgeCell(:,i)*val0
      end do

      volumeCell = calcVolumeCell(areaCell0, nEdgesCell0, dhEdge)

      !w
      !w lives on vertical faces; average the faces above/below to the cell center
      val0 = .5*(w(level+1, iCell)+w(level, iCell))
      do i=1,nEdgesCell0
         iNbr = cellsOnCell(i, iCell)
         valNbr = .5*(w(level+1, iNbr)+w(level, iNbr))
         valEdges(i) = 0.5*(valNbr+val0)
      end do
      unitDeriv(:) = xyzLocal(:,1)
      dw_dx = calc_horizDeriv_fv(valEdges, nEdgesCell0, dvEdgeCell, dhEdge, normalEdgeCell, unitDeriv, volumeCell)
      unitDeriv(:) = xyzLocal(:,2)
      dw_dy = calc_horizDeriv_fv(valEdges, nEdgesCell0, dvEdgeCell, dhEdge, normalEdgeCell, unitDeriv, volumeCell)

      !vertical derivatives
      !calc_heightCellCenter(c0, level, zgrid) calc_vertDeriv_center(val0, valp, valm, z0,zp,zm)
      !du/dz and dv/dz
      velCell0(1) = uReconstructX(level,iCell)
      velCell0(2) = uReconstructY(level,iCell)
      velCell0(3) = uReconstructZ(level,iCell)
      z0 = calc_heightCellCenter(iCell, level, zgrid)
      if (level>1) then
         !have cell beneath
         velCellm(1) = uReconstructX(level-1,iCell)
         velCellm(2) = uReconstructY(level-1,iCell)
         velCellm(3) = uReconstructZ(level-1,iCell)
         zm = calc_heightCellCenter(iCell, level-1, zgrid)
      end if
      !NOTE(review): the source as captured here is garbled/truncated — the
      !"level < nVertLevels" branch (cell above), the vertical-derivative and
      !curl assembly, and part of the following condition appear to be missing,
      !likely because text containing '<' was lost in extraction. The next line
      !is reproduced verbatim from the residue; restore from the repository.
      if (level0) then
         call local2FullVorticity(gradxu, xyzLocal(:,1), xyzLocal(:,2), xyzLocal(:,3))
      end if

   end subroutine calc_gradxu_cell

   !> Computes the 3d gradient of theta at a cell center via finite volume
   !> face integrals in the local tangent-plane coordinate system.
   subroutine calc_grad_cell(gradtheta, &
        iCell, level, nVertLevels, nEdgesCell0, verticesOnCell, kiteAreasOnVertex, &
        cellsOnCell, edgesOnCell, cellsOnEdge, dvEdge, edgeNormalVectors, &
        cellsOnVertex, &
        cellTangentPlane, localVerticalUnitVectors, zgrid, areaCell0, &
        theta)
      !
      implicit none

      real(kind=RKIND), dimension(3), intent(out) :: gradtheta
      real(kind=RKIND), intent(in) :: areaCell0
      real(kind=RKIND), dimension(:), intent(in) :: dvEdge
      real(kind=RKIND), dimension(3,2,*), intent(in) :: cellTangentPlane
      real(kind=RKIND), dimension(3,*), intent(in) :: localVerticalUnitVectors, edgeNormalVectors
      real(kind=RKIND), dimension(:,:), intent(in) :: zgrid, theta, kiteAreasOnVertex
      integer, intent(in) :: iCell, level, nVertLevels, nEdgesCell0
      integer, dimension(:,:), intent(in) :: cellsOnCell, edgesOnCell, cellsOnEdge, verticesOnCell, cellsOnVertex

      integer :: i, iNbr, iEdge
      real(kind=RKIND) :: val0, valNbr, volumeCell, areaFactor, z0, zp, zm, valp, valm
      real(kind=RKIND), dimension(3) :: unitDeriv, velCell0, velCellp, velCellm
      real(kind=RKIND), dimension(3,3) :: xyzLocal
      real(kind=RKIND), dimension(nEdgesCell0) :: valEdges, dvEdgeCell, dhEdge
      real(kind=RKIND), dimension(3,nEdgesCell0) :: normalEdgeCell

      !local coordinate system
      call coordinateSystem_cell(cellTangentPlane, localVerticalUnitVectors, iCell, xyzLocal)
      !normal vectors at voronoi polygon edges pointing out of cell
      do i=1,nEdgesCell0
         iNbr = cellsOnCell(i, iCell)
         !dhEdge(i) = calc_heightVerticalEdge(iCell, iNbr, level, zgrid) !vertical thickness of that face
         !if don't want to consider 3d cell since we haven't calculated the cell
         !volume well, set all thicknesses to be the same
         dhEdge(i) = 100.0_RKIND

         iEdge = edgesOnCell(i,iCell)
         dvEdgeCell(i) = dvEdge(iEdge)
         val0 = fluxSign(iCell, iEdge, cellsOnEdge)
         normalEdgeCell(:,i) = edgeNormalVectors(:,iEdge)
         call normalizeVector(normalEdgeCell(:,i),3)
         normalEdgeCell(:,i) = normalEdgeCell(:,i)*val0
      end do

      volumeCell = calcVolumeCell(areaCell0, nEdgesCell0, dhEdge)

      !Need to get 3d curl and grad theta
      !horizontal derivatives
      !calc_horizDeriv_fv(valEdges, nNbrs, dvEdge, dhEdge, &
      !                   normalEdge, unitDeriv, volumeCell)
      !theta
      val0 = theta(level, iCell)
      do i=1,nEdgesCell0
         iNbr = cellsOnCell(i, iCell)
         valNbr = theta(level,iNbr)
         valEdges(i) = 0.5*(valNbr+val0)
      end do
      unitDeriv(:) = xyzLocal(:,1)
      gradtheta(1) = calc_horizDeriv_fv(valEdges, nEdgesCell0, dvEdgeCell, dhEdge, normalEdgeCell, unitDeriv, volumeCell)
      unitDeriv(:) = xyzLocal(:,2)
      gradtheta(2) = calc_horizDeriv_fv(valEdges, nEdgesCell0, dvEdgeCell, dhEdge, normalEdgeCell, unitDeriv, volumeCell)

      !vertical derivatives
      !calc_heightCellCenter(c0, level, zgrid) calc_vertDeriv_center(val0, valp, valm, z0,zp,zm)
      !theta
      gradtheta(3) = 0.0_RKIND
      z0 = calc_heightCellCenter(iCell, level, zgrid)
      val0 = theta(level, iCell)
      if (level>1) then
         !have cell beneath
         valm = theta(level-1, iCell)
         zm = calc_heightCellCenter(iCell, level-1, zgrid)
      end if
      !NOTE(review): the residue below is truncated — the remainder of
      !calc_grad_cell (the "level < nVertLevels" branch, vertical derivative,
      !end of subroutine/module) and the start of the soundings module are
      !missing from this capture; the fused fragment is reproduced verbatim.
      if (level \brief Reads sounding locations and sets sounding alarm
   !> \author Michael Duda
   !> \date 20 April 2016
   !> \details
   !> This routine checks on the existence of a 'sounding_locations.txt' file,
   !> and, if present, reads sounding locations from this file and determines
   !> which grid cell contains each of the locations.
   !
   !-----------------------------------------------------------------------
   subroutine soundings_setup(configs, all_pools, simulation_clock, dminfo)

      use mpas_derived_types, only : MPAS_pool_type, MPAS_clock_type, dm_info
      use mpas_pool_routines, only : mpas_pool_get_subpool, mpas_pool_get_dimension, mpas_pool_get_array, mpas_pool_get_config
      use mpas_io_units, only : stderrUnit, mpas_new_unit, mpas_release_unit
      use mpas_timekeeping, only : MPAS_timeInterval_type, MPAS_time_type, MPAS_set_timeInterval, &
                                   MPAS_get_clock_time, MPAS_add_clock_alarm, MPAS_NOW
      use mpas_dmpar, only : IO_NODE, mpas_dmpar_bcast_int, mpas_dmpar_bcast_logical, mpas_dmpar_bcast_char

      implicit none

      type (MPAS_pool_type), pointer :: configs
      type (MPAS_pool_type), pointer :: all_pools
      type (MPAS_clock_type), pointer :: simulation_clock
      type (dm_info), intent(in) :: dminfo

      character(len=StrKIND), pointer :: soundingInterval

      integer :: i, ierr
      integer :: sndUnit
      real (kind=RKIND) :: station_lat, station_lon
      character (len=StrKIND) :: tempstr
      character (len=StrKIND) :: station_name
      logical :: exists
      integer :: nearestCell
      integer, pointer :: nCells, nCellsSolve, maxEdges
      integer, dimension(:), pointer :: nEdgesOnCell
      integer, dimension(:,:), pointer :: cellsOnCell
      real (kind=RKIND), dimension(:), pointer :: latCell, lonCell
      type (MPAS_timeInterval_type) :: intv
      type (MPAS_time_type) :: now


      !cache pool/clock handles in module state for later soundings_compute calls
      simulationClock => simulation_clock

      call mpas_pool_get_subpool(all_pools, 'mesh', mesh)
      call mpas_pool_get_subpool(all_pools, 'state', state)
      call mpas_pool_get_subpool(all_pools, 'diag', diag)

      call mpas_pool_get_config(configs, 'config_sounding_interval', soundingInterval)

      !soundings are disabled entirely when the interval is 'none'
      if (trim(soundingInterval) == 'none') then
         return
      end if

      !only the I/O task touches the filesystem; the result is broadcast
      if (dminfo % my_proc_id == IO_NODE) then
         inquire(file='sounding_locations.txt', exist=exists)
      end if
      call mpas_dmpar_bcast_logical(dminfo, exists)

      if (.not. exists) then
         write(stderrUnit,*) 'Sounding location file ''sounding_locations.txt'' not found.'
         return
      end if

      !arm a recurring alarm starting now, at the configured interval
      call MPAS_set_timeInterval(intv, timeString=trim(soundingInterval))
      now = MPAS_get_clock_time(simulationClock, MPAS_NOW)
      call MPAS_add_clock_alarm(simulationClock, 'soundingAlarm', now, alarmTimeInterval=intv)

      if (dminfo % my_proc_id == IO_NODE) then
         call mpas_new_unit(sndUnit)

         open(sndUnit, file='sounding_locations.txt', form='formatted', status='old', iostat=ierr)
      end if
      !NOTE(review): ierr is only assigned on IO_NODE before this broadcast;
      !assumes the bcast root is IO_NODE so all ranks receive its value — confirm
      call mpas_dmpar_bcast_int(dminfo, ierr)

      if (ierr /= 0) then
         write(stderrUnit,*) 'Error opening sounding location file ''sounding_locations.txt'''
         if (dminfo % my_proc_id == IO_NODE) then
            call mpas_release_unit(sndUnit)
         end if
         return
      end if

      !first pass: count records until read fails (EOF or malformed line)
      !NOTE(review): assumes module variable nSoundings starts at 0 — confirm its initializer
      if (dminfo % my_proc_id == IO_NODE) then
         do
            read(sndUnit, fmt=*, iostat=ierr) station_lat, station_lon, station_name
            if (ierr == 0) then
               nSoundings = nSoundings + 1
            else
               exit
            end if
         end do
      end if
      call mpas_dmpar_bcast_int(dminfo, nSoundings)

      if (nSoundings == 0) then
         if (dminfo % my_proc_id == IO_NODE) then
            close(sndUnit)
            call mpas_release_unit(sndUnit)
         end if
         return
      end if

      !NOTE(review): this message is printed by every MPI task, not just IO_NODE
      write(stderrUnit,*) 'Read ', nSoundings, ' sounding locations from ''sounding_locations.txt'''

      if (dminfo % my_proc_id == IO_NODE) then
         rewind(sndUnit)
      end if

      allocate(stationOwned(nSoundings))
      allocate(stationLats(nSoundings))
      allocate(stationLons(nSoundings))
      allocate(stationCells(nSoundings))
      allocate(stationNames(nSoundings))

      call mpas_pool_get_dimension(mesh, 'nCells', nCells)
      call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve)
      call mpas_pool_get_dimension(mesh, 'maxEdges', maxEdges)
      call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell)
      call mpas_pool_get_array(mesh, 'cellsOnCell', cellsOnCell)
      call mpas_pool_get_array(mesh, 'latCell', latCell)
      call mpas_pool_get_array(mesh, 'lonCell', lonCell)

      !start each nearest-cell walk from the previous result for efficiency
      nearestCell = nCells

      !second pass: read each station line on IO_NODE, broadcast the raw text,
      !and let every task parse it and locate the containing cell; a task owns
      !the station only if the nearest cell is one of its solve cells
      do i=1,nSoundings
         if (dminfo % my_proc_id == IO_NODE) then
            read(sndUnit, fmt='(a)') tempstr
         end if
         call mpas_dmpar_bcast_char(dminfo, tempstr)
         read(tempstr, fmt=*) stationLats(i), stationLons(i), stationNames(i)
         !degrees -> radians for the spherical search
         nearestCell = nearest_cell((stationLats(i) * pi_const / 180.0_RKIND), (stationLons(i) * pi_const / 180.0_RKIND), &
                                    nearestCell, nCells, maxEdges, nEdgesOnCell, cellsOnCell, latCell, lonCell)
         if (nearestCell <= nCellsSolve) then
            stationOwned(i) = .true.
            stationCells(i) = nearestCell
         else
            stationOwned(i) = .false.
         end if
      end do

      if (dminfo % my_proc_id == IO_NODE) then
         close(sndUnit)

         call mpas_release_unit(sndUnit)
      end if

   end subroutine soundings_setup


   !-----------------------------------------------------------------------
   ! routine soundings_compute
   !
   !> \brief If soundings alarm is ringing, writes soundings text files
   !> \author Michael Duda
   !> \date 20 April 2016
   !> \details
   !> If this routine is called when the 'soundingAlarm' alarm is ringing,
   !> each calling task will write the sounding locations within its blocks
   !> to text files on disk.
   !
   !-----------------------------------------------------------------------
   subroutine soundings_compute()

      use mpas_io_units, only : stderrUnit
      use mpas_derived_types, only : MPAS_pool_type
      use mpas_pool_routines, only : MPAS_pool_get_dimension, MPAS_pool_get_array
      use mpas_timekeeping, only : MPAS_time_type, MPAS_is_alarm_ringing, MPAS_reset_clock_alarm, MPAS_get_clock_time, &
                                   MPAS_get_time, MPAS_NOW
      use mpas_constants, only : rvord

      implicit none

      integer :: iStn, k
      integer, pointer :: nVertLevels, index_qv
      real (kind=RKIND), dimension(:,:), pointer :: pressure_base, pressure_p, uReconstructZonal, uReconstructMeridional, zgrid, &
                                                    theta_m, exner
      real (kind=RKIND), dimension(:,:,:), pointer :: scalars
      real (kind=RKIND) :: tmpc, tdpc, dir, spd, rh, log_rh, qvs, pres
      type (MPAS_time_type) :: now
      character(len=StrKIND) :: nowString
      integer :: yyyy, mm, dd, h, m, s
      character(len=StrKIND) :: fname
      character(len=10) :: stid


      !only act when the interval alarm set in soundings_setup is ringing
      if (MPAS_is_alarm_ringing(simulationClock, 'soundingAlarm')) then

         now = MPAS_get_clock_time(simulationClock, MPAS_NOW)
         call mpas_get_time(now, YYYY=yyyy, MM=mm, DD=dd, H=h, M=m, S=s, dateTimeString=nowString)

         call MPAS_pool_get_dimension(mesh, 'nVertLevels', nVertLevels)
         call MPAS_pool_get_dimension(state, 'index_qv', index_qv)
         call MPAS_pool_get_array(mesh, 'zgrid', zgrid)
         call MPAS_pool_get_array(state, 'scalars', scalars, 1)
         call MPAS_pool_get_array(state, 'theta_m', theta_m, 1)
         call MPAS_pool_get_array(diag, 'pressure_base', pressure_base)
         call MPAS_pool_get_array(diag, 'pressure_p', pressure_p)
         call MPAS_pool_get_array(diag, 'exner', exner)
         call MPAS_pool_get_array(diag, 'uReconstructZonal', uReconstructZonal)
         call MPAS_pool_get_array(diag, 'uReconstructMeridional', uReconstructMeridional)

!        write(stderrUnit,*) '--- Writing soundings at '//trim(nowString)//'---'

         !each task writes one text file per station it owns (set in soundings_setup)
         do iStn=1,nSoundings
            if (stationOwned(iStn)) then
!              write(stderrUnit,*) 'Writing sounding for station '//trim(stationNames(iStn))

               !file name: <station>.<YYYYMMDDHHMM>.snd
               !NOTE(review): hard-coded unit 97 — assumes unit 97 is otherwise unused
               write(fname,'(a,i4.4,i2.2,i2.2,i2.2,i2.2,a)') trim(stationNames(iStn))//'.', yyyy, mm, dd, h, m, '.snd'
               open(97,file=trim(fname),form='formatted',status='replace')

               write(stid,'(a)') trim(stationNames(iStn))

               !GEMPAK-style sounding header
               write(97,'(a)') ' SNPARM = PRES;HGHT;TMPC;DWPC;DRCT;SPED;'
               write(97,'(a)') ''
               write(97,'(a,i2.2,i2.2,i2.2,a,i2.2,i2.2)') ' STID = '//stid//' STNM = 99999 TIME = ', mod(yyyy,100), mm, dd,'/', h, m
               write(97,'(a,f6.2,a,f7.2,a)') ' SLAT = ', stationLats(iStn), ' SLON = ', stationLons(iStn), ' SELV = -999'
               write(97,'(a)') ''
               write(97,'(a)') ' PRES HGHT TMPC DWPC DRCT SPED'

               do k=1,nVertLevels
                  !temperature (K) from modified potential temperature and Exner function
                  tmpc = theta_m(k,stationCells(iStn)) / (1.0_RKIND + rvord * scalars(index_qv,k,stationCells(iStn))) * exner(k,stationCells(iStn))
                  pres = pressure_base(k,stationCells(iStn)) + pressure_p(k,stationCells(iStn))
!                 if (tmpc >= 273.15_RKIND) then
                  qvs = rslf(pres, tmpc)
!                 else
!                    qvs = rsif(pres, tmpc)
!                 end if
                  !clamp RH to (1e-8, 1] so log_rh stays finite
                  rh = max(1.0e-8,min(1.0,scalars(index_qv,k,stationCells(iStn))/qvs))
                  log_rh = log(rh)
                  tmpc = tmpc - 273.15_RKIND
                  pres = pres * 0.01
                  !dew point from the Magnus-type approximation (constants 17.625/243.04)
                  tdpc = 243.04*(log_rh+(17.625*tmpc/(243.04+tmpc))) / (17.625-log_rh-((17.625*tmpc)/(243.04+tmpc)))
                  spd = sqrt(uReconstructZonal(k,stationCells(iStn))**2 + uReconstructMeridional(k,stationCells(iStn))**2)
                  if (spd == 0.0) then
                     dir = 0.0 !calm: direction is undefined, report 0
                  else
                     !meteorological wind direction (degrees the wind blows FROM)
                     dir = acos(-uReconstructMeridional(k,stationCells(iStn)) / spd)
                     if (uReconstructZonal(k,stationCells(iStn)) > 0.0) then
                        dir = 2.0 * pi_const - dir
                     end if
                     dir = dir * 180.0_RKIND / pi_const
                  end if
                  write(97,'(f10.2,f10.2,f9.2,f9.2,f9.2,f9.2)') &
                       pres, &
                       0.5 * (zgrid(k,stationCells(iStn)) + zgrid(k+1,stationCells(iStn))), & ! Avg to layer midpoint
                       tmpc, &
                       tdpc, &
                       dir, &
                       spd
               end do

               close(97)
            end if
         end do

         call MPAS_reset_clock_alarm(simulationClock, 'soundingAlarm')
      else
!        write(stderrUnit,*) '--- Not yet time to write soundings ---'
      end if

   end subroutine soundings_compute


   !-----------------------------------------------------------------------
   ! routine soundings_cleanup
   !
   !> \brief Deallocates memory used to handle soundings
   !> \author Michael Duda
   !> \date 20 April 2016
   !> \details
   !> This routine should be called last among public routines in this module
   !> to release any allocated memory used in the storage of sounding
   !> locations.
   !
   !-----------------------------------------------------------------------
   subroutine soundings_cleanup()

      implicit none

      !release station arrays allocated in soundings_setup; resetting
      !nSoundings to 0 makes the routine safe to call more than once
      if (nSoundings > 0) then
         deallocate(stationOwned)
         deallocate(stationLats)
         deallocate(stationLons)
         deallocate(stationCells)
         deallocate(stationNames)
         nSoundings = 0
      end if

   end subroutine soundings_cleanup


   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Finds the MPAS grid cell nearest to (target_lat, target_lon)
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ integer function nearest_cell(target_lat, target_lon, start_cell, nCells, maxEdges, & + nEdgesOnCell, cellsOnCell, latCell, lonCell) + + implicit none + + real (kind=RKIND), intent(in) :: target_lat, target_lon + integer, intent(in) :: start_cell + integer, intent(in) :: nCells, maxEdges + integer, dimension(nCells), intent(in) :: nEdgesOnCell + integer, dimension(maxEdges,nCells), intent(in) :: cellsOnCell + real (kind=RKIND), dimension(nCells), intent(in) :: latCell, lonCell + + integer :: i + integer :: iCell + integer :: current_cell + real (kind=RKIND) :: current_distance, d + real (kind=RKIND) :: nearest_distance + + nearest_cell = start_cell + current_cell = -1 + + do while (nearest_cell /= current_cell) + current_cell = nearest_cell + current_distance = sphere_distance(latCell(current_cell), lonCell(current_cell), target_lat, & + target_lon, 1.0_RKIND) + nearest_cell = current_cell + nearest_distance = current_distance + do i = 1, nEdgesOnCell(current_cell) + iCell = cellsOnCell(i,current_cell) + if (iCell <= nCells) then + d = sphere_distance(latCell(iCell), lonCell(iCell), target_lat, target_lon, 1.0_RKIND) + if (d < nearest_distance) then + nearest_cell = iCell + nearest_distance = d + end if + end if + end do + end do + + end function nearest_cell + + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! Compute the great-circle distance between (lat1, lon1) and (lat2, lon2) + ! on a sphere with given radius. + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
+ real (kind=RKIND) function sphere_distance(lat1, lon1, lat2, lon2, radius) + + implicit none + + real (kind=RKIND), intent(in) :: lat1, lon1, lat2, lon2, radius + real (kind=RKIND) :: arg1 + + arg1 = sqrt( sin(0.5*(lat2-lat1))**2 + cos(lat1)*cos(lat2)*sin(0.5*(lon2-lon1))**2 ) + sphere_distance = 2.0 * radius * asin(arg1) + + end function sphere_distance + + +!============================================================================================= +!NOTE: functions rslf and rsif are taken from module_mp_thompson temporarily for computing +! the diagnostic relative humidity. These two functions will be removed from this module +! when the Thompson cloud microphysics scheme will be restored to MPAS-Dev. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-11. + +!+---+-----------------------------------------------------------------+ +! THIS FUNCTION CALCULATES THE LIQUID SATURATION VAPOR MIXING RATIO AS +! A FUNCTION OF TEMPERATURE AND PRESSURE +! + REAL FUNCTION RSLF(P,T) + + IMPLICIT NONE + REAL, INTENT(IN):: P, T + REAL:: ESL,X + REAL, PARAMETER:: C0= .611583699E03 + REAL, PARAMETER:: C1= .444606896E02 + REAL, PARAMETER:: C2= .143177157E01 + REAL, PARAMETER:: C3= .264224321E-1 + REAL, PARAMETER:: C4= .299291081E-3 + REAL, PARAMETER:: C5= .203154182E-5 + REAL, PARAMETER:: C6= .702620698E-8 + REAL, PARAMETER:: C7= .379534310E-11 + REAL, PARAMETER:: C8=-.321582393E-13 + + X=MAX(-80.,T-273.16) + +! ESL=612.2*EXP(17.67*X/(T-29.65)) + ESL=C0+X*(C1+X*(C2+X*(C3+X*(C4+X*(C5+X*(C6+X*(C7+X*C8))))))) + RSLF=.622*ESL/(P-ESL) + +! ALTERNATIVE +! ; Source: Murphy and Koop, Review of the vapour pressure of ice and +! supercooled water for atmospheric applications, Q. J. R. +! Meteorol. Soc (2005), 131, pp. 1539-1565. +! ESL = EXP(54.842763 - 6763.22 / T - 4.210 * ALOG(T) + 0.000367 * T +! + TANH(0.0415 * (T - 218.8)) * (53.878 - 1331.22 +! / T - 9.44523 * ALOG(T) + 0.014025 * T)) + + END FUNCTION RSLF +!+---+-----------------------------------------------------------------+ +! 
THIS FUNCTION CALCULATES THE ICE SATURATION VAPOR MIXING RATIO AS A +! FUNCTION OF TEMPERATURE AND PRESSURE +! + REAL FUNCTION RSIF(P,T) + + IMPLICIT NONE + REAL, INTENT(IN):: P, T + REAL:: ESI,X + REAL, PARAMETER:: C0= .609868993E03 + REAL, PARAMETER:: C1= .499320233E02 + REAL, PARAMETER:: C2= .184672631E01 + REAL, PARAMETER:: C3= .402737184E-1 + REAL, PARAMETER:: C4= .565392987E-3 + REAL, PARAMETER:: C5= .521693933E-5 + REAL, PARAMETER:: C6= .307839583E-7 + REAL, PARAMETER:: C7= .105785160E-9 + REAL, PARAMETER:: C8= .161444444E-12 + + X=MAX(-80.,T-273.16) + ESI=C0+X*(C1+X*(C2+X*(C3+X*(C4+X*(C5+X*(C6+X*(C7+X*C8))))))) + RSIF=.622*ESI/(P-ESI) + +! ALTERNATIVE +! ; Source: Murphy and Koop, Review of the vapour pressure of ice and +! supercooled water for atmospheric applications, Q. J. R. +! Meteorol. Soc (2005), 131, pp. 1539-1565. +! ESI = EXP(9.550426 - 5723.265/T + 3.53068*ALOG(T) - 0.00728332*T) + + END FUNCTION RSIF + +end module soundings diff --git a/src/core_atmosphere/dynamics/Makefile b/src/core_atmosphere/dynamics/Makefile index 7b705bea62..97785deb4e 100644 --- a/src/core_atmosphere/dynamics/Makefile +++ b/src/core_atmosphere/dynamics/Makefile @@ -4,8 +4,7 @@ OBJS = mpas_atm_time_integration.o all: $(OBJS) -mpas_atm_time_integration.o: - +mpas_atm_time_integration.o: mpas_atm_iau.o clean: $(RM) *.o *.mod *.f90 @@ -17,7 +16,7 @@ clean: $(RM) $@ $*.mod ifeq "$(GEN_F90)" "true" $(CPP) $(CPPFLAGS) $(PHYSICS) $(CPPINCLUDES) $< > $*.f90 - $(FC) $(FFLAGS) -c $*.f90 $(FCINCLUDES) -I../../framework -I../../operators -I../physics -I../physics/physics_wrf -I../../external/esmf_time_f90 + $(FC) $(FFLAGS) -c $*.f90 $(FCINCLUDES) -I.. 
-I../../framework -I../../operators -I../physics -I../physics/physics_wrf -I../../external/esmf_time_f90 else - $(FC) $(CPPFLAGS) $(PHYSICS) $(FFLAGS) -c $*.F $(CPPINCLUDES) $(FCINCLUDES) -I../../framework -I../../operators -I../physics -I../physics/physics_wrf -I../../external/esmf_time_f90 + $(FC) $(CPPFLAGS) $(PHYSICS) $(FFLAGS) -c $*.F $(CPPINCLUDES) $(FCINCLUDES) -I.. -I../../framework -I../../operators -I../physics -I../physics/physics_wrf -I../../external/esmf_time_f90 endif diff --git a/src/core_atmosphere/dynamics/mpas_atm_iau.F b/src/core_atmosphere/dynamics/mpas_atm_iau.F new file mode 100644 index 0000000000..7c2fd170bb --- /dev/null +++ b/src/core_atmosphere/dynamics/mpas_atm_iau.F @@ -0,0 +1,219 @@ +! Copyright (c) 2016, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +module mpas_atm_iau + + use mpas_derived_types + use mpas_pool_routines + use mpas_kind_types + use mpas_dmpar + use mpas_constants + + !public :: atm_compute_iau_coef, atm_add_tend_anal_incr + + contains + +!================================================================================================== + real (kind=RKIND) function atm_iau_coef(configs, itimestep, dt) result(wgt_iau) +!================================================================================================== +! Compute the coefficient (or weight) for the IAU forcing at itimestep. + + implicit none + + type (mpas_pool_type), intent(in) :: configs + integer, intent(in) :: itimestep + real (kind=RKIND), intent(in) :: dt + + integer :: nsteps_iau ! Total number of time steps where the IAU forcing is applied. + logical, parameter :: debug = .false. 
+ + character(len=StrKIND), pointer :: iau_opt + real (kind=RKIND), pointer :: time_window_sec + +! type (mpas_pool_type), intent(in) :: configs +! type (MPAS_Time_type) :: startTime, stopTime ! for the entire model integration period +! type (MPAS_Time_type) :: time_begin, time_end ! for the IAU window +! type (MPAS_TimeInterval_type) :: runDuration +! integer :: local_err +! character(len=StrKIND), pointer :: config_start_time, config_run_duration, config_stop_time +! real (kind=RKIND), pointer :: time_window_sec, runtime_window + + call mpas_pool_get_config(configs, 'config_IAU_option', iau_opt) + call mpas_pool_get_config(configs, 'config_IAU_window_length_s', time_window_sec) + +! Initialization + wgt_iau = 0. + + +! For config_IAU_option /= 'off', we compute a weighting function here based on the time info in namelist.atmosphere. +! The default option (config_IAU_option = 'on') defines a constant forcing with the same weight +! (= config_IAU_window_length_s/config_dt + 1) during the IAU time window. +! The model is assumed to be further advanced after the forcing (or the filtering) is applied (as a free run), +! so we need to fill up the weighting function with zeros for the period from the end of the IAU window +! all the way to config_stop_time (or for the rest of config_run_duration). +! call mpas_pool_get_config(configs, 'config_start_time', config_start_time) +! call mpas_pool_get_config(configs, 'config_run_duration', config_run_duration) +! call mpas_pool_get_config(configs, 'config_stop_time', config_stop_time) +! call mpas_pool_get_config(configs, 'config_dt', config_dt) +! call mpas_pool_get_config(configs, 'config_IAU_window_length_s', time_window_sec) + + if(trim(iau_opt) == 'on') then ! HA: We should be able to expand this for more options later on. 
+ + nsteps_iau = nint(time_window_sec / dt) + !if(debug) write(0,*) 'atm_compute_iau_coef: nsteps_iau =',nsteps_iau + + if(itimestep <= nsteps_iau) then + !wgt_iau = 1./nsteps_iau + wgt_iau = 1.0_RKIND / time_window_sec + if(debug) write(0,*) 'atm_compute_iau_coef: wgt_iau =', wgt_iau + end if + + end if + + end function atm_iau_coef + +!================================================================================================== + subroutine atm_add_tend_anal_incr (configs, structs, itimestep, dt, tend_ru, tend_rtheta, tend_rho) +!================================================================================================== + + implicit none + + type (mpas_pool_type), intent(in) :: configs + type (mpas_pool_type), intent(inout) :: structs + integer, intent(in) :: itimestep + real (kind=RKIND), intent(in) :: dt + real (kind=RKIND), dimension(:,:), intent(inout) :: tend_ru + real (kind=RKIND), dimension(:,:), intent(inout) :: tend_rtheta + real (kind=RKIND), dimension(:,:), intent(inout) :: tend_rho + + type (mpas_pool_type), pointer :: tend + type (mpas_pool_type), pointer :: tend_iau + type (mpas_pool_type), pointer :: state + type (mpas_pool_type), pointer :: diag + type (mpas_pool_type), pointer :: mesh + + integer :: iEdge, iCell, i, j, k, n + integer, pointer :: nCells, nEdges, nCellsSolve, nEdgesSolve, nVertLevels + integer, pointer:: index_qv + integer, pointer:: moist_start, moist_end + + real (kind=RKIND), dimension(:,:), pointer :: rho_edge, rho_zz, theta_m, theta, u, zz, & + tend_th, tend_w +! tend_u, tend_rho, tend_theta, tend_th, tend_w + real(kind=RKIND),dimension(:,:,:), pointer :: scalars, tend_scalars, scalars_amb + real(kind=RKIND),dimension(:,:), pointer:: u_amb, theta_amb, rho_amb + + real (kind=RKIND) :: wgt_iau + + ! + ! Compute weight for IAU forcing in this timestep, and return if weight + ! is essentially zero. + ! 
+ wgt_iau = atm_iau_coef(configs, itimestep, dt) + if (wgt_iau <= 1.0e-12_RKIND) then + return + end if + + call mpas_pool_get_subpool(structs, 'tend', tend) + call mpas_pool_get_subpool(structs, 'tend_iau', tend_iau) + call mpas_pool_get_subpool(structs, 'state', state) + call mpas_pool_get_subpool(structs, 'diag', diag) + call mpas_pool_get_subpool(structs, 'mesh', mesh) + + call mpas_pool_get_dimension(mesh, 'nCells', nCells) + call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) + call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(mesh, 'nEdgesSolve', nEdgesSolve) + call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) + + call mpas_pool_get_array(mesh, 'zz', zz) + + call mpas_pool_get_array(state, 'theta_m', theta_m, 1) + call mpas_pool_get_array(state, 'scalars', scalars, 1) + call mpas_pool_get_array(state, 'rho_zz', rho_zz, 2) + call mpas_pool_get_array(diag , 'rho_edge', rho_edge) + + call mpas_pool_get_dimension(state, 'moist_start', moist_start) + call mpas_pool_get_dimension(state, 'moist_end', moist_end) + call mpas_pool_get_dimension(state, 'index_qv', index_qv) + + ! Joe did not recommend to add w tendencies in IAU. + !call mpas_pool_get_array(tend, 'w', tend_w) + +! call mpas_pool_get_array(tend, 'u', tend_u) +! call mpas_pool_get_array(tend, 'rho_zz', tend_rho) +! call mpas_pool_get_array(tend, 'theta_m', tend_theta) + call mpas_pool_get_array(tend, 'scalars_tend', tend_scalars) + + call mpas_pool_get_array(tend_iau, 'theta', theta_amb) + call mpas_pool_get_array(tend_iau, 'rho', rho_amb) + call mpas_pool_get_array(tend_iau, 'u', u_amb) + call mpas_pool_get_array(tend_iau, 'scalars', scalars_amb) + !call mpas_pool_get_array(tend_iau, 'w', w_amb) + + allocate(theta(nVertLevels,nCellsSolve) ) + allocate(tend_th(nVertLevels,nCellsSolve)) + +! initialize the tendency for potential temperature + tend_th = 0._RKIND + +! write(0,*) 'atm_add_tend_anal_incr: wgt_iau =',wgt_iau + +! 
add coupled tendencies for u on edges + do i = 1, nEdgesSolve + do k = 1, nVertLevels + tend_ru(k,i) = tend_ru(k,i) + wgt_iau * rho_edge(k,i) * u_amb(k,i) + enddo + enddo + +! add tendencies for rho_zz (instead of rho) + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_rho(k,i) = tend_rho(k,i) + wgt_iau * rho_amb(k,i)/zz(k,i) + enddo + enddo + +! add tendencies for w (tend_w = 0 at k=1 and k=nVertLevelsP1) - Not tested yet +! do i = 1, nCellsSolve +! do k = 2, nVertLevels +! tend_w(k,i) = tend_w(k,i) + wgt_iau * w_amb(k,i)*rho_zz(k,i) +! enddo +! enddo + + do i = 1, nCellsSolve + do k = 1, nVertLevels + theta(k,i) = theta_m(k,i) / (1._RKIND + rvord * scalars(index_qv,k,i)) + enddo + enddo + +! add coupled tendencies for other state variables on cell centers + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_th(k,i) = tend_th(k,i) + wgt_iau * (theta_amb(k,i)*rho_zz(k,i) + theta(k,i)*rho_amb(k,i)/zz(k,i)) + tend_scalars(moist_start:moist_end,k,i) = tend_scalars(moist_start:moist_end,k,i) & + + wgt_iau * (scalars_amb(moist_start:moist_end,k,i)*rho_zz(k,i) + scalars(moist_start:moist_end,k,i)*rho_amb(k,i)/zz(k,i)) + enddo + enddo + + !if non-hydrostatic core, convert the tendency for the potential temperature to a + !tendency for the modified potential temperature + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_th(k,i) = (1. 
+ rvord * scalars(index_qv,k,i)) * tend_th(k,i) & + + rvord * theta(k,i) * tend_scalars(index_qv,k,i) + tend_rtheta(k,i) = tend_rtheta(k,i) + tend_th(k,i) + enddo + enddo + + deallocate(theta) + deallocate(tend_th) + + + end subroutine atm_add_tend_anal_incr + !================================================================================================== + +end module mpas_atm_iau diff --git a/src/core_atmosphere/dynamics/mpas_atm_time_integration.F b/src/core_atmosphere/dynamics/mpas_atm_time_integration.F index 7f8882b205..b2861fe7dc 100644 --- a/src/core_atmosphere/dynamics/mpas_atm_time_integration.F +++ b/src/core_atmosphere/dynamics/mpas_atm_time_integration.F @@ -5,6 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! + module atm_time_integration use mpas_derived_types @@ -16,6 +17,7 @@ module atm_time_integration ! Added only clause to keep xlf90 from getting confused from the overloaded abs intrinsic in mpas_timekeeping use mpas_timekeeping, only: MPAS_Time_type, MPAS_TimeInterval_type, & mpas_set_time, mpas_set_timeInterval, mpas_get_time, operator(+), add_t_ti + use mpas_timer #ifdef DO_PHYSICS use mpas_atmphys_driver_microphysics @@ -23,6 +25,40 @@ module atm_time_integration use mpas_atmphys_utilities #endif + use mpas_atm_iau + + integer :: timerid, secs, u_secs + + ! Used to store physics tendencies for dynamics variables + real (kind=RKIND), allocatable, dimension(:,:) :: tend_ru_physics, tend_rtheta_physics, tend_rho_physics + + ! Used in compute_dyn_tend + real (kind=RKIND), allocatable, dimension(:,:) :: qtot + real (kind=RKIND), allocatable, dimension(:,:) :: delsq_theta, delsq_w, delsq_divergence + real (kind=RKIND), allocatable, dimension(:,:) :: delsq_u +! real (kind=RKIND), allocatable, dimension(:,:) :: delsq_circulation ! 
no longer used -> removed + real (kind=RKIND), allocatable, dimension(:,:) :: delsq_vorticity + real (kind=RKIND), allocatable, dimension(:,:) :: dpdz + + ! Used in atm_advance_scalars + real (kind=RKIND), dimension(:,:,:), allocatable :: horiz_flux_array + + ! Used in atm_advance_scalars_mono + real (kind=RKIND), dimension(:,:), allocatable :: scalar_old_arr, scalar_new_arr + real (kind=RKIND), dimension(:,:), allocatable :: s_max_arr, s_min_arr + real (kind=RKIND), dimension(:,:,:), allocatable :: scale_array + real (kind=RKIND), dimension(:,:), allocatable :: flux_array + real (kind=RKIND), dimension(:,:), allocatable :: flux_upwind_tmp_arr + real (kind=RKIND), dimension(:,:), allocatable :: flux_tmp_arr + real (kind=RKIND), dimension(:,:), allocatable :: wdtn_arr + real (kind=RKIND), dimension(:,:), allocatable :: rho_zz_int + real (kind=RKIND), dimension(:,:,:), allocatable :: scalar_tend_array + + ! Used in compute_solve_diagnostics + real (kind=RKIND), allocatable, dimension(:,:) :: ke_vertex + real (kind=RKIND), allocatable, dimension(:,:) :: ke_edge + + contains @@ -58,9 +94,8 @@ subroutine atm_timestep(domain, dt, timeStamp, itimestep) if (trim(config_time_integration) == 'SRK3') then call atm_srk3(domain, dt, itimestep) else - write(0,*) 'Unknown time integration option '//trim(config_time_integration) - write(0,*) 'Currently, only ''SRK3'' is supported.' - call mpas_dmpar_abort(domain % dminfo) + call mpas_dmpar_global_abort('Unknown time integration option '//trim(config_time_integration), deferredAbort=.true.) 
+ call mpas_dmpar_global_abort('Currently, only ''SRK3'' is supported.') end if call mpas_set_time(currTime, dateTimeString=timeStamp) @@ -99,18 +134,24 @@ subroutine atm_srk3(domain, dt, itimestep) real (kind=RKIND), intent(in) :: dt integer, intent(in) :: itimestep - + integer :: thread integer :: iCell, k, iEdge type (block_type), pointer :: block + integer, pointer :: nThreads + integer, dimension(:), pointer :: cellThreadStart, cellThreadEnd + integer, dimension(:), pointer :: cellSolveThreadStart, cellSolveThreadEnd + integer, dimension(:), pointer :: edgeThreadStart, edgeThreadEnd + integer, dimension(:), pointer :: edgeSolveThreadStart, edgeSolveThreadEnd + integer, dimension(:), pointer :: vertexThreadStart, vertexThreadEnd + integer, dimension(:), pointer :: vertexSolveThreadStart, vertexSolveThreadEnd + integer :: rk_step, number_of_sub_steps integer :: iScalar real (kind=RKIND), dimension(3) :: rk_timestep, rk_sub_timestep integer, dimension(3) :: number_sub_steps integer :: small_step - logical, parameter :: debug = .false. -! logical, parameter :: debug = .true. ! 
additions for splitting scalar transport from dynamics, WCS 18 November 2014 logical, pointer :: config_split_dynamics_transport @@ -118,19 +159,18 @@ subroutine atm_srk3(domain, dt, itimestep) integer :: dynamics_substep, dynamics_split real (kind=RKIND) :: dt_dynamics - real (kind=RKIND) :: scalar_min, scalar_max - real (kind=RKIND) :: global_scalar_min, global_scalar_max - integer, pointer :: config_number_of_sub_steps + integer, pointer :: config_time_integration_order logical, pointer :: config_scalar_advection logical, pointer :: config_positive_definite logical, pointer :: config_monotonic - logical, pointer :: config_print_global_minmax_vel - logical, pointer :: config_print_global_minmax_sca real (kind=RKIND), pointer :: config_dt character (len=StrKIND), pointer :: config_microp_scheme + character (len=StrKIND), pointer :: config_convection_scheme - integer, pointer :: num_scalars, index_qv, nCells, nCellsSolve, nEdges, nEdgesSolve, nVertLevels + integer, pointer :: num_scalars, index_qv, nCells, nCellsSolve, nEdges, nEdgesSolve, nVertices, nVerticesSolve, nVertLevels + + character(len=StrKIND), pointer :: config_IAU_option type (mpas_pool_type), pointer :: state type (mpas_pool_type), pointer :: diag @@ -144,6 +184,7 @@ subroutine atm_srk3(domain, dt, itimestep) type (field2DReal), pointer :: pressure_p_field type (field2DReal), pointer :: rtheta_p_field type (field2DReal), pointer :: rtheta_pp_field + type (field2DReal), pointer :: divergence_3d_field type (field2DReal), pointer :: tend_u_field type (field2DReal), pointer :: u_field type (field2DReal), pointer :: w_field @@ -152,6 +193,7 @@ subroutine atm_srk3(domain, dt, itimestep) type (field2DReal), pointer :: rho_pp_field type (field2DReal), pointer :: pv_edge_field type (field2DReal), pointer :: rho_edge_field + type (field2DReal), pointer :: exner_field real (kind=RKIND), dimension(:,:), pointer :: w real (kind=RKIND), dimension(:,:), pointer :: u, uReconstructZonal, uReconstructMeridional, 
uReconstructX, uReconstructY, uReconstructZ @@ -159,17 +201,21 @@ subroutine atm_srk3(domain, dt, itimestep) real (kind=RKIND), dimension(:,:), pointer :: rqvdynten + logical, parameter :: debug = .false. + + ! ! Retrieve configuration options ! call mpas_pool_get_config(domain % blocklist % configs, 'config_number_of_sub_steps', config_number_of_sub_steps) + call mpas_pool_get_config(domain % blocklist % configs, 'config_time_integration_order', config_time_integration_order) call mpas_pool_get_config(domain % blocklist % configs, 'config_scalar_advection', config_scalar_advection) call mpas_pool_get_config(domain % blocklist % configs, 'config_positive_definite', config_positive_definite) call mpas_pool_get_config(domain % blocklist % configs, 'config_monotonic', config_monotonic) call mpas_pool_get_config(domain % blocklist % configs, 'config_dt', config_dt) call mpas_pool_get_config(domain % blocklist % configs, 'config_microp_scheme', config_microp_scheme) - call mpas_pool_get_config(domain % blocklist % configs, 'config_print_global_minmax_vel', config_print_global_minmax_vel) - call mpas_pool_get_config(domain % blocklist % configs, 'config_print_global_minmax_sca', config_print_global_minmax_sca) + call mpas_pool_get_config(domain % blocklist % configs, 'config_convection_scheme', config_convection_scheme) + call mpas_pool_get_config(domain % blocklist % configs, 'config_IAU_option', config_IAU_option) ! config variables for dynamics-transport splitting, WCS 18 November 2014 call mpas_pool_get_config(domain % blocklist % configs, 'config_split_dynamics_transport', config_split_dynamics_transport) @@ -189,6 +235,21 @@ subroutine atm_srk3(domain, dt, itimestep) call mpas_pool_get_field(diag, 'pressure_p', pressure_p_field) call mpas_pool_get_field(diag, 'rtheta_p', rtheta_p_field) + ! + ! allocate storage for physics tendency save + ! 
+ call mpas_pool_get_dimension(state, 'nCells', nCells) + call mpas_pool_get_dimension(state, 'nEdges', nEdges) + call mpas_pool_get_dimension(state, 'nVertLevels', nVertLevels) + + allocate(qtot(nVertLevels,nCells+1)) + qtot(:,nCells+1) = 0.0_RKIND + allocate(tend_rtheta_physics(nVertLevels,nCells+1)) + tend_rtheta_physics(:,nCells+1) = 0.0_RKIND + allocate(tend_rho_physics(nVertLevels,nCells+1)) + tend_rho_physics(:,nCells+1) = 0.0_RKIND + allocate(tend_ru_physics(nVertLevels,nEdges+1)) + tend_ru_physics(:,nEdges+1) = 0.0_RKIND ! ! Initialize RK weights @@ -206,20 +267,37 @@ subroutine atm_srk3(domain, dt, itimestep) if (.not. config_scalar_advection ) write(0,*) ' scalar advection turned off ' number_of_sub_steps = config_number_of_sub_steps - rk_timestep(1) = dt_dynamics/3. - rk_timestep(2) = dt_dynamics/2. - rk_timestep(3) = dt_dynamics - rk_sub_timestep(1) = dt_dynamics/3. - rk_sub_timestep(2) = dt_dynamics/real(number_of_sub_steps) - rk_sub_timestep(3) = dt_dynamics/real(number_of_sub_steps) + if(config_time_integration_order == 3) then - number_sub_steps(1) = 1 - number_sub_steps(2) = max(1,number_of_sub_steps/2) - number_sub_steps(3) = number_of_sub_steps + rk_timestep(1) = dt_dynamics/3. + rk_timestep(2) = dt_dynamics/2. + rk_timestep(3) = dt_dynamics - if(debug) write(0,*) ' copy step in rk solver ' + rk_sub_timestep(1) = dt_dynamics/3. + rk_sub_timestep(2) = dt_dynamics/real(number_of_sub_steps) + rk_sub_timestep(3) = dt_dynamics/real(number_of_sub_steps) + number_sub_steps(1) = 1 + number_sub_steps(2) = max(1,number_of_sub_steps/2) + number_sub_steps(3) = number_of_sub_steps + + else if (config_time_integration_order == 2) then + + rk_timestep(1) = dt_dynamics/2. + rk_timestep(2) = dt_dynamics/2. 
+ rk_timestep(3) = dt_dynamics + + rk_sub_timestep(1) = dt_dynamics/real(number_of_sub_steps) + rk_sub_timestep(2) = dt_dynamics/real(number_of_sub_steps) + rk_sub_timestep(3) = dt_dynamics/real(number_of_sub_steps) + + number_sub_steps(1) = max(1,number_of_sub_steps/2) + number_sub_steps(2) = max(1,number_of_sub_steps/2) + number_sub_steps(3) = number_of_sub_steps + + end if + ! theta_m call mpas_dmpar_exch_halo_field(theta_m_field) @@ -233,83 +311,326 @@ subroutine atm_srk3(domain, dt, itimestep) call mpas_dmpar_exch_halo_field(rtheta_p_field) + call mpas_timer_start('atm_rk_integration_setup') + + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'state', state) + call mpas_pool_get_subpool(block % structs, 'diag', diag) + ! mesh is needed for atm_compute_moist_coefficients + call mpas_pool_get_subpool(block % structs, 'mesh', mesh) + + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadStart', vertexThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadEnd', vertexThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadStart', vertexSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadEnd', vertexSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call 
mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + +!$OMP PARALLEL DO + do thread=1,nThreads + call atm_rk_integration_setup(state, diag, & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + vertexSolveThreadStart(thread), vertexSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + + block => block % next + end do + call mpas_timer_stop('atm_rk_integration_setup') + + call mpas_timer_start('atm_compute_moist_coefficients') + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'state', state) + call mpas_pool_get_subpool(block % structs, 'diag', diag) + ! mesh is needed for atm_compute_moist_coefficients + call mpas_pool_get_subpool(block % structs, 'mesh', mesh) + + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadStart', vertexThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadEnd', vertexThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadStart', vertexSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadEnd', vertexSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call 
mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + +!$OMP PARALLEL DO + do thread=1,nThreads + call atm_compute_moist_coefficients( block % dimensions, state, diag, mesh, & !MGD could do away with dimensions arg + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + vertexSolveThreadStart(thread), vertexSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + + block => block % next + end do + call mpas_timer_stop('atm_compute_moist_coefficients') + +#ifdef DO_PHYSICS + call mpas_timer_start('physics_addtend') block => domain % blocklist do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'mesh', mesh) call mpas_pool_get_subpool(block % structs, 'state', state) call mpas_pool_get_subpool(block % structs, 'diag', diag) - call atm_rk_integration_setup(state, diag) + call mpas_pool_get_subpool(block % structs, 'tend', tend) + call mpas_pool_get_subpool(block % structs, 'tend_physics', tend_physics) + rk_step = 1 + dynamics_substep = 1 + call physics_get_tend( block, & + mesh, & + state, & + diag, & + tend, & + tend_physics, & + block % configs, & + rk_step, & + dynamics_substep, & + tend_ru_physics, & + tend_rtheta_physics, & + tend_rho_physics ) block => block % next end do + call mpas_timer_stop('physics_addtend') +#endif + + ! + ! IAU - Incremental Analysis Update + ! 
+ if (trim(config_IAU_option) /= 'off') then + block => domain % blocklist + do while (associated(block)) + call atm_add_tend_anal_incr(block % configs, block % structs, itimestep, dt, & + tend_ru_physics, tend_rtheta_physics, tend_rho_physics) + block => block % next + end do + end if + DYNAMICS_SUBSTEPS : do dynamics_substep = 1, dynamics_split + ! Compute the coefficients for the vertically implicit solve in the acoustic step. + ! These coefficients will work for the first acoustic step in all cases. + call mpas_timer_start('atm_compute_vert_imp_coefs') + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'mesh', mesh) + call mpas_pool_get_subpool(block % structs, 'state', state) + call mpas_pool_get_subpool(block % structs, 'diag', diag) + call mpas_pool_get_subpool(block % structs, 'tend', tend) + + call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) + + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + + rk_step = 1 +!$OMP PARALLEL DO + do thread=1,nThreads + call atm_compute_vert_imp_coefs( state, mesh, diag, block % configs, nVertLevels, rk_sub_timestep(rk_step), & + cellThreadStart(thread), cellThreadEnd(thread), & + edgeThreadStart(thread), 
edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + block => block % next + end do + call mpas_timer_stop('atm_compute_vert_imp_coefs') + + call mpas_pool_get_field(diag, 'exner', exner_field) + call mpas_dmpar_exch_halo_field(exner_field) + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! BEGIN Runge-Kutta loop !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! RK3_DYNAMICS : do rk_step = 1, 3 ! Runge-Kutta loop - if(debug) write(0,*) ' rk substep ', rk_step - - block => domain % blocklist - do while (associated(block)) - ! The coefficients are set for owned cells (cqw) and for all edges of owned cells, - call mpas_pool_get_subpool(block % structs, 'state', state) - call mpas_pool_get_subpool(block % structs, 'diag', diag) - call mpas_pool_get_subpool(block % structs, 'mesh', mesh) - call atm_compute_moist_coefficients( block % dimensions, state, diag, mesh ) !MGD could do away with dimensions arg - block => block % next - end do - - - if (debug) write(0,*) ' compute_dyn_tend ' + ! recompute vertically implicit coefficients if necessary + if( (config_time_integration_order == 3) .and. (rk_step == 2)) then + + ! Compute the coefficients for the vertically implicit solve in the acoustic step. + ! These coefficients will work for the 2nd and 3rd acoustic steps (dt is the same for both). 
+ block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'mesh', mesh) + call mpas_pool_get_subpool(block % structs, 'state', state) + call mpas_pool_get_subpool(block % structs, 'diag', diag) + call mpas_pool_get_subpool(block % structs, 'tend', tend) + + call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) + + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + +!$OMP PARALLEL DO + do thread=1,nThreads + call atm_compute_vert_imp_coefs( state, mesh, diag, block % configs, nVertLevels, rk_sub_timestep(rk_step), & + cellThreadStart(thread), cellThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + block => block % next + end do + end if + + call mpas_timer_start('atm_compute_dyn_tend') block => domain % blocklist do while (associated(block)) call mpas_pool_get_subpool(block % structs, 'state', state) call mpas_pool_get_subpool(block % structs, 'diag', diag) call mpas_pool_get_subpool(block % structs, 'mesh', mesh) call mpas_pool_get_subpool(block % structs, 'tend', tend) - + call 
mpas_pool_get_subpool(block % structs, 'tend_physics', tend_physics) + + call mpas_pool_get_dimension(mesh, 'nCells', nCells) + call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) + call mpas_pool_get_dimension(mesh, 'nVertices', nVertices) call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) - call atm_compute_dyn_tend( tend, state, diag, mesh, block % configs, nVertLevels, rk_step, dt ) - + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadStart', vertexThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadEnd', vertexThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadStart', vertexSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadEnd', vertexSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + + allocate(delsq_theta(nVertLevels,nCells+1)) + delsq_theta(:,nCells+1) = 0.0_RKIND + allocate(delsq_w(nVertLevels,nCells+1)) + delsq_w(:,nCells+1) = 0.0_RKIND +!! allocate(qtot(nVertLevels,nCells+1)) ! 
initializing this earlier in solution sequence + allocate(delsq_divergence(nVertLevels,nCells+1)) + delsq_divergence(:,nCells+1) = 0.0_RKIND + allocate(delsq_u(nVertLevels,nEdges+1)) + delsq_u(:,nEdges+1) = 0.0_RKIND +!! allocate(delsq_circulation(nVertLevels,nVertices+1)) ! no longer used -> removed + allocate(delsq_vorticity(nVertLevels,nVertices+1)) + delsq_vorticity(:,nVertices+1) = 0.0_RKIND + allocate(dpdz(nVertLevels,nCells+1)) + dpdz(:,nCells+1) = 0.0_RKIND + +!$OMP PARALLEL DO + do thread=1,nThreads + call atm_compute_dyn_tend( tend, tend_physics, state, diag, mesh, block % configs, nVertLevels, rk_step, dt, & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + vertexSolveThreadStart(thread), vertexSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + + deallocate(delsq_theta) + deallocate(delsq_w) +!! deallocate(qtot) ! deallocation after dynamics step complete, see below + deallocate(delsq_divergence) + deallocate(delsq_u) +!! deallocate(delsq_circulation) ! 
no longer used -> removed + deallocate(delsq_vorticity) + deallocate(dpdz) + block => block % next end do - if (debug) write(0,*) ' finished compute_dyn_tend ' + call mpas_timer_stop('atm_compute_dyn_tend') #ifdef DO_PHYSICS - if (debug) write(0,*) ' add physics tendencies ' - block => domain % blocklist - do while (associated(block)) - call mpas_pool_get_subpool(block % structs, 'mesh', mesh) - call mpas_pool_get_subpool(block % structs, 'state', state) - call mpas_pool_get_subpool(block % structs, 'diag', diag) - call mpas_pool_get_subpool(block % structs, 'tend', tend) - call mpas_pool_get_subpool(block % structs, 'tend_physics', tend_physics) - call physics_addtend( block, & - mesh, & - state, & - diag, & - tend, & - tend_physics, & - block % configs, & - rk_step ) - block => block % next - end do - if (debug) write(0,*) ' finished add physics tendencies ' +! call mpas_timer_start('physics_addtend') +! block => domain % blocklist +! do while (associated(block)) +! call mpas_pool_get_subpool(block % structs, 'mesh', mesh) +! call mpas_pool_get_subpool(block % structs, 'state', state) +! call mpas_pool_get_subpool(block % structs, 'diag', diag) +! call mpas_pool_get_subpool(block % structs, 'tend', tend) +! call mpas_pool_get_subpool(block % structs, 'tend_physics', tend_physics) +! call physics_addtend( block, & +! mesh, & +! state, & +! diag, & +! tend, & +! tend_physics, & +! block % configs, & +! rk_step, & +! dynamics_substep ) +! block => block % next +! end do +! call mpas_timer_stop('physics_addtend') #endif + !*********************************** ! need tendencies at all edges of owned cells - ! we are solving for all edges of owned cells to minimize communications ! during the acoustic substeps !*********************************** - ! tend_u +! 
tend_u call mpas_pool_get_subpool(domain % blocklist % structs, 'tend', tend) call mpas_pool_get_field(tend, 'u', tend_u_field) call mpas_dmpar_exch_halo_field(tend_u_field, (/ 1 /)) + + call mpas_timer_start('small_step_prep') block => domain % blocklist do while (associated(block)) @@ -317,14 +638,33 @@ subroutine atm_srk3(domain, dt, itimestep) call mpas_pool_get_subpool(block % structs, 'state', state) call mpas_pool_get_subpool(block % structs, 'diag', diag) call mpas_pool_get_subpool(block % structs, 'tend', tend) - + call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) - call atm_set_smlstep_pert_variables( tend, diag, mesh, block % configs ) - call atm_compute_vert_imp_coefs( state, mesh, diag, block % configs, nVertLevels, rk_sub_timestep(rk_step) ) - + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + +!$OMP PARALLEL DO + do thread=1,nThreads + call atm_set_smlstep_pert_variables( tend, diag, mesh, block % configs, & + cellThreadStart(thread), cellThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO block => block % next end do + call 
mpas_timer_stop('small_step_prep') !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! begin acoustic steps loop @@ -332,8 +672,11 @@ subroutine atm_srk3(domain, dt, itimestep) do small_step = 1, number_sub_steps(rk_step) - if(debug) write(0,*) ' acoustic step ',small_step - + call mpas_pool_get_subpool(domain % blocklist % structs, 'diag', diag) + call mpas_pool_get_field(diag, 'rho_pp', rho_pp_field) + call mpas_dmpar_exch_halo_field(rho_pp_field, (/ 1 /)) + + call mpas_timer_start('atm_advance_acoustic_step') block => domain % blocklist do while (associated(block)) call mpas_pool_get_subpool(block % structs, 'mesh', mesh) @@ -344,12 +687,40 @@ subroutine atm_srk3(domain, dt, itimestep) call mpas_pool_get_dimension(mesh, 'nCells', nCells) call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) - call atm_advance_acoustic_step( state, diag, tend, mesh, block % configs, nCells, nVertLevels, rk_sub_timestep(rk_step) ) + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadStart', vertexThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadEnd', vertexThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadStart', vertexSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadEnd', vertexSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', 
edgeSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + +!$OMP PARALLEL DO + do thread=1,nThreads + call atm_advance_acoustic_step( state, diag, tend, mesh, block % configs, nCells, nVertLevels, & + rk_sub_timestep(rk_step), small_step, & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + vertexSolveThreadStart(thread), vertexSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO block => block % next end do + call mpas_timer_stop('atm_advance_acoustic_step') - if(debug) write(0,*) ' acoustic step complete ' ! rtheta_pp ! This is the only communications needed during the acoustic steps because we solve for u on all edges of owned cells @@ -357,7 +728,10 @@ subroutine atm_srk3(domain, dt, itimestep) call mpas_pool_get_subpool(domain % blocklist % structs, 'diag', diag) call mpas_pool_get_field(diag, 'rtheta_pp', rtheta_pp_field) call mpas_dmpar_exch_halo_field(rtheta_pp_field, (/ 1 /)) - + + call mpas_pool_get_field(diag, 'divergence_3d', divergence_3d_field) + call mpas_dmpar_exch_halo_field(divergence_3d_field, (/ 1 /)) + end do ! 
end of acoustic steps loop !CR: SMALLER STENCIL?: call mpas_dmpar_exch_halo_field(block % diag % rw_p, (/ 1 /)) @@ -376,17 +750,48 @@ subroutine atm_srk3(domain, dt, itimestep) call mpas_pool_get_field(diag, 'rtheta_pp', rtheta_pp_field) call mpas_dmpar_exch_halo_field(rtheta_pp_field, (/ 2 /)) + call mpas_timer_start('atm_recover_large_step_variables') block => domain % blocklist do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'mesh', mesh) call mpas_pool_get_subpool(block % structs, 'state', state) call mpas_pool_get_subpool(block % structs, 'diag', diag) call mpas_pool_get_subpool(block % structs, 'tend', tend) - call atm_recover_large_step_variables( state, diag, tend, mesh, block % configs, rk_timestep(rk_step), number_sub_steps(rk_step), rk_step ) + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadStart', vertexThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadEnd', vertexThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadStart', vertexSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadEnd', vertexSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + +!$OMP PARALLEL DO + do 
thread=1,nThreads + call atm_recover_large_step_variables( state, diag, tend, mesh, block % configs, rk_timestep(rk_step), & + number_sub_steps(rk_step), rk_step, & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + vertexSolveThreadStart(thread), vertexSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO block => block % next end do + call mpas_timer_stop('atm_recover_large_step_variables') ! u !CR: SMALLER STENCIL?: call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % u, (/ 3 /)) @@ -399,6 +804,11 @@ subroutine atm_srk3(domain, dt, itimestep) if (config_scalar_advection .and. (.not. config_split_dynamics_transport) ) then + if (rk_step < 3 .or. (.not. config_monotonic .and. .not. config_positive_definite)) then + call mpas_timer_start('atm_advance_scalars') + else + call mpas_timer_start('atm_advance_scalars_mono') + end if block => domain % blocklist do while (associated(block)) call mpas_pool_get_subpool(block % structs, 'tend', tend) @@ -411,34 +821,148 @@ subroutine atm_srk3(domain, dt, itimestep) call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) call mpas_pool_get_dimension(state, 'num_scalars', num_scalars) + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadStart', vertexThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadEnd', 
vertexThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadStart', vertexSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadEnd', vertexSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + + allocate(scalar_old_arr(nVertLevels,nCells+1)) + scalar_old_arr(:,nCells+1) = 0.0_RKIND + allocate(scalar_new_arr(nVertLevels,nCells+1)) + scalar_new_arr(:,nCells+1) = 0.0_RKIND + allocate(s_max_arr(nVertLevels,nCells+1)) + s_max_arr(:,nCells+1) = 0.0_RKIND + allocate(s_min_arr(nVertLevels,nCells+1)) + s_min_arr(:,nCells+1) = 0.0_RKIND + allocate(scale_array(nVertLevels,2,nCells+1)) + scale_array(:,:,nCells+1) = 0.0_RKIND + allocate(flux_array(nVertLevels,nEdges+1)) + flux_array(:,nEdges+1) = 0.0_RKIND + allocate(wdtn_arr(nVertLevels+1,nCells+1)) + wdtn_arr(:,nCells+1) = 0.0_RKIND + if (rk_step < 3 .or. (.not. config_monotonic .and. .not. config_positive_definite)) then + allocate(horiz_flux_array(num_scalars,nVertLevels,nEdges+1)) + horiz_flux_array(:,:,nEdges+1) = 0.0_RKIND + else + allocate(flux_upwind_tmp_arr(nVertLevels,nEdges+1)) + flux_upwind_tmp_arr(:,nEdges+1) = 0.0_RKIND + allocate(flux_tmp_arr(nVertLevels,nEdges+1)) + flux_tmp_arr(:,nEdges+1) = 0.0_RKIND + end if + ! ! Note: The advance_scalars_mono routine can be used without limiting, and thus, encompasses ! the functionality of the advance_scalars routine; however, it is noticeably slower, ! so we use the advance_scalars routine for the first two RK substeps. ! +!$OMP PARALLEL DO + do thread=1,nThreads + if (rk_step < 3 .or. (.not. config_monotonic .and. .not. 
config_positive_definite)) then + call atm_advance_scalars( tend, state, diag, mesh, block % configs, num_scalars, nCells, nVertLevels, rk_timestep(rk_step), & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + vertexSolveThreadStart(thread), vertexSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread), & + horiz_flux_array, rk_step, config_time_integration_order, & + advance_density=.false. ) + else + + block % domain = domain + call atm_advance_scalars_mono( block, tend, state, diag, mesh, block % configs, nCells, nEdges, nVertLevels, rk_timestep(rk_step), & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + vertexSolveThreadStart(thread), vertexSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread), & + scalar_old_arr, scalar_new_arr, s_max_arr, s_min_arr, wdtn_arr, & + scale_array, flux_array, flux_upwind_tmp_arr, flux_tmp_arr, & + advance_density=.false.) + end if + end do +!$OMP END PARALLEL DO + + deallocate(scalar_old_arr) + deallocate(scalar_new_arr) + deallocate(s_max_arr) + deallocate(s_min_arr) + deallocate(scale_array) + deallocate(flux_array) + deallocate(wdtn_arr) if (rk_step < 3 .or. (.not. config_monotonic .and. .not. config_positive_definite)) then - call atm_advance_scalars( tend, state, diag, mesh, block % configs, num_scalars, nCells, nVertLevels, rk_timestep(rk_step), advance_density=.false. ) + deallocate(horiz_flux_array) else - block % domain = domain - call atm_advance_scalars_mono( block, tend, state, diag, mesh, block % configs, nCells, nEdges, nVertLevels, rk_timestep(rk_step), advance_density=.false.) 
+ deallocate(flux_upwind_tmp_arr) + deallocate(flux_tmp_arr) end if + block => block % next end do + if (rk_step < 3 .or. (.not. config_monotonic .and. .not. config_positive_definite)) then + call mpas_timer_stop('atm_advance_scalars') + else + call mpas_timer_stop('atm_advance_scalars_mono') + end if end if + call mpas_timer_start('atm_compute_solve_diagnostics') block => domain % blocklist do while (associated(block)) call mpas_pool_get_subpool(block % structs, 'state', state) call mpas_pool_get_subpool(block % structs, 'diag', diag) call mpas_pool_get_subpool(block % structs, 'mesh', mesh) + + call mpas_pool_get_dimension(state, 'nEdges', nEdges) + call mpas_pool_get_dimension(state, 'nVertices', nVertices) + call mpas_pool_get_dimension(state, 'nVertLevels', nVertLevels) + + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadStart', vertexThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadEnd', vertexThreadEnd) - call atm_compute_solve_diagnostics( dt, state, 2, diag, mesh, block % configs ) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + + allocate(ke_vertex(nVertLevels,nVertices+1)) + ke_vertex(:,nVertices+1) = 0.0_RKIND + allocate(ke_edge(nVertLevels,nEdges+1)) + ke_edge(:,nEdges+1) = 0.0_RKIND + +!$OMP PARALLEL DO + do thread=1,nThreads + call atm_compute_solve_diagnostics(dt, state, 2, diag, mesh, block % configs, & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), rk_step) + end do +!$OMP END PARALLEL DO + + deallocate(ke_vertex) + 
deallocate(ke_edge) block => block % next end do + call mpas_timer_stop('atm_compute_solve_diagnostics') - if(debug) write(0,*) ' diagnostics complete ' ! w call mpas_pool_get_subpool(domain % blocklist % structs, 'state', state) @@ -469,6 +993,14 @@ subroutine atm_srk3(domain, dt, itimestep) call mpas_dmpar_exch_halo_field(theta_m_field) call mpas_dmpar_exch_halo_field(pressure_p_field) call mpas_dmpar_exch_halo_field(rtheta_p_field) + + ! + ! Note: A halo exchange for 'exner' here as well as after the call + ! to driver_microphysics() can substitute for the exchange at + ! the beginning of each dynamics subcycle. Placing halo exchanges + ! here and after microphysics may in future allow for aggregation of + ! the 'exner' exchange with other exchanges. + ! end if ! dynamics-transport split, WCS 18 November 2014 @@ -476,16 +1008,53 @@ subroutine atm_srk3(domain, dt, itimestep) ! (2) need to accumulate ruAvg and wwAvg over the dynamics substeps, prepare for use in transport ! Notes: physics tendencies for scalars should be OK coming out of dynamics + call mpas_timer_start('atm_rk_dynamics_substep_finish') block => domain % blocklist do while (associated(block)) call mpas_pool_get_subpool(block % structs, 'state', state) call mpas_pool_get_subpool(block % structs, 'diag', diag) - call atm_rk_dynamics_substep_finish(state, diag, dynamics_substep, dynamics_split) + + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadStart', vertexThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadEnd', vertexThreadEnd) 
+ call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadStart', vertexSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadEnd', vertexSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + +!$OMP PARALLEL DO + do thread=1,nThreads + call atm_rk_dynamics_substep_finish(state, diag, dynamics_substep, dynamics_split, & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + vertexSolveThreadStart(thread), vertexSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + block => block % next end do + call mpas_timer_stop('atm_rk_dynamics_substep_finish') end do DYNAMICS_SUBSTEPS + + deallocate(qtot) ! we are finished with these now + deallocate(tend_rtheta_physics) + deallocate(tend_rho_physics) + deallocate(tend_ru_physics) + ! ! split transport, at present RK3 ! @@ -495,11 +1064,17 @@ subroutine atm_srk3(domain, dt, itimestep) rk_timestep(1) = dt/3. rk_timestep(2) = dt/2. rk_timestep(3) = dt + ! switch for 2nd order time integration for scalar transport + if(config_time_integration_order == 2) rk_timestep(1) = dt/2. RK3_SPLIT_TRANSPORT : do rk_step = 1, 3 ! Runge-Kutta loop - if(debug) write(0,*) ' rk split transport substep ', rk_step + if (rk_step < 3 .or. (.not. config_monotonic .and. .not. 
config_positive_definite)) then + call mpas_timer_start('atm_advance_scalars') + else + call mpas_timer_start('atm_advance_scalars_mono') + end if block => domain % blocklist do while (associated(block)) call mpas_pool_get_subpool(block % structs, 'tend', tend) @@ -512,19 +1087,111 @@ subroutine atm_srk3(domain, dt, itimestep) call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) call mpas_pool_get_dimension(state, 'num_scalars', num_scalars) + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadStart', vertexThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadEnd', vertexThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadStart', vertexSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadEnd', vertexSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + + allocate(scalar_old_arr(nVertLevels,nCells+1)) + scalar_old_arr(:,nCells+1) = 0.0_RKIND + allocate(scalar_new_arr(nVertLevels,nCells+1)) + scalar_new_arr(:,nCells+1) = 0.0_RKIND + allocate(s_max_arr(nVertLevels,nCells+1)) + s_max_arr(:,nCells+1) = 0.0_RKIND + allocate(s_min_arr(nVertLevels,nCells+1)) + s_min_arr(:,nCells+1) = 0.0_RKIND + 
allocate(scale_array(nVertLevels,2,nCells+1)) + scale_array(:,:,nCells+1) = 0.0_RKIND + allocate(flux_array(nVertLevels,nEdges+1)) + flux_array(:,nEdges+1) = 0.0_RKIND + allocate(wdtn_arr(nVertLevels+1,nCells+1)) + wdtn_arr(:,nCells+1) = 0.0_RKIND + allocate(rho_zz_int(nVertLevels,nCells+1)) + rho_zz_int(:,nCells+1) = 0.0_RKIND + allocate(scalar_tend_array(num_scalars,nVertLevels,nCells+1)) + scalar_tend_array(:,:,nCells+1) = 0.0_RKIND + if (rk_step < 3 .or. (.not. config_monotonic .and. .not. config_positive_definite)) then + allocate(horiz_flux_array(num_scalars,nVertLevels,nEdges+1)) + horiz_flux_array(:,:,nEdges+1) = 0.0_RKIND + else + allocate(flux_upwind_tmp_arr(nVertLevels,nEdges+1)) + flux_upwind_tmp_arr(:,nEdges+1) = 0.0_RKIND + allocate(flux_tmp_arr(nVertLevels,nEdges+1)) + flux_tmp_arr(:,nEdges+1) = 0.0_RKIND + end if + ! ! Note: The advance_scalars_mono routine can be used without limiting, and thus, encompasses ! the functionality of the advance_scalars routine; however, it is noticeably slower, ! so we use the advance_scalars routine for the first two RK substeps. ! + + ! The latest version of atm_advance_scalars does not need the arrays scalar_tend_array or rho_zz_int + ! We can remove scalar_tend_array???? WCS 20160921 +!$OMP PARALLEL DO + do thread=1,nThreads + if (rk_step < 3 .or. (.not. config_monotonic .and. .not. 
config_positive_definite)) then + call atm_advance_scalars( tend, state, diag, mesh, block % configs, num_scalars, nCells, nVertLevels, rk_timestep(rk_step), & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + vertexSolveThreadStart(thread), vertexSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread), & + horiz_flux_array, rk_step, config_time_integration_order, & + advance_density=.true., scalar_tend=scalar_tend_array, rho_zz_int=rho_zz_int ) + else + + block % domain = domain + call atm_advance_scalars_mono( block, tend, state, diag, mesh, block % configs, nCells, nEdges, nVertLevels, rk_timestep(rk_step), & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + vertexSolveThreadStart(thread), vertexSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread), & + scalar_old_arr, scalar_new_arr, s_max_arr, s_min_arr, wdtn_arr, & + scale_array, flux_array, flux_upwind_tmp_arr, flux_tmp_arr, & + advance_density=.true., rho_zz_int=rho_zz_int) + end if + end do +!$OMP END PARALLEL DO + + deallocate(scalar_old_arr) + deallocate(scalar_new_arr) + deallocate(s_max_arr) + deallocate(s_min_arr) + deallocate(scale_array) + deallocate(flux_array) + deallocate(wdtn_arr) + deallocate(rho_zz_int) + deallocate(scalar_tend_array) if (rk_step < 3 .or. (.not. config_monotonic .and. .not. config_positive_definite)) then - call atm_advance_scalars( tend, state, diag, mesh, block % configs, num_scalars, nCells, nVertLevels, rk_timestep(rk_step), advance_density=.true.) 
+ deallocate(horiz_flux_array) else - block % domain = domain - call atm_advance_scalars_mono( block, tend, state, diag, mesh, block % configs, nCells, nEdges, nVertLevels, rk_timestep(rk_step), advance_density=.true.) + deallocate(flux_upwind_tmp_arr) + deallocate(flux_tmp_arr) end if + block => block % next end do + if (rk_step < 3 .or. (.not. config_monotonic .and. .not. config_positive_definite)) then + call mpas_timer_stop('atm_advance_scalars') + else + call mpas_timer_stop('atm_advance_scalars_mono') + end if if (rk_step < 3) then call mpas_pool_get_field(state, 'scalars', scalars_field, 2) @@ -535,7 +1202,9 @@ subroutine atm_srk3(domain, dt, itimestep) end if -!... compute full velocity vectors at cell centers: + ! + ! reconstruct full velocity vectors at cell centers: + ! block => domain % blocklist do while (associated(block)) call mpas_pool_get_subpool(block % structs, 'state', state) @@ -560,8 +1229,10 @@ subroutine atm_srk3(domain, dt, itimestep) block => block % next end do -!... call to parameterizations of cloud microphysics. calculation of the tendency of water vapor to horizontal and -!... vertical advection needed for the Tiedtke parameterization of convection. + ! + ! call to parameterizations of cloud microphysics. calculation of the tendency of water vapor to horizontal and + ! vertical advection needed for the Tiedtke parameterization of convection. + ! 
#ifdef DO_PHYSICS block => domain % blocklist @@ -576,15 +1247,26 @@ subroutine atm_srk3(domain, dt, itimestep) call mpas_pool_get_array(state, 'scalars', scalars_1, 1) call mpas_pool_get_array(state, 'scalars', scalars_2, 2) call mpas_pool_get_dimension(state, 'index_qv', index_qv) - call mpas_pool_get_array(tend_physics, 'rqvdynten', rqvdynten) - !NOTE: The calculation of the tendency due to horizontal and vertical advection for the water vapor mixing ratio - !requires that the subroutine atm_advance_scalars_mono was called on the third Runge Kutta step, so that a halo - !update for the scalars at time_levs(1) is applied. A halo update for the scalars at time_levs(2) is done above. - if (config_monotonic) then - rqvdynten(:,:) = ( scalars_2(index_qv,:,:) - scalars_1(index_qv,:,:) ) / config_dt - else - rqvdynten(:,:) = 0._RKIND + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + if(config_convection_scheme == 'cu_grell_freitas' .or. & + config_convection_scheme == 'cu_tiedtke' .or. & + config_convection_scheme == 'cu_ntiedtke') then + + call mpas_pool_get_array(tend_physics, 'rqvdynten', rqvdynten) + + !NOTE: The calculation of the tendency due to horizontal and vertical advection for the water vapor mixing ratio + !requires that the subroutine atm_advance_scalars_mono was called on the third Runge Kutta step, so that a halo + !update for the scalars at time_levs(1) is applied. A halo update for the scalars at time_levs(2) is done above. 
+ if (config_monotonic) then + rqvdynten(:,:) = ( scalars_2(index_qv,:,:) - scalars_1(index_qv,:,:) ) / config_dt + else + rqvdynten(:,:) = 0._RKIND + end if end if !simply set to zero negative mixing ratios of different water species (for now): @@ -592,94 +1274,43 @@ subroutine atm_srk3(domain, dt, itimestep) scalars_2(:,:,:) = 0.0 !call microphysics schemes: - if (config_microp_scheme .ne. 'off') & - call microphysics_driver ( block % configs, mesh, state, 2, diag, diag_physics, tend, itimestep ) - + if (trim(config_microp_scheme) /= 'off') then + call mpas_timer_start('microphysics') +!$OMP PARALLEL DO + do thread=1,nThreads + call driver_microphysics ( block % configs, mesh, state, 2, diag, diag_physics, tend, itimestep, & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + call mpas_timer_stop('microphysics') + end if block => block % next end do -#endif - - if (config_print_global_minmax_vel) then - write(0,*) - - block => domain % blocklist - do while (associated(block)) - call mpas_pool_get_subpool(block % structs, 'state', state) - - call mpas_pool_get_array(state, 'w', w, 2) - call mpas_pool_get_array(state, 'u', u, 2) - call mpas_pool_get_dimension(state, 'nCellsSolve', nCellsSolve) - call mpas_pool_get_dimension(state, 'nEdgesSolve', nEdgesSolve) - call mpas_pool_get_dimension(state, 'nVertLevels', nVertLevels) - - scalar_min = 0.0 - scalar_max = 0.0 - do iCell = 1, nCellsSolve - do k = 1, nVertLevels - scalar_min = min(scalar_min, w(k,iCell)) - scalar_max = max(scalar_max, w(k,iCell)) - end do - end do - call mpas_dmpar_min_real(domain % dminfo, scalar_min, global_scalar_min) - call mpas_dmpar_max_real(domain % dminfo, scalar_max, global_scalar_max) - write(0,*) 'global min, max w ', global_scalar_min, global_scalar_max - - scalar_min = 0.0 - scalar_max = 0.0 - do iEdge = 1, nEdgesSolve - do k = 1, nVertLevels - scalar_min = min(scalar_min, u(k,iEdge)) - scalar_max = max(scalar_max, u(k,iEdge)) - end do - end 
do - call mpas_dmpar_min_real(domain % dminfo, scalar_min, global_scalar_min) - call mpas_dmpar_max_real(domain % dminfo, scalar_max, global_scalar_max) - write(0,*) 'global min, max u ', global_scalar_min, global_scalar_max - - block => block % next - end do - end if - - if (config_print_global_minmax_sca) then - if (.not. config_print_global_minmax_vel) write(0,*) - - block => domain % blocklist - do while (associated(block)) - call mpas_pool_get_subpool(block % structs, 'state', state) - call mpas_pool_get_array(state, 'scalars', scalars, 2) - call mpas_pool_get_dimension(state, 'nCellsSolve', nCellsSolve) - call mpas_pool_get_dimension(state, 'nVertLevels', nVertLevels) - call mpas_pool_get_dimension(state, 'num_scalars', num_scalars) - - do iScalar = 1, num_scalars - scalar_min = 0.0 - scalar_max = 0.0 - do iCell = 1, nCellsSolve - do k = 1, nVertLevels - scalar_min = min(scalar_min, scalars(iScalar,k,iCell)) - scalar_max = max(scalar_max, scalars(iScalar,k,iCell)) - end do - end do - call mpas_dmpar_min_real(domain % dminfo, scalar_min, global_scalar_min) - call mpas_dmpar_max_real(domain % dminfo, scalar_max, global_scalar_max) - write(0,'(a,i4,2(1x,e17.10))') ' global min, max scalar ', iScalar, global_scalar_min, global_scalar_max - end do + ! + ! Note: A halo exchange for 'exner' here as well as at the end of + ! the first (n-1) dynamics subcycles can substitute for the exchange at + ! the beginning of each dynamics subcycle. Placing halo exchanges here + ! and at the end of dynamics subcycles may in future allow for aggregation + ! of the 'exner' exchange with other exchanges. + ! 
+#endif - block => block % next - end do - end if + call summarize_timestep(domain) end subroutine atm_srk3 -!--- - subroutine atm_rk_integration_setup( state, diag ) + subroutine atm_rk_integration_setup( state, diag, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd) implicit none type (mpas_pool_type), intent(inout) :: state type (mpas_pool_type), intent(inout) :: diag + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd real (kind=RKIND), dimension(:,:), pointer :: ru real (kind=RKIND), dimension(:,:), pointer :: ru_save @@ -718,23 +1349,24 @@ subroutine atm_rk_integration_setup( state, diag ) call mpas_pool_get_array(state, 'scalars', scalars_1, 1) call mpas_pool_get_array(state, 'scalars', scalars_2, 2) - ru_save(:,:) = ru(:,:) - rw_save(:,:) = rw(:,:) - rtheta_p_save(:,:) = rtheta_p(:,:) - rho_p_save(:,:) = rho_p(:,:) + ru_save(:,edgeStart:edgeEnd) = ru(:,edgeStart:edgeEnd) + rw_save(:,cellStart:cellEnd) = rw(:,cellStart:cellEnd) + rtheta_p_save(:,cellStart:cellEnd) = rtheta_p(:,cellStart:cellEnd) + rho_p_save(:,cellStart:cellEnd) = rho_p(:,cellStart:cellEnd) - u_2(:,:) = u_1(:,:) - w_2(:,:) = w_1(:,:) - theta_m_2(:,:) = theta_m_1(:,:) - rho_zz_2(:,:) = rho_zz_1(:,:) - rho_zz_old_split(:,:) = rho_zz_1(:,:) - scalars_2(:,:,:) = scalars_1(:,:,:) + u_2(:,edgeStart:edgeEnd) = u_1(:,edgeStart:edgeEnd) + w_2(:,cellStart:cellEnd) = w_1(:,cellStart:cellEnd) + theta_m_2(:,cellStart:cellEnd) = theta_m_1(:,cellStart:cellEnd) + rho_zz_2(:,cellStart:cellEnd) = rho_zz_1(:,cellStart:cellEnd) + rho_zz_old_split(:,cellStart:cellEnd) = rho_zz_1(:,cellStart:cellEnd) + scalars_2(:,:,cellStart:cellEnd) = scalars_1(:,:,cellStart:cellEnd) end subroutine atm_rk_integration_setup -!----- - subroutine 
atm_compute_moist_coefficients( dims, state, diag, mesh ) + subroutine atm_compute_moist_coefficients( dims, state, diag, mesh, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd) ! the moist coefficients cqu and cqw serve to transform the inverse dry density (1/rho_d) ! into the inverse full (moist) density (1/rho_m). @@ -745,11 +1377,12 @@ subroutine atm_compute_moist_coefficients( dims, state, diag, mesh ) type (mpas_pool_type), intent(inout) :: state type (mpas_pool_type), intent(inout) :: diag type (mpas_pool_type), intent(inout) :: mesh - + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd integer :: iEdge, iCell, k, cell1, cell2, iq integer, pointer :: nCells, nEdges, nVertLevels, nCellsSolve - real (kind=RKIND) :: qtot + real (kind=RKIND) :: qtotal integer, dimension(:,:), pointer :: cellsOnEdge integer, pointer :: moist_start, moist_end real (kind=RKIND), dimension(:,:,:), pointer :: scalars @@ -768,36 +1401,46 @@ subroutine atm_compute_moist_coefficients( dims, state, diag, mesh ) call mpas_pool_get_array(diag, 'cqw', cqw) call mpas_pool_get_array(diag, 'cqu', cqu) - - do iCell = 1, nCellsSolve - do k = 2, nVertLevels - qtot = 0. +! do iCell = cellSolveStart,cellSolveEnd + do iCell = cellStart,cellEnd + qtot(1:nVertLevels,iCell) = 0.0 + do k = 1,nVertLevels do iq = moist_start, moist_end - qtot = qtot + 0.5 * (scalars(iq, k, iCell) + scalars(iq, k-1, iCell)) + qtot(k,iCell) = qtot(k,iCell) + scalars(iq, k, iCell) end do - cqw(k,iCell) = 1./(1.+qtot) + end do + end do + +! do iCell = cellSolveStart,cellSolveEnd + do iCell = cellStart,cellEnd + do k = 2, nVertLevels + qtotal = 0.5*(qtot(k,iCell)+qtot(k-1,iCell)) + cqw(k,iCell) = 1.0 / (1.0 + qtotal) end do end do - do iEdge = 1, nEdges +! 
would need to compute qtot for all cells and an openmp barrier to use qtot below. + + do iEdge = edgeStart,edgeEnd cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then do k = 1, nVertLevels - qtot = 0. + qtotal = 0.0 do iq = moist_start, moist_end - qtot = qtot + 0.5 * ( scalars(iq, k, cell1) + scalars(iq, k, cell2) ) + qtotal = qtotal + 0.5 * ( scalars(iq, k, cell1) + scalars(iq, k, cell2) ) end do - cqu(k,iEdge) = 1./( 1. + qtot) + cqu(k,iEdge) = 1.0 / (1.0 + qtotal) end do end if end do end subroutine atm_compute_moist_coefficients -!--- - subroutine atm_compute_vert_imp_coefs(state, mesh, diag, configs, nVertLevels, dts) + subroutine atm_compute_vert_imp_coefs(state, mesh, diag, configs, nVertLevels, dts, & + cellStart, cellEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, edgeSolveStart, edgeSolveEnd) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! Compute coefficients for vertically implicit gravity-wave/acoustic computations ! @@ -815,26 +1458,19 @@ subroutine atm_compute_vert_imp_coefs(state, mesh, diag, configs, nVertLevels, d type (mpas_pool_type), intent(in) :: configs integer, intent(in) :: nVertLevels ! 
for allocating stack variables real (kind=RKIND), intent(in) :: dts + integer, intent(in) :: cellStart, cellEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, edgeSolveStart, edgeSolveEnd - integer :: iCell, k, iq - - integer, pointer :: nCells, nCellsSolve real (kind=RKIND), dimension(:,:), pointer :: zz, cqw, p, t, rb, rtb, pb, rt real (kind=RKIND), dimension(:,:), pointer :: cofwr, cofwz, coftz, cofwt, a_tri, alpha_tri, gamma_tri real (kind=RKIND), dimension(:), pointer :: cofrz, rdzw, fzm, fzp, rdzu real (kind=RKIND), dimension(:,:,:), pointer :: scalars - real (kind=RKIND), dimension( nVertLevels ) :: b_tri,c_tri real (kind=RKIND), pointer :: epssm - real (kind=RKIND) :: dtseps, c2, qtot, rcv - integer, pointer :: moist_start, moist_end - -! set coefficients + integer, pointer :: nCells, moist_start, moist_end - call mpas_pool_get_dimension(mesh, 'nCells', nCells) - call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) call mpas_pool_get_config(configs, 'config_epssm', epssm) @@ -862,39 +1498,110 @@ subroutine atm_compute_vert_imp_coefs(state, mesh, diag, configs, nVertLevels, d call mpas_pool_get_array(state, 'theta_m', t, 2) call mpas_pool_get_array(state, 'scalars', scalars, 2) + + call mpas_pool_get_dimension(state, 'nCells', nCells) call mpas_pool_get_dimension(state, 'moist_start', moist_start) call mpas_pool_get_dimension(state, 'moist_end', moist_end) + call atm_compute_vert_imp_coefs_work(nCells, moist_start, moist_end, dts, epssm, & + zz, cqw, p, t, rb, rtb, pb, rt, cofwr, cofwz, coftz, cofwt, & + a_tri, alpha_tri, gamma_tri, cofrz, rdzw, fzm, fzp, rdzu, scalars, & + cellStart, cellEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, edgeSolveStart, edgeSolveEnd) + + + end subroutine atm_compute_vert_imp_coefs + + + subroutine atm_compute_vert_imp_coefs_work(nCells, moist_start, moist_end, dts, epssm, & + zz, cqw, p, t, rb, rtb, pb, rt, cofwr, cofwz, coftz, cofwt, & + a_tri, alpha_tri, gamma_tri, cofrz, 
rdzw, fzm, fzp, rdzu, scalars, & + cellStart, cellEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, edgeSolveStart, edgeSolveEnd) + + use mpas_atm_dimensions + + implicit none + + + ! + ! Dummy arguments + ! + integer, intent(in) :: nCells, moist_start, moist_end + real (kind=RKIND), intent(in) :: dts + real (kind=RKIND), intent(in) :: epssm + + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: zz + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: cqw + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: p + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: t + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rb + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rtb + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: pb + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rt + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: cofwr + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: cofwz + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: coftz + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: cofwt + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: a_tri + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: alpha_tri + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: gamma_tri + real (kind=RKIND), dimension(nVertLevels) :: cofrz + real (kind=RKIND), dimension(nVertLevels) :: rdzw + real (kind=RKIND), dimension(nVertLevels) :: fzm + real (kind=RKIND), dimension(nVertLevels) :: fzp + real (kind=RKIND), dimension(nVertLevels) :: rdzu + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1) :: scalars + + integer, intent(in) :: cellStart, cellEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, edgeSolveStart, edgeSolveEnd + + + ! + ! Local variables + ! + integer :: iCell, k, iq + real (kind=RKIND) :: dtseps, c2, qtotal, rcv + real (kind=RKIND), dimension( nVertLevels ) :: b_tri, c_tri + + + ! 
set coefficients dtseps = .5*dts*(1.+epssm) rcv = rgas/(cp-rgas) c2 = cp*rcv +! MGD bad to have all threads setting this variable? do k=1,nVertLevels cofrz(k) = dtseps*rdzw(k) end do - do iCell = 1, nCellsSolve ! we only need to do cells we are solving for, not halo cells + do iCell = cellSolveStart,cellSolveEnd ! we only need to do cells we are solving for, not halo cells +!DIR$ IVDEP do k=2,nVertLevels cofwr(k,iCell) =.5*dtseps*gravity*(fzm(k)*zz(k,iCell)+fzp(k)*zz(k-1,iCell)) end do coftz(1,iCell) = 0.0 +!DIR$ IVDEP do k=2,nVertLevels cofwz(k,iCell) = dtseps*c2*(fzm(k)*zz(k,iCell)+fzp(k)*zz(k-1,iCell)) & *rdzu(k)*cqw(k,iCell)*(fzm(k)*p (k,iCell)+fzp(k)*p (k-1,iCell)) coftz(k,iCell) = dtseps* (fzm(k)*t (k,iCell)+fzp(k)*t (k-1,iCell)) end do coftz(nVertLevels+1,iCell) = 0.0 +!DIR$ IVDEP do k=1,nVertLevels - qtot = 0. - do iq = moist_start, moist_end - qtot = qtot + scalars(iq, k, iCell) - end do +! qtotal = 0. +! do iq = moist_start, moist_end +! qtotal = qtotal + scalars(iq, k, iCell) +! end do + qtotal = qtot(k,iCell) - cofwt(k,iCell) = .5*dtseps*rcv*zz(k,iCell)*gravity*rb(k,iCell)/(1.+qtot) & + cofwt(k,iCell) = .5*dtseps*rcv*zz(k,iCell)*gravity*rb(k,iCell)/(1.+qtotal) & *p(k,iCell)/((rtb(k,iCell)+rt(k,iCell))*pb(k,iCell)) +! cofwt(k,iCell) = 0. end do a_tri(1,iCell) = 0. ! note, this value is never used @@ -903,6 +1610,7 @@ subroutine atm_compute_vert_imp_coefs(state, mesh, diag, configs, nVertLevels, d gamma_tri(1,iCell) = 0. alpha_tri(1,iCell) = 0. ! 
note, this value is never used +!DIR$ IVDEP do k=2,nVertLevels a_tri(k,iCell) = -cofwz(k ,iCell)* coftz(k-1,iCell)*rdzw(k-1)*zz(k-1,iCell) & +cofwr(k ,iCell)* cofrz(k-1 ) & @@ -917,6 +1625,7 @@ subroutine atm_compute_vert_imp_coefs(state, mesh, diag, configs, nVertLevels, d -cofwr(k ,iCell)* cofrz(k ) & +cofwt(k ,iCell)* coftz(k+1,iCell)*rdzw(k ) end do +!MGD VECTOR DEPENDENCE do k=2,nVertLevels alpha_tri(k,iCell) = 1./(b_tri(k)-a_tri(k,iCell)*gamma_tri(k-1,iCell)) gamma_tri(k,iCell) = c_tri(k)*alpha_tri(k,iCell) @@ -924,11 +1633,12 @@ subroutine atm_compute_vert_imp_coefs(state, mesh, diag, configs, nVertLevels, d end do ! loop over cells - end subroutine atm_compute_vert_imp_coefs + end subroutine atm_compute_vert_imp_coefs_work -!------------------------ - subroutine atm_set_smlstep_pert_variables( tend, diag, mesh, configs ) + subroutine atm_set_smlstep_pert_variables( tend, diag, mesh, configs, & + cellStart, cellEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, edgeSolveStart, edgeSolveEnd) ! following Klemp et al MWR 2007, we use preturbation variables ! in the acoustic-step integration. 
This routine computes those @@ -942,40 +1652,39 @@ subroutine atm_set_smlstep_pert_variables( tend, diag, mesh, configs ) type (mpas_pool_type), intent(inout) :: diag type (mpas_pool_type), intent(inout) :: mesh type (mpas_pool_type), intent(in) :: configs + integer, intent(in) :: cellStart, cellEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, edgeSolveStart, edgeSolveEnd - integer :: iCell, iEdge, k, cell1, cell2 - real (kind=RKIND), pointer :: coef_3rd_order - integer, pointer :: config_theta_adv_order - integer, pointer :: nCellsSolve, nCells, nVertLevels, nEdges - integer, dimension(:,:), pointer :: cellsOnEdge - real (kind=RKIND), dimension(:), pointer :: fzm, fzp, dvEdge, areaCell - real (kind=RKIND) :: flux + integer, pointer :: nCells, nEdges, nCellsSolve + integer, dimension(:), pointer :: nEdgesOnCell + integer, dimension(:,:), pointer :: cellsOnEdge, edgesOnCell + real (kind=RKIND), dimension(:), pointer :: fzm, fzp real (kind=RKIND), dimension(:,:), pointer :: ruAvg, wwAvg - real (kind=RKIND), dimension(:,:,:), pointer :: zb, zb3 + real (kind=RKIND), dimension(:,:,:), pointer :: zb, zb3, zb_cell, zb3_cell real (kind=RKIND), dimension(:,:), pointer :: zz real (kind=RKIND), dimension(:,:), pointer :: w_tend, u_tend real (kind=RKIND), dimension(:,:), pointer :: rho_pp, rho_p_save, rho_p real (kind=RKIND), dimension(:,:), pointer :: ru_p, ru, ru_save real (kind=RKIND), dimension(:,:), pointer :: rtheta_pp, rtheta_p_save, rtheta_p, rtheta_pp_old real (kind=RKIND), dimension(:,:), pointer :: rw_p, rw_save, rw + real (kind=RKIND), dimension(:,:), pointer :: edgesOnCell_sign - call mpas_pool_get_config(configs, 'config_coef_3rd_order', coef_3rd_order) - call mpas_pool_get_config(configs, 'config_theta_adv_order', config_theta_adv_order) - call mpas_pool_get_dimension(mesh, 'nCells', nCells) call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) - call 
mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) call mpas_pool_get_array(mesh, 'zz', zz) call mpas_pool_get_array(mesh, 'zb', zb) call mpas_pool_get_array(mesh, 'zb3', zb3) + call mpas_pool_get_array(mesh, 'zb_cell', zb_cell) + call mpas_pool_get_array(mesh, 'zb3_cell', zb3_cell) call mpas_pool_get_array(mesh, 'fzm', fzm) call mpas_pool_get_array(mesh, 'fzp', fzp) - call mpas_pool_get_array(mesh, 'dvEdge', dvEdge) - call mpas_pool_get_array(mesh, 'areaCell', areaCell) call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) + call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) + call mpas_pool_get_array(mesh, 'edgesOnCell', edgesOnCell) + call mpas_pool_get_array(mesh, 'edgesOnCell_sign', edgesOnCell_sign) call mpas_pool_get_array(tend, 'w', w_tend) call mpas_pool_get_array(tend, 'u', u_tend) @@ -1000,56 +1709,101 @@ subroutine atm_set_smlstep_pert_variables( tend, diag, mesh, configs ) call mpas_pool_get_array(diag, 'rw_save', rw_save) call mpas_pool_get_array(diag, 'rw', rw) - if (config_theta_adv_order /= 3) coef_3rd_order = 0.0 + call atm_set_smlstep_pert_variables_work(nCells, nEdges, nCellsSolve, & + nEdgesOnCell, cellsOnEdge, edgesOnCell, fzm, fzp, ruAvg, wwAvg, zb, zb3, zb_cell, zb3_cell, & + zz, w_tend, u_tend, rho_pp, rho_p_save, rho_p, ru_p, ru, ru_save, & + rtheta_pp, rtheta_p_save, rtheta_p, rtheta_pp_old, rw_p, rw_save, rw, edgesOnCell_sign, & + cellStart, cellEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, edgeSolveStart, edgeSolveEnd) + + + end subroutine atm_set_smlstep_pert_variables + + + subroutine atm_set_smlstep_pert_variables_work(nCells, nEdges, nCellsSolve, & + nEdgesOnCell, cellsOnEdge, edgesOnCell, fzm, fzp, ruAvg, wwAvg, zb, zb3, zb_cell, zb3_cell, & + zz, w_tend, u_tend, rho_pp, rho_p_save, rho_p, ru_p, ru, ru_save, & + rtheta_pp, rtheta_p_save, rtheta_p, rtheta_pp_old, rw_p, rw_save, rw, edgesOnCell_sign, & + cellStart, cellEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, edgeSolveStart, 
edgeSolveEnd) - ! set the acoustic step perturbation variables by subtracting the RK timestep variables - ! from their at the previous RK substep. + use mpas_atm_dimensions - rho_pp = rho_p_save - rho_p - ru_p = ru_save - ru - rtheta_pp = rtheta_p_save - rtheta_p - rtheta_pp_old = rtheta_pp - rw_p = rw_save - rw + implicit none + + + ! + ! Dummy arguments + ! + integer, intent(in) :: nCells, nEdges, nCellsSolve + + integer, intent(in) :: cellStart, cellEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, edgeSolveStart, edgeSolveEnd + + integer, dimension(nCells+1) :: nEdgesOnCell + integer, dimension(2,nEdges+1) :: cellsOnEdge + integer, dimension(maxEdges,nCells+1) :: edgesOnCell + real (kind=RKIND), dimension(nVertLevels) :: fzm + real (kind=RKIND), dimension(nVertLevels) :: fzp + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ruAvg + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: wwAvg + real (kind=RKIND), dimension(nVertLevels+1,2,nEdges+1) :: zb + real (kind=RKIND), dimension(nVertLevels+1,2,nEdges+1) :: zb3 + real (kind=RKIND), dimension(nVertLevels+1,maxEdges,nCells+1) :: zb_cell + real (kind=RKIND), dimension(nVertLevels+1,maxEdges,nCells+1) :: zb3_cell + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: zz + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: w_tend + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: u_tend + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rho_pp + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rho_p_save + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rho_p + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ru_p + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ru + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ru_save + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rtheta_pp + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rtheta_p_save + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rtheta_p + real 
(kind=RKIND), dimension(nVertLevels,nCells+1) :: rtheta_pp_old + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: rw_p + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: rw_save + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: rw + real (kind=RKIND), dimension(maxEdges,nCells+1) :: edgesOnCell_sign + + ! + ! Local variables + ! + integer :: iCell, iEdge, i, k + real (kind=RKIND) :: flux ! we solve for omega instead of w (see Klemp et al MWR 2007), ! so here we change the w_p tendency to an omega_p tendency - do iCell = 1, nCellsSolve - do k = 2, nVertLevels - w_tend(k,iCell) = ( fzm(k) * zz(k,iCell) + fzp(k) * zz(k-1,iCell) ) * w_tend(k,iCell) - end do - end do - ! here we need to compute the omega tendency in a manner consistent with our diagnosis of omega. ! this requires us to use the same flux divergence as is used in the theta eqn - see Klemp et al MWR 2003. - do iEdge = 1, nEdges - - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - +!! do iCell=cellStart,cellEnd + do iCell=cellSolveStart,cellSolveEnd + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) +!DIR$ IVDEP + do k = 2, nVertLevels + flux = edgesOnCell_sign(i,iCell) * (fzm(k) * u_tend(k,iEdge) + fzp(k) * u_tend(k-1,iEdge)) + w_tend(k,iCell) = w_tend(k,iCell) & + - (zb_cell(k,i,iCell) + sign(1.0_RKIND, u_tend(k,iEdge)) * zb3_cell(k,i,iCell)) * flux + end do + end do +!DIR$ IVDEP do k = 2, nVertLevels - flux = fzm(k) * u_tend(k,iEdge) + fzp(k) * u_tend(k-1,iEdge) - w_tend(k,cell2) = w_tend(k,cell2) & - + (zb(k,2,iEdge) + coef_3rd_order * sign(1.0_RKIND, u_tend(k,iEdge)) * zb3(k,2,iEdge)) * flux & - * (fzm(k) * zz(k,cell2) + fzp(k) * zz(k-1,cell2)) - w_tend(k,cell1) = w_tend(k,cell1) & - - (zb(k,1,iEdge) + coef_3rd_order * sign(1.0_RKIND, u_tend(k,iEdge)) * zb3(k,1,iEdge)) * flux & - * (fzm(k) * zz(k,cell1) + fzp(k) * zz(k-1,cell1)) + w_tend(k,iCell) = ( fzm(k) * zz(k,iCell) + fzp(k) * zz(k-1,iCell) ) * w_tend(k,iCell) end do - end do - ! 
ruAvg and wwAvg will store the mass fluxes averaged over the acoustic steps for the subsequent scalar transport. - - ruAvg(:,:) = 0.0 - wwAvg(:,:) = 0.0 - - end subroutine atm_set_smlstep_pert_variables + end subroutine atm_set_smlstep_pert_variables_work -!------------------------------- - subroutine atm_advance_acoustic_step( state, diag, tend, mesh, configs, nCells, nVertLevels, dts ) + subroutine atm_advance_acoustic_step( state, diag, tend, mesh, configs, nCells, nVertLevels, dts, small_step, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd) ! This subroutine performs the entire acoustic step update, following Klemp et al MWR 2007, ! using forward-backward vertically implicit integration. @@ -1064,51 +1818,57 @@ subroutine atm_advance_acoustic_step( state, diag, tend, mesh, configs, nCells, type (mpas_pool_type), intent(inout) :: tend type (mpas_pool_type), intent(inout) :: mesh type (mpas_pool_type), intent(in) :: configs + integer, intent(in) :: small_step ! acoustic step number integer, intent(in) :: nCells ! for allocating stack variables integer, intent(in) :: nVertLevels ! 
for allocating stack variables real (kind=RKIND), intent(in) :: dts + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd + real (kind=RKIND), dimension(nVertLevels) :: du + real (kind=RKIND), dimension(nVertLevels+1) :: dpzx + real (kind=RKIND), dimension(:,:), pointer :: rho_zz, theta_m, ru_p, rw_p, rtheta_pp, & - rtheta_pp_old, zz, exner, cqu, ruAvg, & - wwAvg, rho_pp, cofwt, coftz, zx, & + rtheta_pp_old, zz, exner, cqu, ruAvg, & + wwAvg, rho_pp, cofwt, coftz, zxu, & a_tri, alpha_tri, gamma_tri, dss, & tend_ru, tend_rho, tend_rt, tend_rw, & - zgrid, cofwr, cofwz, w, h_divergence - real (kind=RKIND), dimension(:), pointer :: fzm, fzp, rdzw, dcEdge, AreaCell, cofrz, dvEdge + zgrid, cofwr, cofwz, w, divergence_3d - real (kind=RKIND), dimension(:,:), pointer :: cpr, cpl, pzp, pzm - integer, dimension(:,:), pointer :: cellsOnEdge +! redefine ru_p to be perturbation from time t, change 3a ! temporary + real (kind=RKIND), dimension(:,:), pointer :: ru + real (kind=RKIND), dimension(:,:), pointer :: ru_save +! redefine rw_p to be perturbation from time t, change 3a ! 
temporary + real (kind=RKIND), dimension(:,:), pointer :: rw + real (kind=RKIND), dimension(:,:), pointer :: rw_save - real (kind=RKIND) :: c2, rcv - real (kind=RKIND), dimension( nVertLevels ) :: du - real (kind=RKIND), dimension( nVertLevels + 1 ) :: dpzx - real (kind=RKIND), dimension( nVertLevels, nCells+1 ) :: ts, rs + real (kind=RKIND), dimension(:), pointer :: fzm, fzp, rdzw, dcEdge, invDcEdge, invAreaCell, cofrz, dvEdge + + integer, dimension(:), pointer :: nEdgesOnCell + integer, dimension(:,:), pointer :: cellsOnEdge, edgesOnCell + real (kind=RKIND), dimension(:,:), pointer :: edgesOnCell_sign - integer :: cell1, cell2, iEdge, iCell, k - real (kind=RKIND) :: pgrad, flux, resm - real (kind=RKIND), pointer :: epssm, smdiv + real (kind=RKIND), pointer :: epssm, smdiv, smdiv_p_forward real (kind=RKIND), pointer :: cf1, cf2, cf3 - real (kind=RKIND) :: pr, pl - integer :: kr, kl integer, pointer :: nEdges, nCellsSolve - logical, parameter :: debug = .false. - logical, parameter :: debug1 = .false. - logical, pointer :: newpx - -!-- call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) + call mpas_pool_get_array(mesh, 'edgesOnCell', edgesOnCell) + call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) + call mpas_pool_get_array(mesh, 'edgesOnCell_sign', edgesOnCell_sign) call mpas_pool_get_array(state, 'rho_zz', rho_zz, 2) - call mpas_pool_get_array(state, 'theta_m', theta_m, 2) +! call mpas_pool_get_array(state, 'theta_m', theta_m, 2) + call mpas_pool_get_array(state, 'theta_m', theta_m, 1) +! change needed for rw_p, change 6 (see rayleigh damping) call mpas_pool_get_array(state, 'w', w, 2) +! 
call mpas_pool_get_array(state, 'w', w, 1) call mpas_pool_get_array(diag, 'rtheta_pp', rtheta_pp) call mpas_pool_get_array(diag, 'rtheta_pp_old', rtheta_pp_old) - call mpas_pool_get_array(diag, 'h_divergence', h_divergence) call mpas_pool_get_array(diag, 'ru_p', ru_p) call mpas_pool_get_array(diag, 'rw_p', rw_p) call mpas_pool_get_array(diag, 'exner', exner) @@ -1116,6 +1876,7 @@ subroutine atm_advance_acoustic_step( state, diag, tend, mesh, configs, nCells, call mpas_pool_get_array(diag, 'ruAvg', ruAvg) call mpas_pool_get_array(diag, 'wwAvg', wwAvg) call mpas_pool_get_array(diag, 'rho_pp', rho_pp) + call mpas_pool_get_array(diag, 'divergence_3d', divergence_3d) call mpas_pool_get_array(diag, 'cofwt', cofwt) call mpas_pool_get_array(diag, 'coftz', coftz) call mpas_pool_get_array(diag, 'cofrz', cofrz) @@ -1126,8 +1887,6 @@ subroutine atm_advance_acoustic_step( state, diag, tend, mesh, configs, nCells, call mpas_pool_get_array(diag, 'gamma_tri', gamma_tri) call mpas_pool_get_array(mesh, 'dss', dss) - call mpas_pool_get_array(mesh, 'pzp', pzp) - call mpas_pool_get_array(mesh, 'pzm', pzm) call mpas_pool_get_array(tend, 'u', tend_ru) call mpas_pool_get_array(tend, 'rho_zz', tend_rho) @@ -1135,14 +1894,15 @@ subroutine atm_advance_acoustic_step( state, diag, tend, mesh, configs, nCells, call mpas_pool_get_array(tend, 'w', tend_rw) call mpas_pool_get_array(mesh, 'zz', zz) - call mpas_pool_get_array(mesh, 'zx', zx) + call mpas_pool_get_array(mesh, 'zxu', zxu) call mpas_pool_get_array(mesh, 'zgrid', zgrid) call mpas_pool_get_array(mesh, 'fzm', fzm) call mpas_pool_get_array(mesh, 'fzp', fzp) call mpas_pool_get_array(mesh, 'rdzw', rdzw) call mpas_pool_get_array(mesh, 'dcEdge', dcEdge) + call mpas_pool_get_array(mesh, 'invDcEdge', invDcEdge) call mpas_pool_get_array(mesh, 'dvEdge', dvEdge) - call mpas_pool_get_array(mesh, 'areaCell', areaCell) + call mpas_pool_get_array(mesh, 'invAreaCell', invAreaCell) call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) call 
mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) @@ -1151,215 +1911,316 @@ subroutine atm_advance_acoustic_step( state, diag, tend, mesh, configs, nCells, call mpas_pool_get_array(mesh, 'cf2', cf2) call mpas_pool_get_array(mesh, 'cf3', cf3) - call mpas_pool_get_array(mesh, 'cpr', cpr) - call mpas_pool_get_array(mesh, 'cpl', cpl) - - call mpas_pool_get_config(configs, 'config_newpx', newpx) +! redefine ru_p to be perturbation from time t, change 3b ! temporary + call mpas_pool_get_array(diag, 'ru', ru) + call mpas_pool_get_array(diag, 'ru_save', ru_save) +! redefine rw_p to be perturbation from time t, change 3b ! temporary + call mpas_pool_get_array(diag, 'rw', rw) + call mpas_pool_get_array(diag, 'rw_save', rw_save) ! epssm is the offcentering coefficient for the vertically implicit integration. - ! smdiv is the 3D divergence-damping coefficient. + ! smdiv is the 3D divergence-damping coefficients. call mpas_pool_get_config(configs, 'config_epssm', epssm) call mpas_pool_get_config(configs, 'config_smdiv', smdiv) + call mpas_pool_get_config(configs, 'config_smdiv_p_forward', smdiv_p_forward) + call atm_advance_acoustic_step_work(nCells, nEdges, nCellsSolve, cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & + rho_zz, theta_m, ru_p, rw_p, rtheta_pp, rtheta_pp_old, zz, exner, cqu, ruAvg, wwAvg, & + rho_pp, cofwt, coftz, zxu, a_tri, alpha_tri, gamma_tri, dss, tend_ru, tend_rho, tend_rt, & + tend_rw, zgrid, cofwr, cofwz, w, ru, ru_save, rw, rw_save, divergence_3d, fzm, fzp, rdzw, dcEdge, invDcEdge, & + invAreaCell, cofrz, dvEdge, nEdgesOnCell, cellsOnEdge, edgesOnCell, edgesOnCell_sign, & + dts, small_step, epssm, smdiv, smdiv_p_forward, cf1, cf2, cf3 & + ) - rcv = rgas/(cp-rgas) - c2 = cp*rcv - resm = (1.-epssm)/(1.+epssm) + end subroutine atm_advance_acoustic_step - ts = 0. - rs = 0. - ! 
acoustic step divergence damping - forward weight rtheta_pp - see Klemp et al MWR 2007 - rtheta_pp_old = rtheta_pp + smdiv*(rtheta_pp - rtheta_pp_old) + subroutine atm_advance_acoustic_step_work(nCells, nEdges, nCellsSolve, cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & + rho_zz, theta_m, ru_p, rw_p, rtheta_pp, rtheta_pp_old, zz, exner, cqu, ruAvg, wwAvg, & + rho_pp, cofwt, coftz, zxu, a_tri, alpha_tri, gamma_tri, dss, tend_ru, tend_rho, tend_rt, & + tend_rw, zgrid, cofwr, cofwz, w, ru, ru_save, rw, rw_save, divergence_3d, fzm, fzp, rdzw, dcEdge, invDcEdge, & + invAreaCell, cofrz, dvEdge, nEdgesOnCell, cellsOnEdge, edgesOnCell, edgesOnCell_sign, & + dts, small_step, epssm, smdiv, smdiv_p_forward, cf1, cf2, cf3 & + ) - if (debug) write(0,*) ' updating ru_p ' + use mpas_atm_dimensions - ! forward-backward acoustic step integration. - ! begin by updating the horizontal velocity u, - ! and accumulating the contribution from the updated u to the other tendencies. + implicit none - ! we are looping over all edges, but only computing on edges of owned cells. This will include updates of - ! all owned edges plus some edges that are owned by other blocks. We perform these redundant computations - ! so that we do not have to communicate updates of u to update the cell variables (rho, w, and theta). - do iEdge = 1, nEdges - - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) + ! + ! Dummy arguments + ! 
+ integer, intent(in) :: nCells, nEdges, nCellsSolve + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd + + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rho_zz + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: theta_m + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ru_p + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: rw_p + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rtheta_pp + + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rtheta_pp_old + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: divergence_3d + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: zz + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: exner + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: cqu + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ruAvg + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: wwAvg + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rho_pp + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: cofwt + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: coftz + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: zxu + + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: a_tri + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: alpha_tri + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: gamma_tri + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: dss + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: tend_ru + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: tend_rho + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: tend_rt + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: tend_rw + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: zgrid + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: cofwr + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: cofwz + + real 
(kind=RKIND), dimension(nVertLevels+1,nCells+1) :: w + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ru + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ru_save + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: rw + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: rw_save + + real (kind=RKIND), dimension(nVertLevels) :: fzm + real (kind=RKIND), dimension(nVertLevels) :: fzp + real (kind=RKIND), dimension(nVertLevels) :: rdzw + real (kind=RKIND), dimension(nEdges+1) :: dcEdge + real (kind=RKIND), dimension(nEdges+1) :: invDcEdge + real (kind=RKIND), dimension(nCells+1) :: invAreaCell + real (kind=RKIND), dimension(nVertLevels) :: cofrz + real (kind=RKIND), dimension(nEdges+1) :: dvEdge + + integer, dimension(nCells+1) :: nEdgesOnCell + integer, dimension(2,nEdges+1) :: cellsOnEdge + integer, dimension(maxEdges,nCells+1) :: edgesOnCell + real (kind=RKIND), dimension(maxEdges,nCells+1) :: edgesOnCell_sign + + integer, intent(in) :: small_step + real (kind=RKIND), intent(in) :: dts, epssm, smdiv, smdiv_p_forward, cf1, cf2, cf3 + real (kind=RKIND), dimension(nVertLevels) :: ts, rs - ! update edges for block-owned cells - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve ) then + + ! + ! Local variables + ! 
+ integer :: cell1, cell2, iEdge, iCell, i, k + real (kind=RKIND) :: c2, rcv, rtheta_pp_tmp + real (kind=RKIND) :: pgrad, flux, resm, rdts - if (newpx) then - k = 1 - pr = cpr(k ,iEdge)*zz(k ,cell2)*rtheta_pp_old(k ,cell2) & - + cpr(k+1,iEdge)*zz(k+1,cell2)*rtheta_pp_old(k+1,cell2) & - + cpr(k+2,iEdge)*zz(k+2,cell2)*rtheta_pp_old(k+2,cell2) + rcv = rgas / (cp - rgas) + c2 = cp * rcv + resm = (1.0 - epssm) / (1.0 + epssm) + rdts = 1./dts - pl = cpl(k ,iEdge)*zz(k ,cell1)*rtheta_pp_old(k ,cell1) & - + cpl(k+1,iEdge)*zz(k+1,cell1)*rtheta_pp_old(k+1,cell1) & - + cpl(k+2,iEdge)*zz(k+2,cell1)*rtheta_pp_old(k+2,cell1) - pgrad = 2./(zz(k,cell1)+zz(k,cell2))*(pr-pl)/dcEdge(iEdge) - pgrad = 0.5*c2*(exner(k,cell1)+exner(k,cell2))*pgrad - du(k) = dts*(tend_ru(k,iEdge) - cqu(k,iEdge) * pgrad) + if(small_step /= 1) then ! not needed on first small step - do k=2,nVertLevels + do iCell = cellStart,cellEnd + ! acoustic step divergence damping - forward weight rtheta_pp - see Klemp et al MWR 2007 + do k = 1,nVertLevels + rtheta_pp_tmp = rtheta_pp(k,iCell) + rtheta_pp(k,iCell) = (rtheta_pp(k,iCell) + smdiv_p_forward * (rtheta_pp(k,iCell)-rtheta_pp_old(k,iCell)))*zz(k,iCell) + rtheta_pp_old(k,iCell) = rtheta_pp_tmp + end do + end do + +!$OMP BARRIER - kr = min(nVertLevels,k+ nint(.5-sign(0.5_RKIND,zx(k,iEdge)+zx(k+1,iEdge)))) - kl = min(nVertLevels,2*k+1-kr) - pr = zz(k,cell2)*rtheta_pp_old(k ,cell2)+.5*(zgrid(k ,cell1) +zgrid(k +1,cell1) & - -zgrid(k ,cell2) -zgrid(k +1,cell2)) & - /(zgrid(kr+1,cell2) -zgrid(kr-1,cell2)) & - *(zz(kr,cell2)*rtheta_pp_old(kr,cell2)-zz(kr-1,cell2)*rtheta_pp_old(kr-1,cell2)) - pl = zz(k,cell1)*rtheta_pp_old(k ,cell1)+.5*(zgrid(k ,cell2) +zgrid(k +1,cell2) & - -zgrid(k ,cell1) -zgrid(k +1,cell1)) & - /(zgrid(kl+1,cell1) -zgrid(kl-1,cell1)) & - *(zz(kl,cell1)*rtheta_pp_old(kl,cell1)-zz(kl-1,cell1)*rtheta_pp_old(kl-1,cell1)) - pgrad = 2./(zz(k,cell1)+zz(k,cell2))*(pr-pl)/dcEdge(iEdge) - pgrad = 0.5*c2*(exner(k,cell1)+exner(k,cell2))*pgrad - du(k) = 
dts*(tend_ru(k,iEdge) - cqu(k,iEdge) * pgrad) - end do + ! forward-backward acoustic step integration. + ! begin by updating the horizontal velocity u, + ! and accumulating the contribution from the updated u to the other tendencies. - else + ! we are looping over all edges, but only computing on edges of owned cells. This will include updates of + ! all owned edges plus some edges that are owned by other blocks. We perform these redundant computations + ! so that we do not have to communicate updates of u to update the cell variables (rho, w, and theta). - k = 1 - dpzx(k) = .25*(zx(k,iEdge)+zx(k+1,iEdge)) & - *(pzm(k,cell2)*(zz(k+1,cell2)*rtheta_pp_old(k+1,cell2) & - -zz(k ,cell2)*rtheta_pp_old(k ,cell2)) & - +pzm(k,cell1)*(zz(k+1,cell1)*rtheta_pp_old(k+1,cell1) & - -zz(k ,cell1)*rtheta_pp_old(k ,cell1)) & - +pzp(k,cell2)*(zz(k+2,cell2)*rtheta_pp_old(k+2,cell2) & - -zz(k ,cell2)*rtheta_pp_old(k ,cell2)) & - +pzp(k,cell1)*(zz(k+2,cell1)*rtheta_pp_old(k+2,cell1) & - -zz(k ,cell1)*rtheta_pp_old(k ,cell1))) - - do k=2,nVertLevels-1 - dpzx(k) = .25*(zx(k,iEdge)+zx(k+1,iEdge)) & - *(pzp(k,cell2)*(zz(k+1,cell2)*rtheta_pp_old(k+1,cell2) & - -zz(k ,cell2)*rtheta_pp_old(k ,cell2)) & - +pzm(k,cell2)*(zz(k ,cell2)*rtheta_pp_old(k ,cell2) & - -zz(k-1,cell2)*rtheta_pp_old(k-1,cell2)) & - +pzp(k,cell1)*(zz(k+1,cell1)*rtheta_pp_old(k+1,cell1) & - -zz(k ,cell1)*rtheta_pp_old(k ,cell1)) & - +pzm(k,cell1)*(zz(k ,cell1)*rtheta_pp_old(k ,cell1) & - -zz(k-1,cell1)*rtheta_pp_old(k-1,cell1))) - end do + !MGD this loop will not be very load balanced with if-test below - k = nVertLevels - dpzx(k) = .25*(zx(k,iEdge)+zx(k+1,iEdge)) & - *(pzm(k,cell2)*(zz(k ,cell2)*rtheta_pp_old(k ,cell2) & - -zz(k-1,cell2)*rtheta_pp_old(k-1,cell2)) & - +pzm(k,cell1)*(zz(k ,cell1)*rtheta_pp_old(k ,cell1) & - -zz(k-1,cell1)*rtheta_pp_old(k-1,cell1))) + do iEdge=edgeStart,edgeEnd ! MGD do we really just need edges touching owned cells? 
+ + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) - do k=1,nVertLevels - pgrad = ((rtheta_pp_old(k,cell2)*zz(k,cell2) & - -rtheta_pp_old(k,cell1)*zz(k,cell1))/dcEdge(iEdge) & - -dpzx(k))/(.5*(zz(k,cell2)+zz(k,cell1))) - pgrad = 0.5*c2*(exner(k,cell1)+exner(k,cell2))*pgrad - du(k) = dts*(tend_ru(k,iEdge) - cqu(k,iEdge) * pgrad) - end do - end if + ! update edges for block-owned cells + if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve ) then - do k=1,nVertLevels +!DIR$ IVDEP + do k=1,nVertLevels +!! pgrad = ((zz_rtheta_pp(k,cell2)-rtheta_pp_old(k,cell1))*invDcEdge(iEdge) )/(.5*(zz(k,cell2)+zz(k,cell1))) + pgrad = ((rtheta_pp(k,cell2)-rtheta_pp(k,cell1))*invDcEdge(iEdge) )/(.5*(zz(k,cell2)+zz(k,cell1))) + pgrad = cqu(k,iEdge)*0.5*c2*(exner(k,cell1)+exner(k,cell2))*pgrad + pgrad = pgrad + 0.5*zxu(k,iEdge)*gravity*(rho_pp(k,cell1)+rho_pp(k,cell2)) + ru_p(k,iEdge) = ru_p(k,iEdge) + dts*(tend_ru(k,iEdge) - pgrad) & + - smdiv*dcEdge(iEdge)*(divergence_3d(k,cell2)-divergence_3d(k,cell1)) + end do - ! full update of ru_p + ! accumulate ru_p for use later in scalar transport +!DIR$ IVDEP + do k=1,nVertLevels + ruAvg(k,iEdge) = ruAvg(k,iEdge) + ru_p(k,iEdge) + end do - ru_p(k,iEdge) = ru_p(k,iEdge) + du(k) + end if ! end test for block-owned cells - ! add horizontal fluxes using updated ru_p into density update, rtheta update and w update + end do ! end loop over edges - flux = dts*dvEdge(iEdge)*ru_p(k,iEdge) - rs(k,cell1) = rs(k,cell1)-flux/AreaCell(cell1) - rs(k,cell2) = rs(k,cell2)+flux/AreaCell(cell2) - - flux = flux*0.5*(theta_m(k,cell2)+theta_m(k,cell1)) - ts(k,cell1) = ts(k,cell1)-flux/AreaCell(cell1) - ts(k,cell2) = ts(k,cell2)+flux/AreaCell(cell2) + else ! this is all that us needed for ru_p update for first acoustic step in RK substep - ! accumulate ru_p for use later in scalar transport + do iEdge=edgeStart,edgeEnd ! MGD do we really just need edges touching owned cells? 
- ruAvg(k,iEdge) = ruAvg(k,iEdge) + ru_p(k,iEdge) + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) - end do + ! update edges for block-owned cells + if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve ) then + +!DIR$ IVDEP + do k=1,nVertLevels + ru_p(k,iEdge) = dts*tend_ru(k,iEdge) - smdiv*dcEdge(iEdge)*(tend_rho(k,cell2)-tend_rho(k,cell1)) + end do +!DIR$ IVDEP + do k=1,nVertLevels +!! ruAvg(k,iEdge) = ruAvg(k,iEdge) + ru_p(k,iEdge) + ruAvg(k,iEdge) = ru_p(k,iEdge) + end do - end if ! end test for block-owned cells + end if ! end test for block-owned cells + end do ! end loop over edges - end do ! end loop over edges + end if ! test for first acoustic step - ! saving rtheta_pp before update for use in divergence damping in next acoustic step +!$OMP BARRIER - rtheta_pp_old(:,:) = rtheta_pp(:,:) + if (small_step == 1) then ! initialize here on first small timestep. + do iCell=cellStart,cellEnd + rtheta_pp_old(1:nVertLevels,iCell) = 0.0 + end do + end if + +!!!OMP BARRIER -- not needed, since rtheta_pp_old not used below when small_step == 1 + + do iCell=cellSolveStart,cellSolveEnd ! loop over all owned cells to solve + + ts(:) = 0.0 + rs(:) = 0.0 + + if(small_step == 1) then ! initialize here on first small timestep. + wwAvg(1:nVertLevels+1,iCell) = 0.0 + rho_pp(1:nVertLevels,iCell) = 0.0 + rtheta_pp(1:nVertLevels,iCell) = 0.0 +!MGD moved to loop above over all cells +! rtheta_pp_old(1:nVertLevels,iCell) = 0.0 + rw_p(:,iCell) = 0.0 + divergence_3d(1:nVertLevels,iCell) = 0. + else ! reset rtheta_pp to input value; + ! rtheta_pp_old stores input value for use in div damping on next acoustic step. + ! Save rho_pp to compute d_rho_pp/dt to get divergence for next acoustic filter application. 
+ rtheta_pp(1:nVertLevels,iCell) = rtheta_pp_old(1:nVertLevels,iCell) + divergence_3d(1:nVertLevels,iCell) = rho_pp(1:nVertLevels,iCell) + end if + + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) +!DIR$ IVDEP + do k=1,nVertLevels + flux = edgesOnCell_sign(i,iCell)*dts*dvEdge(iEdge)*ru_p(k,iEdge) * invAreaCell(iCell) + rs(k) = rs(k)-flux + ts(k) = ts(k)-flux*0.5*(theta_m(k,cell2)+theta_m(k,cell1)) + end do + end do ! vertically implicit acoustic and gravity wave integration. ! this follows Klemp et al MWR 2007, with the addition of an implicit Rayleigh damping of w ! serves as a gravity-wave absorbing layer, from Klemp et al 2008. - do iCell = 1, nCellsSolve - +!DIR$ IVDEP do k=1, nVertLevels - rs(k,iCell) = rho_pp(k,iCell) + dts*tend_rho(k,iCell) + rs(k,iCell) & - - cofrz(k)*resm*(rw_p(k+1,iCell)-rw_p(k,iCell)) - ts(k,iCell) = rtheta_pp(k,iCell) + dts*tend_rt(k,iCell) + ts(k,iCell) & - - resm*rdzw(k)*(coftz(k+1,iCell)*rw_p(k+1,iCell) & - -coftz(k,iCell)*rw_p(k,iCell)) + rs(k) = rho_pp(k,iCell) + dts*tend_rho(k,iCell) + rs(k) & + - cofrz(k)*resm*(rw_p(k+1,iCell)-rw_p(k,iCell)) + ts(k) = rtheta_pp(k,iCell) + dts*tend_rt(k,iCell) + ts(k) & + - resm*rdzw(k)*( coftz(k+1,iCell)*rw_p(k+1,iCell) & + -coftz(k,iCell)*rw_p(k,iCell)) end do +!DIR$ IVDEP do k=2, nVertLevels + wwavg(k,iCell) = wwavg(k,iCell) + 0.5*(1.0-epssm)*rw_p(k,iCell) + end do - wwavg(k,iCell) = wwavg(k,iCell) + 0.5*(1.-epssm)*rw_p(k,iCell) - +!DIR$ IVDEP + do k=2, nVertLevels rw_p(k,iCell) = rw_p(k,iCell) + dts*tend_rw(k,iCell) & - - cofwz(k,iCell)*((zz(k ,iCell)*ts (k ,iCell) & - -zz(k-1,iCell)*ts (k-1,iCell)) & + - cofwz(k,iCell)*((zz(k ,iCell)*ts(k) & + -zz(k-1,iCell)*ts(k-1)) & +resm*(zz(k ,iCell)*rtheta_pp(k ,iCell) & -zz(k-1,iCell)*rtheta_pp(k-1,iCell))) & - - cofwr(k,iCell)*((rs (k,iCell)+rs (k-1,iCell)) & + - cofwr(k,iCell)*((rs(k)+rs(k-1)) & +resm*(rho_pp(k,iCell)+rho_pp(k-1,iCell))) & - + cofwt(k ,iCell)*(ts (k 
,iCell)+resm*rtheta_pp(k ,iCell)) & - + cofwt(k-1,iCell)*(ts (k-1,iCell)+resm*rtheta_pp(k-1,iCell)) + + cofwt(k ,iCell)*(ts(k )+resm*rtheta_pp(k ,iCell)) & + + cofwt(k-1,iCell)*(ts(k-1)+resm*rtheta_pp(k-1,iCell)) end do ! tridiagonal solve sweeping up and then down the column +!MGD VECTOR DEPENDENCE do k=2,nVertLevels rw_p(k,iCell) = (rw_p(k,iCell)-a_tri(k,iCell)*rw_p(k-1,iCell))*alpha_tri(k,iCell) end do +!MGD VECTOR DEPENDENCE do k=nVertLevels,1,-1 rw_p(k,iCell) = rw_p(k,iCell) - gamma_tri(k,iCell)*rw_p(k+1,iCell) end do ! the implicit Rayleigh damping on w (gravity-wave absorbing) +!DIR$ IVDEP do k=2,nVertLevels - rw_p(k,iCell) = (rw_p(k,iCell)-dts*dss(k,iCell)* & + rw_p(k,iCell) = (rw_p(k,iCell) + (rw_save(k ,iCell) - rw(k ,iCell)) -dts*dss(k,iCell)* & (fzm(k)*zz (k,iCell)+fzp(k)*zz (k-1,iCell)) & *(fzm(k)*rho_zz(k,iCell)+fzp(k)*rho_zz(k-1,iCell)) & - *w(k,iCell) )/(1.+dts*dss(k,iCell)) - - ! accumulate (rho*omega)' for use later in scalar transport + *w(k,iCell) )/(1.0+dts*dss(k,iCell)) & + - (rw_save(k ,iCell) - rw(k ,iCell)) + end do - wwAvg(k,iCell) = wwAvg(k,iCell) + 0.5*(1.+epssm)*rw_p(k,iCell) - + ! accumulate (rho*omega)' for use later in scalar transport +!DIR$ IVDEP + do k=2,nVertLevels + wwAvg(k,iCell) = wwAvg(k,iCell) + 0.5*(1.0+epssm)*rw_p(k,iCell) end do ! update rho_pp and theta_pp given updated rw_p +!DIR$ IVDEP do k=1,nVertLevels - rho_pp(k,iCell) = rs(k,iCell) - cofrz(k) *(rw_p(k+1,iCell)-rw_p(k ,iCell)) - rtheta_pp(k,iCell) = ts(k,iCell) - rdzw(k)*(coftz(k+1,iCell)*rw_p(k+1,iCell) & + rho_pp(k,iCell) = rs(k) - cofrz(k) *(rw_p(k+1,iCell)-rw_p(k ,iCell)) + rtheta_pp(k,iCell) = ts(k) - rdzw(k)*(coftz(k+1,iCell)*rw_p(k+1,iCell) & -coftz(k ,iCell)*rw_p(k ,iCell)) + divergence_3d(k,iCell) = (rho_pp(k,iCell) - divergence_3d(k,iCell))*rdts end do end do ! 
end of loop over cells - end subroutine atm_advance_acoustic_step + end subroutine atm_advance_acoustic_step_work -!------------------------ - subroutine atm_recover_large_step_variables( state, diag, tend, mesh, configs, dt, ns, rk_step ) + subroutine atm_recover_large_step_variables( state, diag, tend, mesh, configs, dt, ns, rk_step, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd) ! reconstitute state variables from acoustic-step perturbation variables ! after the acoustic steps. The perturbation variables were originally set in @@ -1375,26 +2236,26 @@ subroutine atm_recover_large_step_variables( state, diag, tend, mesh, configs, d type (mpas_pool_type), intent(in) :: configs integer, intent(in) :: ns, rk_step real (kind=RKIND), intent(in) :: dt + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd real (kind=RKIND), dimension(:,:), pointer :: wwAvg, rw_save, w, rw, rw_p, rtheta_p, rtheta_pp, & rtheta_p_save, rt_diabatic_tend, rho_p, rho_p_save, & rho_pp, rho_zz, rho_base, ruAvg, ru_save, ru_p, u, ru, & exner, exner_base, rtheta_base, pressure_p, & - zz, theta_m, pressure_b, qvapor + zz, theta_m, pressure_b real (kind=RKIND), dimension(:,:,:), pointer :: scalars - real (kind=RKIND), dimension(:), pointer :: fzm, fzp, dvEdge, areaCell - real (kind=RKIND), dimension(:,:,:), pointer :: zb, zb3 - integer, dimension(:,:), pointer :: cellsOnEdge + real (kind=RKIND), dimension(:), pointer :: fzm, fzp + real (kind=RKIND), dimension(:,:,:), pointer :: zb, zb3, zb_cell, zb3_cell + real (kind=RKIND), dimension(:,:), pointer :: edgesOnCell_sign + integer, dimension(:,:), pointer :: cellsOnEdge, edgesOnCell + integer, dimension(:), pointer :: nEdgesOnCell - integer :: iCell, iEdge, k, cell1, cell2 + integer :: 
i, iCell, iEdge, k, cell1, cell2 integer, pointer :: nVertLevels, nCells, nCellsSolve, nEdges, nEdgesSolve - real (kind=RKIND) :: rcv, p0, flux - real (kind=RKIND), pointer :: cf1, cf2, cf3, coef_3rd_order - integer, pointer :: config_theta_adv_order - integer, pointer :: index_qv - - logical, parameter :: debug=.false. + real (kind=RKIND) :: invNs, rcv, p0, flux + real (kind=RKIND), pointer :: cf1, cf2, cf3 call mpas_pool_get_array(diag, 'wwAvg', wwAvg) @@ -1411,10 +2272,6 @@ subroutine atm_recover_large_step_variables( state, diag, tend, mesh, configs, d call mpas_pool_get_array(state, 'theta_m', theta_m, 2) call mpas_pool_get_array(state, 'scalars', scalars, 2) - call mpas_pool_get_dimension(state, 'index_qv', index_qv) - - qvapor => scalars(index_qv,:,:) ! MGD does this actually work? - call mpas_pool_get_array(state, 'rho_zz', rho_zz, 2) call mpas_pool_get_array(diag, 'rho_p', rho_p) call mpas_pool_get_array(diag, 'rho_p_save', rho_p_save) @@ -1436,11 +2293,14 @@ subroutine atm_recover_large_step_variables( state, diag, tend, mesh, configs, d call mpas_pool_get_array(mesh, 'zz', zz) call mpas_pool_get_array(mesh, 'zb', zb) call mpas_pool_get_array(mesh, 'zb3', zb3) + call mpas_pool_get_array(mesh, 'zb_cell', zb_cell) + call mpas_pool_get_array(mesh, 'zb3_cell', zb3_cell) call mpas_pool_get_array(mesh, 'fzm', fzm) call mpas_pool_get_array(mesh, 'fzp', fzp) - call mpas_pool_get_array(mesh, 'dvEdge', dvEdge) - call mpas_pool_get_array(mesh, 'areaCell', areaCell) call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) + call mpas_pool_get_array(mesh, 'edgesOnCell', edgesOnCell) + call mpas_pool_get_array(mesh, 'edgesOnCell_sign', edgesOnCell_sign) + call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) call mpas_pool_get_dimension(mesh, 'nCells', nCells) @@ -1452,127 +2312,212 @@ subroutine atm_recover_large_step_variables( state, diag, tend, mesh, configs, d call mpas_pool_get_array(mesh, 
'cf2', cf2) call mpas_pool_get_array(mesh, 'cf3', cf3) - call mpas_pool_get_config(configs, 'config_coef_3rd_order', coef_3rd_order) - call mpas_pool_get_config(configs, 'config_theta_adv_order', config_theta_adv_order) + + call atm_recover_large_step_variables_work(nCells, nEdges, nCellsSolve, nEdgesSolve, dt, ns, rk_step, & + wwAvg, rw_save, w, rw, rw_p, rtheta_p, rtheta_pp, rtheta_p_save, rt_diabatic_tend, rho_p, & + rho_p_save, rho_pp, rho_zz, rho_base, ruAvg, ru_save, ru_p, u, ru, exner, exner_base, & + rtheta_base, pressure_p, zz, theta_m, pressure_b, scalars, fzm, fzp, & + zb, zb3, zb_cell, zb3_cell, edgesOnCell_sign, cellsOnEdge, edgesOnCell, nEdgesOnCell, & + cf1, cf2, cf3, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd) + + end subroutine atm_recover_large_step_variables + + + subroutine atm_recover_large_step_variables_work(nCells, nEdges, nCellsSolve, nEdgesSolve, dt, ns, rk_step, & + wwAvg, rw_save, w, rw, rw_p, rtheta_p, rtheta_pp, rtheta_p_save, rt_diabatic_tend, rho_p, & + rho_p_save, rho_pp, rho_zz, rho_base, ruAvg, ru_save, ru_p, u, ru, exner, exner_base, & + rtheta_base, pressure_p, zz, theta_m, pressure_b, scalars, fzm, fzp, & + zb, zb3, zb_cell, zb3_cell, edgesOnCell_sign, cellsOnEdge, edgesOnCell, nEdgesOnCell, & + cf1, cf2, cf3, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd) + + use mpas_atm_dimensions + + implicit none + + + ! + ! Dummy arguments + ! 
+ integer, intent(in) :: nCells, nEdges, nCellsSolve, nEdgesSolve + integer, intent(in) :: ns, rk_step + real (kind=RKIND), intent(in) :: dt + + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: wwAvg + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: rw_save + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: w + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: rw + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: rw_p + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rtheta_p + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rtheta_pp + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rtheta_p_save + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rt_diabatic_tend + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rho_p + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rho_p_save + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rho_pp + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rho_zz + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rho_base + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ruAvg + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ru_save + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ru_p + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: u + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ru + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: exner + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: exner_base + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rtheta_base + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: pressure_p + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: zz + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: theta_m + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: pressure_b + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1) :: scalars + real (kind=RKIND), dimension(nVertLevels) :: fzm + real (kind=RKIND), dimension(nVertLevels) :: fzp + real 
(kind=RKIND), dimension(nVertLevels+1,2,nEdges+1) :: zb + real (kind=RKIND), dimension(nVertLevels+1,2,nEdges+1) :: zb3 + real (kind=RKIND), dimension(nVertLevels+1,maxEdges,nCells+1) :: zb_cell + real (kind=RKIND), dimension(nVertLevels+1,maxEdges,nCells+1) :: zb3_cell + real (kind=RKIND), dimension(maxEdges,nCells+1) :: edgesOnCell_sign + integer, dimension(2,nEdges+1) :: cellsOnEdge + integer, dimension(maxEdges,nCells+1) :: edgesOnCell + integer, dimension(nCells+1) :: nEdgesOnCell + + real (kind=RKIND) :: cf1, cf2, cf3 + + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd + + + ! + ! Local variables + ! + integer :: i, iCell, iEdge, k, cell1, cell2 + real (kind=RKIND) :: invNs, rcv, p0, flux + rcv = rgas/(cp-rgas) - p0 = 1.e+05 ! this should come from somewhere else... + p0 = 1.0e+05 ! this should come from somewhere else... - if (config_theta_adv_order /=3) coef_3rd_order = 0.0 + ! Avoid FP errors caused by a potential division by zero below by + ! initializing the "garbage cell" of rho_zz to a non-zero value + do k=1,nVertLevels + rho_zz(k,nCells+1) = 1.0 + end do ! compute new density everywhere so we can compute u from ru. ! we will also need it to compute theta_m below - do iCell = 1, nCells + invNs = 1 / real(ns,RKIND) - do k = 1, nVertLevels + do iCell=cellStart,cellEnd - rho_p(k,iCell) = rho_p(k,iCell) + rho_pp(k,iCell) +!DIR$ IVDEP + do k = 1, nVertLevels + rho_p(k,iCell) = rho_p_save(k,iCell) + rho_pp(k,iCell) rho_zz(k,iCell) = rho_p(k,iCell) + rho_base(k,iCell) end do - w(1,iCell) = 0. - do k = 2, nVertLevels - wwAvg(k,iCell) = rw(k,iCell) + (wwAvg(k,iCell) / float(ns)) - - rw(k,iCell) = rw(k,iCell) + rw_p(k,iCell) + w(1,iCell) = 0.0 +!DIR$ IVDEP + do k = 2, nVertLevels + wwAvg(k,iCell) = rw_save(k,iCell) + (wwAvg(k,iCell) * invNs) + rw(k,iCell) = rw_save(k,iCell) + rw_p(k,iCell) - ! 
pick up part of diagnosed w from omega - w(k,iCell) = rw(k,iCell)/( (fzm(k)*zz (k,iCell)+fzp(k)*zz (k-1,iCell)) & - *(fzm(k)*rho_zz(k,iCell)+fzp(k)*rho_zz(k-1,iCell)) ) + ! pick up part of diagnosed w from omega - divide by density later + w(k,iCell) = rw(k,iCell)/(fzm(k)*zz(k,iCell)+fzp(k)*zz(k-1,iCell)) + end do - w(nVertLevels+1,iCell) = 0. + + w(nVertLevels+1,iCell) = 0.0 if (rk_step == 3) then +!DIR$ IVDEP do k = 1, nVertLevels - rtheta_p(k,iCell) = rtheta_p(k,iCell) + rtheta_pp(k,iCell) & + rtheta_p(k,iCell) = rtheta_p_save(k,iCell) + rtheta_pp(k,iCell) & - dt * rho_zz(k,iCell) * rt_diabatic_tend(k,iCell) + theta_m(k,iCell) = (rtheta_p(k,iCell) + rtheta_base(k,iCell))/rho_zz(k,iCell) + exner(k,iCell) = (zz(k,iCell)*(rgas/p0)*(rtheta_p(k,iCell)+rtheta_base(k,iCell)))**rcv + ! pressure_p is perturbation pressure + pressure_p(k,iCell) = zz(k,iCell) * rgas * (exner(k,iCell)*rtheta_p(k,iCell)+rtheta_base(k,iCell) & + * (exner(k,iCell)-exner_base(k,iCell))) end do else +!DIR$ IVDEP do k = 1, nVertLevels - rtheta_p(k,iCell) = rtheta_p(k,iCell) + rtheta_pp(k,iCell) + rtheta_p(k,iCell) = rtheta_p_save(k,iCell) + rtheta_pp(k,iCell) + theta_m(k,iCell) = (rtheta_p(k,iCell) + rtheta_base(k,iCell))/rho_zz(k,iCell) end do end if - do k = 1, nVertLevels - theta_m(k,iCell) = (rtheta_p(k,iCell) + rtheta_base(k,iCell))/rho_zz(k,iCell) - exner(k,iCell) = (zz(k,iCell)*(rgas/p0)*(rtheta_p(k,iCell)+rtheta_base(k,iCell)))**rcv - ! pressure_p is perturbation pressure - pressure_p(k,iCell) = zz(k,iCell) * rgas * (exner(k,iCell)*rtheta_p(k,iCell)+rtheta_base(k,iCell) & - * (exner(k,iCell)-exner_base(k,iCell))) - end do - end do ! recover time-averaged ruAvg on all edges of owned cells (for upcoming scalar transport). ! we solved for these in the acoustic-step loop. ! we will compute ru and u here also, given we are here, even though we only need them on nEdgesSolve - ! Avoid FP errors caused by a potential division by zero below by - ! 
initializing the "garbage cell" of rho_zz to a non-zero value - rho_zz(:,nCells+1) = 1.0 +!$OMP BARRIER - do iEdge = 1, nEdges + do iEdge=edgeStart,edgeEnd cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) +!DIR$ IVDEP do k = 1, nVertLevels - ruAvg(k,iEdge) = ru(k,iEdge) + (ruAvg(k,iEdge) / float(ns)) - ru(k,iEdge) = ru(k,iEdge) + ru_p(k,iEdge) + ruAvg(k,iEdge) = ru_save(k,iEdge) + (ruAvg(k,iEdge) * invNs) + ru(k,iEdge) = ru_save(k,iEdge) + ru_p(k,iEdge) u(k,iEdge) = 2.*ru(k,iEdge)/(rho_zz(k,cell1)+rho_zz(k,cell2)) end do + end do + +!$OMP BARRIER + + do iCell=cellStart,cellEnd ! finish recovering w from (rho*omega)_p. as when we formed (rho*omega)_p from u and w, we need ! to use the same flux-divergence operator as is used for the horizontal theta transport ! (See Klemp et al 2003). - flux = cf1*ru(1,iEdge) + cf2*ru(2,iEdge) + cf3*ru(3,iEdge) - w(1,cell2) = w(1,cell2) - (zb(1,2,iEdge) + sign(1.0_RKIND,flux)*coef_3rd_order*zb3(1,2,iEdge)) & - *flux/(cf1*rho_zz(1,cell2)+cf2*rho_zz(2,cell2)+cf3*rho_zz(3,cell2)) - w(1,cell1) = w(1,cell1) + (zb(1,1,iEdge) + sign(1.0_RKIND,flux)*coef_3rd_order*zb3(1,1,iEdge)) & - *flux/(cf1*rho_zz(1,cell1)+cf2*rho_zz(2,cell1)+cf3*rho_zz(3,cell1)) + do i=1,nEdgesOnCell(iCell) + iEdge=edgesOnCell(i,iCell) + + flux = (cf1*ru(1,iEdge) + cf2*ru(2,iEdge) + cf3*ru(3,iEdge)) + w(1,iCell) = w(1,iCell) + edgesOnCell_sign(i,iCell) * & + (zb_cell(1,i,iCell) + sign(1.0_RKIND,flux)*zb3_cell(1,i,iCell))*flux + +!DIR$ IVDEP + do k = 2, nVertLevels + flux = (fzm(k)*ru(k,iEdge)+fzp(k)*ru(k-1,iEdge)) + w(k,iCell) = w(k,iCell) + edgesOnCell_sign(i,iCell) * & + (zb_cell(k,i,iCell)+sign(1.0_RKIND,flux)*zb3_cell(k,i,iCell))*flux + end do + + end do + w(1,iCell) = w(1,iCell)/(cf1*rho_zz(1,iCell)+cf2*rho_zz(2,iCell)+cf3*rho_zz(3,iCell)) +!DIR$ IVDEP do k = 2, nVertLevels - flux = (fzm(k)*ru(k,iEdge)+fzp(k)*ru(k-1,iEdge)) - w(k,cell2) = w(k,cell2) - (zb(k,2,iEdge)+sign(1.0_RKIND,flux)*coef_3rd_order*zb3(k,2,iEdge)) & - 
*flux/(fzm(k)*rho_zz(k,cell2)+fzp(k)*rho_zz(k-1,cell2)) - w(k,cell1) = w(k,cell1) + (zb(k,1,iEdge)+sign(1.0_RKIND,flux)*coef_3rd_order*zb3(k,1,iEdge)) & - *flux/(fzm(k)*rho_zz(k,cell1)+fzp(k)*rho_zz(k-1,cell1)) + w(k,iCell) = w(k,iCell)/(fzm(k)*rho_zz(k,iCell)+fzp(k)*rho_zz(k-1,iCell)) end do end do - end subroutine atm_recover_large_step_variables + end subroutine atm_recover_large_step_variables_work -!--------------------------------------------------------------------------------------- - subroutine atm_advance_scalars( tend, state, diag, mesh, configs, num_scalars, nCells, nVertLevels, dt, advance_density) + subroutine atm_advance_scalars( tend, state, diag, mesh, configs, num_scalars, nCells, nVertLevels, dt, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & + horiz_flux_arr, rk_step, config_time_integration_order, advance_density, scalar_tend, rho_zz_int) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! ! Integrate scalar equations - explicit transport plus other tendencies ! - ! this transport routine is similar to the original atm_advance_scalars, except it also advances - ! (re-integrates) the density. This re-integration allows the scalar transport routine to use a different - ! timestep than the dry dynamics, and also makes possible a spatial splitting of the scalar transport integration - ! (and density integration). The current integration is, however, not spatially split. - ! - ! WCS 18 November 2014 - !----------------------- - ! Input: s - current model state, - ! including tendencies from sources other than resolved transport. - ! grid - grid metadata - ! - ! input scalars in state are uncoupled (i.e. not mulitplied by density) - ! - ! Output: updated uncoupled scalars (scalars in state). - ! Note: scalar tendencies are also modified by this routine. - ! - ! 
This routine DOES NOT apply any positive definite or monotonic renormalizations. - ! - ! The transport scheme is from Skamarock and Gassmann MWR 2011. + ! Wrapper for atm_advance_scalars_work() to de-reference pointers ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -1586,56 +2531,40 @@ subroutine atm_advance_scalars( tend, state, diag, mesh, configs, num_scalars, n integer, intent(in) :: num_scalars ! for allocating stack variables integer, intent(in) :: nCells ! for allocating stack variables integer, intent(in) :: nVertLevels ! for allocating stack variables + integer, intent(in) :: rk_step ! rk substep we are integrating + integer, intent(in) :: config_time_integration_order ! time integration order real (kind=RKIND) :: dt + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd logical, intent(in), optional :: advance_density + real (kind=RKIND), dimension(:,:,:), intent(inout), optional :: scalar_tend + real (kind=RKIND), dimension(:,:), intent(inout), optional :: rho_zz_int - real (kind=RKIND), dimension(nVertLevels) :: scalar_weight1 - real (kind=RKIND), dimension(nVertLevels, 10) :: scalar_weight2 - integer:: jj - integer, dimension(10) :: ica + integer :: i, j, iCell, iAdvCell, iEdge, k, iScalar, cell1, cell2 real (kind=RKIND), dimension(:), pointer :: invAreaCell real (kind=RKIND) :: rho_zz_new_inv - integer :: i, iCell, iEdge, k, iScalar, cell1, cell2 real (kind=RKIND) :: scalar_weight - real (kind=RKIND), dimension(:,:,:), pointer :: scalar_old, scalar_new, scalar_tend + real (kind=RKIND), dimension(:,:,:), pointer :: scalar_old, scalar_new, scalar_tend_save real (kind=RKIND), dimension(:,:,:), pointer :: deriv_two real (kind=RKIND), dimension(:,:), pointer :: uhAvg, rho_zz_old, rho_zz_new, wwAvg, rho_edge, zgrid, kdiff - real (kind=RKIND), dimension(:), pointer :: 
dvEdge, dcEdge, areaCell, qv_init + real (kind=RKIND), dimension(:), pointer :: dvEdge, qv_init integer, dimension(:,:), pointer :: cellsOnEdge + real (kind=RKIND), dimension(:,:,:), intent(inout) :: horiz_flux_arr - integer, dimension(:,:), pointer :: advCellsForEdge - integer, dimension(:), pointer :: nAdvCellsForEdge - real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_3rd - real (kind=RKIND), dimension( num_scalars, nVertLevels ) :: flux_arr + integer, dimension(:,:), pointer :: advCellsForEdge, edgesOnCell + integer, dimension(:), pointer :: nAdvCellsForEdge, nEdgesOnCell + real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_3rd, edgesOnCell_sign real (kind=RKIND), dimension( num_scalars, nVertLevels + 1 ) :: wdtn - real (kind=RKIND), dimension(:,:), pointer :: rho_zz_int - real (kind=RKIND), dimension(:,:,:), pointer :: scalar_tend_save integer, pointer :: nCellsSolve, nEdges real (kind=RKIND), dimension(:), pointer :: fnm, fnp, rdnw, meshScalingDel2, meshScalingDel4 real (kind=RKIND), pointer :: coef_3rd_order - real (kind=RKIND), pointer :: h_theta_eddy_visc2, v_theta_eddy_visc2 - - real (kind=RKIND) :: flux3, flux4 - real (kind=RKIND) :: q_im2, q_im1, q_i, q_ip1, ua, coef3 - logical :: local_advance_density - integer, pointer :: config_scalar_vadv_order - - integer, parameter :: hadv_opt = 2 - - flux4(q_im2, q_im1, q_i, q_ip1, ua) = & - ua*( 7.*(q_i + q_im1) - (q_ip1 + q_im2) )/12.0 - - flux3(q_im2, q_im1, q_i, q_ip1, ua, coef3) = & - flux4(q_im2, q_im1, q_i, q_ip1, ua) + & - coef3*abs(ua)*((q_ip1 - q_im2)-3.*(q_i-q_im1))/12.0 - if (present(advance_density)) then local_advance_density = advance_density else @@ -1643,7 +2572,6 @@ subroutine atm_advance_scalars( tend, state, diag, mesh, configs, num_scalars, n end if call mpas_pool_get_config(configs, 'config_coef_3rd_order', coef_3rd_order) - call mpas_pool_get_config(configs, 'config_scalar_vadv_order', config_scalar_vadv_order) call mpas_pool_get_array(state, 'scalars', 
scalar_old, 1) call mpas_pool_get_array(state, 'scalars', scalar_new, 2) @@ -1656,11 +2584,12 @@ subroutine atm_advance_scalars( tend, state, diag, mesh, configs, num_scalars, n call mpas_pool_get_array(mesh, 'deriv_two', deriv_two) call mpas_pool_get_array(mesh, 'dvEdge', dvEdge) - call mpas_pool_get_array(mesh, 'dcEdge', dcEdge) call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) - call mpas_pool_get_array(mesh, 'areaCell', areaCell) + call mpas_pool_get_array(mesh, 'edgesOnCell', edgesOnCell) + call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) + call mpas_pool_get_array(mesh, 'edgesOnCell_sign', edgesOnCell_sign) call mpas_pool_get_array(mesh, 'invAreaCell', invAreaCell) - call mpas_pool_get_array(tend, 'scalars_tend', scalar_tend) + call mpas_pool_get_array(tend, 'scalars_tend', scalar_tend_save) call mpas_pool_get_array(mesh, 'fzm', fnm) call mpas_pool_get_array(mesh, 'fzp', fnp) @@ -1680,54 +2609,175 @@ subroutine atm_advance_scalars( tend, state, diag, mesh, configs, num_scalars, n call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) - call mpas_pool_get_config(configs, 'config_h_theta_eddy_visc2', h_theta_eddy_visc2) - call mpas_pool_get_config(configs, 'config_v_theta_eddy_visc2', v_theta_eddy_visc2) - - -#ifndef DO_PHYSICS - scalar_tend = 0. ! testing purposes - we have no sources or sinks -#endif - - ! - ! Runge Kutta integration, so we compute fluxes from scalar_new values, update starts from scalar_old - ! - ! horizontal flux divergence, accumulate in scalar_tend - if (local_advance_density) then - allocate(rho_zz_int(nVertLevels,nCells)) - allocate(scalar_tend_save(num_scalars,nVertLevels,nCells)) - rho_zz_int(:,:) = 0.0 - scalar_tend_save(:,:,1:nCells) = scalar_tend(:,:,1:nCells) +! call atm_advance_scalars_work(num_scalars, nCells, nVertLevels, dt, & +! cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & +! 
cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & +! coef_3rd_order, scalar_old, scalar_new, rho_zz_old, rho_zz_new, kdiff, & +! uhAvg, wwAvg, deriv_two, dvEdge, & +! cellsOnEdge, edgesOnCell, nEdgesOnCell, edgesOnCell_sign, invAreaCell, & +! scalar_tend_save, fnm, fnp, rdnw, meshScalingDel2, meshScalingDel4, & +! nAdvCellsForEdge, advCellsForEdge, adv_coefs, adv_coefs_3rd, rho_edge, qv_init, zgrid, & +! nCellsSolve, nEdges, horiz_flux_arr, & +! local_advance_density, scalar_tend, rho_zz_int) + call atm_advance_scalars_work_new(num_scalars, nCells, nVertLevels, dt, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & + coef_3rd_order, scalar_old, scalar_new, rho_zz_old, rho_zz_new, kdiff, & + uhAvg, wwAvg, deriv_two, dvEdge, & + cellsOnEdge, edgesOnCell, nEdgesOnCell, edgesOnCell_sign, invAreaCell, & + scalar_tend_save, fnm, fnp, rdnw, meshScalingDel2, meshScalingDel4, & + nAdvCellsForEdge, advCellsForEdge, adv_coefs, adv_coefs_3rd, rho_edge, qv_init, zgrid, & + nCellsSolve, nEdges, horiz_flux_arr, rk_step, config_time_integration_order, & + local_advance_density, scalar_tend, rho_zz_int) else - rho_zz_int => rho_zz_new +! call atm_advance_scalars_work(num_scalars, nCells, nVertLevels, dt, & +! cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & +! cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & +! coef_3rd_order, scalar_old, scalar_new, rho_zz_old, rho_zz_new, kdiff, & +! uhAvg, wwAvg, deriv_two, dvEdge, & +! cellsOnEdge, edgesOnCell, nEdgesOnCell, edgesOnCell_sign, invAreaCell, & +! scalar_tend_save, fnm, fnp, rdnw, meshScalingDel2, meshScalingDel4, & +! nAdvCellsForEdge, advCellsForEdge, adv_coefs, adv_coefs_3rd, rho_edge, qv_init, zgrid, & +! nCellsSolve, nEdges, horiz_flux_arr, & +! 
local_advance_density) + call atm_advance_scalars_work_new(num_scalars, nCells, nVertLevels, dt, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & + coef_3rd_order, scalar_old, scalar_new, rho_zz_old, rho_zz_new, kdiff, & + uhAvg, wwAvg, deriv_two, dvEdge, & + cellsOnEdge, edgesOnCell, nEdgesOnCell, edgesOnCell_sign, invAreaCell, & + scalar_tend_save, fnm, fnp, rdnw, meshScalingDel2, meshScalingDel4, & + nAdvCellsForEdge, advCellsForEdge, adv_coefs, adv_coefs_3rd, rho_edge, qv_init, zgrid, & + nCellsSolve, nEdges, horiz_flux_arr, rk_step, config_time_integration_order, & + local_advance_density) end if - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then ! only for owned cells - - ! flux_arr stores the value of the scalar at the edge. - ! a better name perhaps would be scalarEdge + end subroutine atm_advance_scalars - select case(nAdvCellsForEdge(iEdge)) - case(10) - do jj=1,10 - do k=1,nVertLevels - scalar_weight2(k,jj) = uhAvg(k,iEdge)*(adv_coefs(jj,iEdge) + & - sign(coef_3rd_order,uhAvg(k,iEdge))*adv_coefs_3rd(jj,iEdge)) - enddo - enddo - do jj=1,10 - ica(jj) = advCellsForEdge(jj,iEdge) - enddo - do k=1,nVertLevels - do iScalar=1,num_scalars - flux_arr(iscalar,k) = & - scalar_weight2(k,1) * scalar_new(iScalar,k,ica(1)) + & - scalar_weight2(k,2) * scalar_new(iScalar,k,ica(2)) + & - scalar_weight2(k,3) * scalar_new(iScalar,k,ica(3)) + & + subroutine atm_advance_scalars_work( num_scalars_dummy, nCells, nVertLevels_dummy, dt, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & + coef_3rd_order, scalar_old, scalar_new, rho_zz_old, rho_zz_new, kdiff, & + uhAvg, wwAvg, deriv_two, dvEdge, & + cellsOnEdge, edgesOnCell, nEdgesOnCell, edgesOnCell_sign, 
invAreaCell, & + scalar_tend_save, fnm, fnp, rdnw, meshScalingDel2, meshScalingDel4, & + nAdvCellsForEdge, advCellsForEdge, adv_coefs, adv_coefs_3rd, rho_edge, qv_init, zgrid, & + nCellsSolve, nEdges, horiz_flux_arr, & + advance_density, scalar_tend, rho_zz_int) + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! + ! Integrate scalar equations - explicit transport plus other tendencies + ! + ! this transport routine is similar to the original atm_advance_scalars, except it also advances + ! (re-integrates) the density. This re-integration allows the scalar transport routine to use a different + ! timestep than the dry dynamics, and also makes possible a spatial splitting of the scalar transport integration + ! (and density integration). The current integration is, however, not spatially split. + ! + ! WCS 18 November 2014 + !----------------------- + ! Input: s - current model state, + ! including tendencies from sources other than resolved transport. + ! grid - grid metadata + ! + ! input scalars in state are uncoupled (i.e. not mulitplied by density) + ! + ! Output: updated uncoupled scalars (scalars in state). + ! Note: scalar tendencies are also modified by this routine. + ! + ! This routine DOES NOT apply any positive definite or monotonic renormalizations. + ! + ! The transport scheme is from Skamarock and Gassmann MWR 2011. + ! + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + use mpas_atm_dimensions + + implicit none + + integer, intent(in) :: num_scalars_dummy ! for allocating stack variables + integer, intent(in) :: nCells ! for allocating stack variables + integer, intent(in) :: nVertLevels_dummy ! 
for allocating stack variables + real (kind=RKIND), intent(in) :: dt + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd + logical, intent(in) :: advance_density + real (kind=RKIND), dimension(:,:,:), intent(in) :: scalar_old + real (kind=RKIND), dimension(:,:,:), intent(inout) :: scalar_new + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1), intent(inout) :: scalar_tend_save + real (kind=RKIND), dimension(:,:,:), intent(in) :: deriv_two + real (kind=RKIND), dimension(:,:), intent(in) :: rho_zz_old + real (kind=RKIND), dimension(:,:), intent(in) :: uhAvg, wwAvg, rho_edge, zgrid, rho_zz_new, kdiff + real (kind=RKIND), dimension(:), intent(in) :: dvEdge, qv_init + integer, dimension(:,:), intent(in) :: cellsOnEdge + integer, dimension(:,:), intent(in) :: advCellsForEdge, edgesOnCell + integer, dimension(:), intent(in) :: nAdvCellsForEdge, nEdgesOnCell + real (kind=RKIND), dimension(:,:), intent(in) :: adv_coefs, adv_coefs_3rd, edgesOnCell_sign + real (kind=RKIND), dimension(:), intent(in) :: fnm, fnp, rdnw, meshScalingDel2, meshScalingDel4 + real (kind=RKIND), intent(in) :: coef_3rd_order + real (kind=RKIND), dimension(num_scalars,nVertLevels,nEdges+1), intent(inout) :: horiz_flux_arr + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1), intent(inout), optional :: scalar_tend + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout), optional :: rho_zz_int + real (kind=RKIND), dimension(:), intent(in) :: invAreaCell + integer, intent(in) :: nCellsSolve, nEdges + + integer :: i, j, iCell, iAdvCell, iEdge, k, iScalar, cell1, cell2 + real (kind=RKIND) :: rho_zz_new_inv + + real (kind=RKIND) :: scalar_weight + + real (kind=RKIND), dimension( num_scalars, nVertLevels + 1 ) :: wdtn + + real (kind=RKIND), dimension(nVertLevels,10) :: scalar_weight2 + integer, dimension(10) :: ica + + 
real (kind=RKIND) :: flux3, flux4 + real (kind=RKIND) :: q_im2, q_im1, q_i, q_ip1, ua, coef3 + + logical :: local_advance_density + + flux4(q_im2, q_im1, q_i, q_ip1, ua) = & + ua*( 7.*(q_i + q_im1) - (q_ip1 + q_im2) )/12.0 + + flux3(q_im2, q_im1, q_i, q_ip1, ua, coef3) = & + flux4(q_im2, q_im1, q_i, q_ip1, ua) + & + coef3*abs(ua)*((q_ip1 - q_im2)-3.*(q_i-q_im1))/12.0 + + local_advance_density = advance_density + + ! + ! Runge Kutta integration, so we compute fluxes from scalar_new values, update starts from scalar_old + ! + ! horizontal flux divergence, accumulate in scalar_tend + + + ! horiz_flux_arr stores the value of the scalar at the edge. + ! a better name perhaps would be scalarEdge + + do iEdge=edgeStart,edgeEnd + + select case(nAdvCellsForEdge(iEdge)) + + case(10) + + do j=1,10 +!DIR$ IVDEP + do k=1,nVertLevels + scalar_weight2(k,j) = adv_coefs(j,iEdge) + sign(1.0_RKIND,uhAvg(k,iEdge))*adv_coefs_3rd(j,iEdge) + end do + end do + do j=1,10 + ica(j) = advCellsForEdge(j,iEdge) + end do +!DIR$ IVDEP + do k = 1,nVertLevels +!DIR$ IVDEP + do iScalar = 1,num_scalars + horiz_flux_arr(iScalar,k,iEdge) = & + scalar_weight2(k,1) * scalar_new(iScalar,k,ica(1)) + & + scalar_weight2(k,2) * scalar_new(iScalar,k,ica(2)) + & + scalar_weight2(k,3) * scalar_new(iScalar,k,ica(3)) + & scalar_weight2(k,4) * scalar_new(iScalar,k,ica(4)) + & scalar_weight2(k,5) * scalar_new(iScalar,k,ica(5)) + & scalar_weight2(k,6) * scalar_new(iScalar,k,ica(6)) + & @@ -1736,166 +2786,501 @@ subroutine atm_advance_scalars( tend, state, diag, mesh, configs, num_scalars, n scalar_weight2(k,9) * scalar_new(iScalar,k,ica(9)) + & scalar_weight2(k,10) * scalar_new(iScalar,k,ica(10)) end do + end do + + case default + + horiz_flux_arr(:,:,iEdge) = 0.0 + do j=1,nAdvCellsForEdge(iEdge) + iAdvCell = advCellsForEdge(j,iEdge) +!DIR$ IVDEP + do k=1,nVertLevels + scalar_weight = adv_coefs(j,iEdge) + sign(1.0_RKIND,uhAvg(k,iEdge))*adv_coefs_3rd(j,iEdge) +!DIR$ IVDEP + do iScalar=1,num_scalars + 
horiz_flux_arr(iScalar,k,iEdge) = horiz_flux_arr(iScalar,k,iEdge) + scalar_weight * scalar_new(iScalar,k,iAdvCell) + end do end do + end do - case default - do i=1,nAdvCellsForEdge(iEdge) - iCell = advCellsForEdge(i,iEdge) - do k=1,nVertLevels - scalar_weight1(k) = uhAvg(k,iEdge)*(adv_coefs(i,iEdge) + & - sign(coef_3rd_order,uhAvg(k,iEdge))*adv_coefs_3rd(i,iEdge)) - enddo - if(i == 1) then - do k=1,nVertLevels - do iScalar=1,num_scalars - flux_arr(iScalar,k) = scalar_weight1(k) * scalar_new(iScalar,k,iCell) - end do - end do - else - do k=1,nVertLevels - do iScalar=1,num_scalars - flux_arr(iScalar,k) = flux_arr(iScalar,k) + & - scalar_weight1(k) * scalar_new(iScalar,k,iCell) - end do - end do - endif - enddo - end select + end select + end do + +!$OMP BARRIER - ! here we add the horizontal flux divergence into the scalar tendency. - ! note that the scalar tendency is modified. + if (local_advance_density) then + if ((.not.present(scalar_tend)) .or. (.not.present(rho_zz_int))) then + call mpas_dmpar_global_abort('Error: rho_zz_int or scalar_tend not supplied to atm_advance_scalars( ) when advance_density=.true.') + end if + + do iCell=cellSolveStart,cellSolveEnd + scalar_tend(:,:,iCell) = scalar_tend_save(:,:,iCell) +#ifndef DO_PHYSICS + scalar_tend(:,:,iCell) = 0.0 ! testing purposes - we have no sources or sinks +#endif + + rho_zz_int(:,iCell) = 0.0 + + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + + ! here we add the horizontal flux divergence into the scalar tendency. + ! note that the scalar tendency is modified. 
+!DIR$ IVDEP + do k=1,nVertLevels + rho_zz_int(k,iCell) = rho_zz_int(k,iCell) - edgesOnCell_sign(i,iCell) * uhAvg(k,iEdge)*dvEdge(iEdge) * invAreaCell(iCell) +!DIR$ IVDEP + do iScalar=1,num_scalars + scalar_tend(iScalar,k,iCell) = scalar_tend(iScalar,k,iCell) & + - edgesOnCell_sign(i,iCell) * uhAvg(k,iEdge)*horiz_flux_arr(iScalar,k,iEdge) * invAreaCell(iCell) + end do + end do + + end do +!DIR$ IVDEP do k=1,nVertLevels - do iScalar=1,num_scalars - scalar_tend(iScalar,k,cell1) = scalar_tend(iScalar,k,cell1) & - - flux_arr(iScalar,k) * invAreaCell(cell1) - scalar_tend(iScalar,k,cell2) = scalar_tend(iScalar,k,cell2) & - + flux_arr(iScalar,k) * invAreaCell(cell2) + rho_zz_int(k,iCell) = rho_zz_old(k,iCell) + dt*( rho_zz_int(k,iCell) - rdnw(k)*(wwAvg(k+1,iCell)-wwAvg(k,iCell)) ) end do - if (local_advance_density) then - rho_zz_int(k,cell1) = rho_zz_int(k,cell1) - uhAvg(k,iEdge)*dvEdge(iEdge)/areaCell(cell1) - rho_zz_int(k,cell2) = rho_zz_int(k,cell2) + uhAvg(k,iEdge)*dvEdge(iEdge)/areaCell(cell2) - end if + end do + + else + + do iCell=cellSolveStart,cellSolveEnd +#ifndef DO_PHYSICS + scalar_tend_save(:,:,iCell) = 0.0 ! testing purposes - we have no sources or sinks +#endif + + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + + ! here we add the horizontal flux divergence into the scalar tendency. + ! note that the scalar tendency is modified. +!DIR$ IVDEP + do k=1,nVertLevels +!DIR$ IVDEP + do iScalar=1,num_scalars + scalar_tend_save(iScalar,k,iCell) = scalar_tend_save(iScalar,k,iCell) & + - edgesOnCell_sign(i,iCell) * uhAvg(k,iEdge)*horiz_flux_arr(iScalar,k,iEdge) * invAreaCell(iCell) + end do + end do + end do + end do - end if - end do + end if ! ! vertical flux divergence and update of the scalars ! ! zero fluxes at top and bottom + wdtn(:,1) = 0.0 + wdtn(:,nVertLevels+1) = 0.0 + - wdtn(:,1) = 0. - wdtn(:,nVertLevels+1) = 0. 
+ do iCell=cellSolveStart,cellSolveEnd + + k = 2 + do iScalar=1,num_scalars + wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell)) + end do + +!DIR$ IVDEP + do k=3,nVertLevels-1 +!DIR$ IVDEP + do iScalar=1,num_scalars + wdtn(iScalar,k) = flux3( scalar_new(iScalar,k-2,iCell),scalar_new(iScalar,k-1,iCell), & + scalar_new(iScalar,k ,iCell),scalar_new(iScalar,k+1,iCell), & + wwAvg(k,iCell), coef_3rd_order ) + end do + end do + k = nVertLevels + do iScalar=1,num_scalars + wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell)) + end do if (local_advance_density) then - ! update density first - do iCell=1,nCellsSolve - do k=1,nVertLevels - rho_zz_int(k,iCell) = rho_zz_old(k,iCell) + dt*( rho_zz_int(k,iCell) - rdnw(k)*(wwAvg(k+1,iCell)-wwAvg(k,iCell)) ) +!DIR$ IVDEP + do k=1,nVertLevels + rho_zz_new_inv = 1.0_RKIND / rho_zz_int(k,iCell) +!DIR$ IVDEP + do iScalar=1,num_scalars + scalar_new(iScalar,k,iCell) = ( scalar_old(iScalar,k,iCell)*rho_zz_old(k,iCell) & + + dt*( scalar_tend(iScalar,k,iCell) -rdnw(k)*(wdtn(iScalar,k+1)-wdtn(iScalar,k)) ) ) * rho_zz_new_inv + end do + end do + else +!DIR$ IVDEP + do k=1,nVertLevels + rho_zz_new_inv = 1.0_RKIND / rho_zz_new(k,iCell) +!DIR$ IVDEP + do iScalar=1,num_scalars + scalar_new(iScalar,k,iCell) = ( scalar_old(iScalar,k,iCell)*rho_zz_old(k,iCell) & + + dt*( scalar_tend_save(iScalar,k,iCell) -rdnw(k)*(wdtn(iScalar,k+1)-wdtn(iScalar,k)) ) ) * rho_zz_new_inv end do end do end if - if (config_scalar_vadv_order == 2) then + end do - do iCell=1,nCellsSolve - do k = 2, nVertLevels - do iScalar=1,num_scalars - wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell)) + end subroutine atm_advance_scalars_work + + + subroutine atm_advance_scalars_work_new( num_scalars_dummy, nCells, nVertLevels_dummy, dt, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + 
cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & + coef_3rd_order, scalar_old, scalar_new, rho_zz_old, rho_zz_new, kdiff, & + uhAvg, wwAvg, deriv_two, dvEdge, & + cellsOnEdge, edgesOnCell, nEdgesOnCell, edgesOnCell_sign, invAreaCell, & + scalar_tend_save, fnm, fnp, rdnw, meshScalingDel2, meshScalingDel4, & + nAdvCellsForEdge, advCellsForEdge, adv_coefs, adv_coefs_3rd, rho_edge, qv_init, zgrid, & + nCellsSolve, nEdges, horiz_flux_arr, rk_step, config_time_integration_order, & + advance_density, scalar_tend, rho_zz_int) + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! + ! Integrate scalar equations - explicit transport plus other tendencies + ! + ! this transport routine is similar to the original atm_advance_scalars, except it also advances + ! (re-integrates) the density. This re-integration allows the scalar transport routine to use a different + ! timestep than the dry dynamics, and also makes possible a spatial splitting of the scalar transport integration + ! (and density integration). The current integration is, however, not spatially split. + ! + ! WCS 18 November 2014 + !----------------------- + ! Input: s - current model state, + ! including tendencies from sources other than resolved transport. + ! grid - grid metadata + ! + ! input scalars in state are uncoupled (i.e. not mulitplied by density) + ! + ! Output: updated uncoupled scalars (scalars in state). + ! Note: scalar tendencies are also modified by this routine. + ! + ! This routine DOES NOT apply any positive definite or monotonic renormalizations. + ! + ! The transport scheme is from Skamarock and Gassmann MWR 2011. + ! + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + use mpas_atm_dimensions + + implicit none + + integer, intent(in) :: num_scalars_dummy ! for allocating stack variables + integer, intent(in) :: nCells ! 
for allocating stack variables + integer, intent(in) :: nVertLevels_dummy ! for allocating stack variables + real (kind=RKIND), intent(in) :: dt + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd + integer, intent(in) :: rk_step, config_time_integration_order + logical, intent(in) :: advance_density + real (kind=RKIND), dimension(:,:,:), intent(in) :: scalar_old + real (kind=RKIND), dimension(:,:,:), intent(inout) :: scalar_new + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1), intent(inout) :: scalar_tend_save + real (kind=RKIND), dimension(:,:,:), intent(in) :: deriv_two + real (kind=RKIND), dimension(:,:), intent(in) :: rho_zz_old + real (kind=RKIND), dimension(:,:), intent(in) :: uhAvg, wwAvg, rho_edge, zgrid, rho_zz_new, kdiff + real (kind=RKIND), dimension(:), intent(in) :: dvEdge, qv_init + integer, dimension(:,:), intent(in) :: cellsOnEdge + integer, dimension(:,:), intent(in) :: advCellsForEdge, edgesOnCell + integer, dimension(:), intent(in) :: nAdvCellsForEdge, nEdgesOnCell + real (kind=RKIND), dimension(:,:), intent(in) :: adv_coefs, adv_coefs_3rd, edgesOnCell_sign + real (kind=RKIND), dimension(:), intent(in) :: fnm, fnp, rdnw, meshScalingDel2, meshScalingDel4 + real (kind=RKIND), intent(in) :: coef_3rd_order + real (kind=RKIND), dimension(num_scalars,nVertLevels,nEdges+1), intent(inout) :: horiz_flux_arr + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1), intent(inout), optional :: scalar_tend + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout), optional :: rho_zz_int + real (kind=RKIND), dimension(:), intent(in) :: invAreaCell + integer, intent(in) :: nCellsSolve, nEdges + + integer :: i, j, iCell, iAdvCell, iEdge, k, iScalar, cell1, cell2 + real (kind=RKIND) :: rho_zz_new_inv + + real (kind=RKIND) :: scalar_weight + + real (kind=RKIND), dimension( 
num_scalars, nVertLevels + 1 ) :: wdtn + + real (kind=RKIND), dimension(nVertLevels,10) :: scalar_weight2 + integer, dimension(10) :: ica + + real (kind=RKIND) :: flux3, flux4 + real (kind=RKIND) :: q_im2, q_im1, q_i, q_ip1, ua, coef3 + + logical :: local_advance_density + + real (kind=RKIND) :: weight_time_old, weight_time_new + real (kind=RKIND), dimension(num_scalars,nVertLevels) :: scalar_tend_column ! local storage to accumulate tendency + + flux4(q_im2, q_im1, q_i, q_ip1, ua) = & + ua*( 7.*(q_i + q_im1) - (q_ip1 + q_im2) )/12.0 + + flux3(q_im2, q_im1, q_i, q_ip1, ua, coef3) = & + flux4(q_im2, q_im1, q_i, q_ip1, ua) + & + coef3*abs(ua)*((q_ip1 - q_im2)-3.*(q_i-q_im1))/12.0 + + local_advance_density = advance_density + + ! + ! Runge Kutta integration, so we compute fluxes from scalar_new values, update starts from scalar_old + ! + ! horizontal flux divergence, accumulate in scalar_tend + + + ! horiz_flux_arr stores the value of the scalar at the edge. + ! a better name perhaps would be scalarEdge + + ! weights for the time interpolation of the input density + ! + if (.not. advance_density ) then + weight_time_new = 1. + else + if((rk_step == 1) .and. config_time_integration_order == 3) weight_time_new = 1./3 + if((rk_step == 1) .and. config_time_integration_order == 2) weight_time_new = 1./2 + if(rk_step == 2) weight_time_new = 1./2 + if(rk_step == 3) weight_time_new = 1. + end if + weight_time_old = 1. 
- weight_time_new + + + do iEdge=edgeStart,edgeEnd + + select case(nAdvCellsForEdge(iEdge)) + + case(10) + + do j=1,10 +!DIR$ IVDEP + do k=1,nVertLevels + scalar_weight2(k,j) = adv_coefs(j,iEdge) + sign(1.0_RKIND,uhAvg(k,iEdge))*adv_coefs_3rd(j,iEdge) + end do + end do + do j=1,10 + ica(j) = advCellsForEdge(j,iEdge) + end do +!DIR$ IVDEP + do k = 1,nVertLevels +!DIR$ IVDEP + do iScalar = 1,num_scalars + horiz_flux_arr(iScalar,k,iEdge) = & + scalar_weight2(k,1) * scalar_new(iScalar,k,ica(1)) + & + scalar_weight2(k,2) * scalar_new(iScalar,k,ica(2)) + & + scalar_weight2(k,3) * scalar_new(iScalar,k,ica(3)) + & + scalar_weight2(k,4) * scalar_new(iScalar,k,ica(4)) + & + scalar_weight2(k,5) * scalar_new(iScalar,k,ica(5)) + & + scalar_weight2(k,6) * scalar_new(iScalar,k,ica(6)) + & + scalar_weight2(k,7) * scalar_new(iScalar,k,ica(7)) + & + scalar_weight2(k,8) * scalar_new(iScalar,k,ica(8)) + & + scalar_weight2(k,9) * scalar_new(iScalar,k,ica(9)) + & + scalar_weight2(k,10) * scalar_new(iScalar,k,ica(10)) end do end do + + case default + + horiz_flux_arr(:,:,iEdge) = 0.0 + do j=1,nAdvCellsForEdge(iEdge) + iAdvCell = advCellsForEdge(j,iEdge) +!DIR$ IVDEP + do k=1,nVertLevels + scalar_weight = adv_coefs(j,iEdge) + sign(1.0_RKIND,uhAvg(k,iEdge))*adv_coefs_3rd(j,iEdge) +!DIR$ IVDEP + do iScalar=1,num_scalars + horiz_flux_arr(iScalar,k,iEdge) = horiz_flux_arr(iScalar,k,iEdge) + scalar_weight * scalar_new(iScalar,k,iAdvCell) + end do + end do + end do + + end select + end do + +!$OMP BARRIER + +! scalar update, for each column sum fluxes over horizontal edges, add physics tendency, and add vertical flux divergence in update. + + + do iCell=cellSolveStart,cellSolveEnd +#ifndef DO_PHYSICS + scalar_tend_save(:,:,iCell) = 0.0 ! testing purposes - we have no sources or sinks +#endif + scalar_tend_column(1:num_scalars,1:nVertlevels) = 0. + + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + + ! here we add the horizontal flux divergence into the scalar tendency. + ! 
note that the scalar tendency is modified. +!DIR$ IVDEP + do k=1,nVertLevels +!DIR$ IVDEP + do iScalar=1,num_scalars + scalar_tend_column(iScalar,k) = scalar_tend_column(iScalar,k) & + - edgesOnCell_sign(i,iCell) * uhAvg(k,iEdge)*horiz_flux_arr(iScalar,k,iEdge) + end do + end do + + end do + +!DIR$ IVDEP do k=1,nVertLevels +!DIR$ IVDEP do iScalar=1,num_scalars - scalar_new(iScalar,k,iCell) = ( scalar_old(iScalar,k,iCell)*rho_zz_old(k,iCell) & - + dt*( scalar_tend(iScalar,k,iCell) -rdnw(k)*(wdtn(iScalar,k+1)-wdtn(iScalar,k)) ) )/rho_zz_int(k,iCell) + scalar_tend_column(iScalar,k) = scalar_tend_column(iScalar,k) * invAreaCell(iCell) + scalar_tend_save(iScalar,k,iCell) end do end do + + + ! + ! vertical flux divergence and update of the scalars + ! + wdtn(:,1) = 0.0 + wdtn(:,nVertLevels+1) = 0.0 + + k = 2 + do iScalar=1,num_scalars + wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell)) end do + +!DIR$ IVDEP + do k=3,nVertLevels-1 +!DIR$ IVDEP + do iScalar=1,num_scalars + wdtn(iScalar,k) = flux3( scalar_new(iScalar,k-2,iCell),scalar_new(iScalar,k-1,iCell), & + scalar_new(iScalar,k ,iCell),scalar_new(iScalar,k+1,iCell), & + wwAvg(k,iCell), coef_3rd_order ) + end do + end do + k = nVertLevels + do iScalar=1,num_scalars + wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell)) + end do + +!DIR$ IVDEP + do k=1,nVertLevels + rho_zz_new_inv = 1.0_RKIND / (weight_time_old*rho_zz_old(k,iCell) + weight_time_new*rho_zz_new(k,iCell)) +!DIR$ IVDEP + do iScalar=1,num_scalars + scalar_new(iScalar,k,iCell) = ( scalar_old(iScalar,k,iCell)*rho_zz_old(k,iCell) & + + dt*( scalar_tend_column(iScalar,k) -rdnw(k)*(wdtn(iScalar,k+1)-wdtn(iScalar,k)) ) ) * rho_zz_new_inv + end do + end do + + end do + + end subroutine atm_advance_scalars_work_new + + + subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCells, nEdges, nVertLevels_dummy, dt, & + cellStart, cellEnd, 
vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & + scalar_old, scalar_new, s_max, s_min, wdtn, scale_arr, flux_arr, & + flux_upwind_tmp, flux_tmp, advance_density, rho_zz_int) + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! + ! Integrate scalar equations - transport plus other tendencies + ! + ! wrapper routine for atm_advance_scalars_mono_work + ! + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - else if ( config_scalar_vadv_order == 3 ) then + use mpas_atm_dimensions - do iCell=1,nCellsSolve + implicit none - k = 2 - do iScalar=1,num_scalars - wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell)) - end do - - do k=3,nVertLevels-1 - do iScalar=1,num_scalars - wdtn(iScalar,k) = flux3( scalar_new(iScalar,k-2,iCell),scalar_new(iScalar,k-1,iCell), & - scalar_new(iScalar,k ,iCell),scalar_new(iScalar,k+1,iCell), & - wwAvg(k,iCell), coef_3rd_order ) - end do - end do - k = nVertLevels - do iScalar=1,num_scalars - wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell)) - end do + type (block_type), intent(inout), target :: block + type (mpas_pool_type), intent(in) :: tend + type (mpas_pool_type), intent(inout) :: state + type (mpas_pool_type), intent(in) :: diag + type (mpas_pool_type), intent(in) :: mesh + type (mpas_pool_type), intent(in) :: configs + integer, intent(in) :: nCells ! for allocating stack variables + integer, intent(in) :: nEdges ! for allocating stack variables + integer, intent(in) :: nVertLevels_dummy ! 
for allocating stack variables + real (kind=RKIND), intent(in) :: dt + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout) :: scalar_old, scalar_new + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout) :: s_max, s_min + real (kind=RKIND), dimension(nVertLevels+1,nCells+1), intent(inout) :: wdtn + real (kind=RKIND), dimension(nVertLevels,2,nCells+1), intent(inout) :: scale_arr + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(inout) :: flux_arr + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(inout) :: flux_upwind_tmp, flux_tmp + logical, intent(in), optional :: advance_density + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout), optional :: rho_zz_int - do k=1,nVertLevels - rho_zz_new_inv = 1.0_RKIND / rho_zz_int(k,iCell) - do iScalar=1,num_scalars - scalar_new(iScalar,k,iCell) = ( scalar_old(iScalar,k,iCell)*rho_zz_old(k,iCell) & - + dt*( scalar_tend(iScalar,k,iCell) -rdnw(k)*(wdtn(iScalar,k+1)-wdtn(iScalar,k)) ) ) * rho_zz_new_inv - end do - end do + real (kind=RKIND), dimension(:,:,:), pointer :: scalar_tend + real (kind=RKIND), dimension(:,:), pointer :: uhAvg, rho_zz_old, rho_zz_new, wwAvg + real (kind=RKIND), dimension(:), pointer :: dvEdge, invAreaCell + integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnCell, edgesOnCell + real (kind=RKIND), dimension(:,:), pointer :: edgesOnCell_sign - end do + integer, dimension(:,:), pointer :: advCellsForEdge + integer, dimension(:), pointer :: nAdvCellsForEdge + real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_3rd + real (kind=RKIND), dimension(:,:,:), pointer :: scalars_old, scalars_new - else if ( config_scalar_vadv_order == 4 ) then + integer, pointer :: nCellsSolve - do iCell=1,nCellsSolve + real (kind=RKIND), dimension(:), 
pointer :: fnm, fnp, rdnw + integer, dimension(:), pointer :: nEdgesOnCell + real (kind=RKIND), pointer :: coef_3rd_order - k = 2 - do iScalar=1,num_scalars - wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell)) - end do - do k=3,nVertLevels-1 - do iScalar=1,num_scalars - wdtn(iScalar,k) = flux4( scalar_new(iScalar,k-2,iCell),scalar_new(iScalar,k-1,iCell), & - scalar_new(iScalar,k ,iCell),scalar_new(iScalar,k+1,iCell), wwAvg(k,iCell) ) - end do - end do - k = nVertLevels - do iScalar=1,num_scalars - wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell)) - end do + call mpas_pool_get_config(configs, 'config_coef_3rd_order', coef_3rd_order) - do k=1,nVertLevels - do iScalar=1,num_scalars - scalar_new(iScalar,k,iCell) = ( scalar_old(iScalar,k,iCell)*rho_zz_old(k,iCell) & - + dt*( scalar_tend(iScalar,k,iCell) -rdnw(k)*(wdtn(iScalar,k+1)-wdtn(iScalar,k)) ) )/rho_zz_int(k,iCell) - end do - end do + call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) - end do - - else + call mpas_pool_get_array(diag, 'ruAvg', uhAvg) + call mpas_pool_get_array(diag, 'wwAvg', wwAvg) - write(0,*) ' bad value for config_scalar_vadv_order - ', config_scalar_vadv_order + call mpas_pool_get_array(tend, 'scalars_tend', scalar_tend) - end if + call mpas_pool_get_array(state, 'rho_zz', rho_zz_old, 1) + call mpas_pool_get_array(state, 'rho_zz', rho_zz_new, 2) + call mpas_pool_get_array(state, 'scalars', scalars_old, 1) + call mpas_pool_get_array(state, 'scalars', scalars_new, 2) - if (local_advance_density) then - scalar_tend(:,:,1:nCells) = scalar_tend_save(:,:,1:nCells) - deallocate(rho_zz_int) - deallocate(scalar_tend_save) - end if + call mpas_pool_get_array(mesh, 'invAreaCell', invAreaCell) + call mpas_pool_get_array(mesh, 'dvEdge', dvEdge) + call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) + call mpas_pool_get_array(mesh, 'cellsOnCell', cellsOnCell) + call 
mpas_pool_get_array(mesh, 'edgesOnCell', edgesOnCell) + call mpas_pool_get_array(mesh, 'edgesOnCell_sign', edgesOnCell_sign) + call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) + call mpas_pool_get_array(mesh, 'fzm', fnm) + call mpas_pool_get_array(mesh, 'fzp', fnp) + call mpas_pool_get_array(mesh, 'rdzw', rdnw) + call mpas_pool_get_array(mesh, 'nAdvCellsForEdge', nAdvCellsForEdge) + call mpas_pool_get_array(mesh, 'advCellsForEdge', advCellsForEdge) + call mpas_pool_get_array(mesh, 'adv_coefs', adv_coefs) + call mpas_pool_get_array(mesh, 'adv_coefs_3rd', adv_coefs_3rd) - end subroutine atm_advance_scalars + call atm_advance_scalars_mono_work(block, state, nCells, nEdges, nVertLevels, dt, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & + coef_3rd_order, nCellsSolve, num_scalars, uhAvg, wwAvg, scalar_tend, rho_zz_old, & + rho_zz_new, scalars_old, scalars_new, invAreaCell, dvEdge, cellsOnEdge, cellsOnCell, & + edgesOnCell, edgesOnCell_sign, nEdgesOnCell, fnm, fnp, rdnw, nAdvCellsForEdge, & + advCellsForEdge, adv_coefs, adv_coefs_3rd, scalar_old, scalar_new, s_max, s_min, & + wdtn, scale_arr, flux_arr, flux_upwind_tmp, flux_tmp, & + advance_density, rho_zz_int) + + end subroutine atm_advance_scalars_mono -!------------------------------------------------ - subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCells, nEdges, nVertLevels, dt, advance_density) + subroutine atm_advance_scalars_mono_work(block, state, nCells, nEdges, nVertLevels_dummy, dt, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd, & + coef_3rd_order, nCellsSolve, num_scalars_dummy, uhAvg, wwAvg, scalar_tend, rho_zz_old, & + rho_zz_new, scalars_old, scalars_new, invAreaCell, dvEdge, cellsOnEdge, cellsOnCell, & + edgesOnCell, 
edgesOnCell_sign, nEdgesOnCell, fnm, fnp, rdnw, nAdvCellsForEdge, & + advCellsForEdge, adv_coefs, adv_coefs_3rd, scalar_old, scalar_new, s_max, s_min, & + wdtn, scale_arr, flux_arr, flux_upwind_tmp, flux_tmp, & + advance_density, rho_zz_int) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! ! Integrate scalar equations - transport plus other tendencies ! - ! this transport routine is similar to the original atm_advance_scalars_mono, except it also advances + ! this transport routine is similar to the original atm_advance_scalars_mono_work, except it also advances ! (re-integrates) the density. This re-integration allows the scalar transport routine to use a different ! timestep than the dry dynamics, and also makes possible a spatial splitting of the scalar transport integration ! (and density integration). The current integration is, however, not spatially split. @@ -1920,19 +3305,20 @@ subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCe ! as used in the RK3 scheme as described in Wang et al MWR 2009 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + use mpas_atm_dimensions + implicit none type (block_type), intent(inout), target :: block - type (mpas_pool_type), intent(in) :: tend type (mpas_pool_type), intent(inout) :: state - type (mpas_pool_type), intent(in) :: diag - type (mpas_pool_type), intent(in) :: mesh - type (mpas_pool_type), intent(in) :: configs integer, intent(in) :: nCells ! for allocating stack variables integer, intent(in) :: nEdges ! for allocating stack variables - integer, intent(in) :: nVertLevels ! for allocating stack variables + integer, intent(in) :: nVertLevels_dummy ! 
for allocating stack variables real (kind=RKIND), intent(in) :: dt + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd logical, intent(in), optional :: advance_density + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout), optional :: rho_zz_int integer :: ii,jj integer, dimension(10) :: ica @@ -1940,51 +3326,49 @@ subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCe integer :: i, iCell, iEdge, k, iScalar, cell1, cell2 real (kind=RKIND) :: flux, scalar_weight - real (kind=RKIND) :: f1, f2 - real (kind=RKIND), dimension(:,:,:), pointer :: scalar_tend - real (kind=RKIND), dimension(:,:,:), pointer :: deriv_two - real (kind=RKIND), dimension(:,:), pointer :: uhAvg, rho_zz_old, rho_zz_new, wwAvg, rho_edge, rho_zz, zgrid, kdiff - real (kind=RKIND), dimension(:), pointer :: dvEdge, dcEdge, areaCell, qv_init - integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnCell - - integer, dimension(:,:), pointer :: advCellsForEdge - integer, dimension(:), pointer :: nAdvCellsForEdge - real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_3rd - real (kind=RKIND), dimension(:,:,:), pointer :: scalars_old, scalars_new + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1), intent(inout) :: scalar_tend + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(in) :: uhAvg + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rho_zz_old, rho_zz_new + real (kind=RKIND), dimension(nVertLevels+1,nCells+1), intent(in) :: wwAvg + real (kind=RKIND), dimension(:), intent(in) :: dvEdge, invAreaCell + integer, dimension(:,:), intent(in) :: cellsOnEdge, cellsOnCell, edgesOnCell + real (kind=RKIND), dimension(:,:), intent(in) :: edgesOnCell_sign + + integer, dimension(:,:), intent(in) :: advCellsForEdge + integer, dimension(:), intent(in) :: nAdvCellsForEdge + 
real (kind=RKIND), dimension(:,:), intent(in) :: adv_coefs, adv_coefs_3rd + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1), intent(inout) :: scalars_old, scalars_new + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout) :: scalar_old, scalar_new + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout) :: s_max, s_min + real (kind=RKIND), dimension(nVertLevels+1,nCells+1), intent(inout) :: wdtn + real (kind=RKIND), dimension(nVertLevels,2,nCells+1), intent(inout), target :: scale_arr + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(inout) :: flux_arr + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(inout) :: flux_upwind_tmp, flux_tmp type (field3DReal), pointer :: scalars_old_field type (field3DReal), pointer :: tempField type (field3DReal), target :: tempFieldTarget - real (kind=RKIND), dimension( nVertLevels, nCells ) :: scalar_old, scalar_new - real (kind=RKIND), dimension( nVertLevels, nCells ) :: s_max, s_min - real (kind=RKIND), dimension( 2, nVertLevels, nCells ), target :: scale_arr - real (kind=RKIND), dimension(:,:), pointer :: rho_zz_int integer, parameter :: SCALE_IN = 1, SCALE_OUT = 2 - real (kind=RKIND), dimension( nVertLevels, nEdges ) :: flux_arr - real (kind=RKIND), dimension( nVertLevels + 1, nCells ) :: wdtn - - integer, pointer :: nCellsSolve, num_scalars + integer, intent(in) :: nCellsSolve, num_scalars_dummy integer :: icellmax, kmax - real (kind=RKIND), dimension(:), pointer :: fnm, fnp, rdnw, meshScalingDel2, meshScalingDel4 - integer, dimension(:), pointer :: nEdgesOnCell - real (kind=RKIND), pointer :: coef_3rd_order + real (kind=RKIND), dimension(nVertLevels), intent(in) :: fnm, fnp, rdnw + integer, dimension(:), intent(in) :: nEdgesOnCell + real (kind=RKIND), intent(in) :: coef_3rd_order - real (kind=RKIND), pointer :: h_theta_eddy_visc2, v_theta_eddy_visc2 + real (kind=RKIND), dimension(nVertLevels) :: flux_upwind_arr real (kind=RKIND) :: flux3, flux4, flux_upwind real 
(kind=RKIND) :: q_im2, q_im1, q_i, q_ip1, ua, coef3, scmin,scmax - real (kind=RKIND) :: s_min_update, s_max_update, s_upwind, scale_factor + real (kind=RKIND) :: scale_factor logical :: local_advance_density - integer, parameter :: hadv_opt = 2 real (kind=RKIND), parameter :: eps=1.e-20 - logical, parameter :: debug_print = .false. flux4(q_im2, q_im1, q_i, q_ip1, ua) = & ua*( 7.*(q_i + q_im1) - (q_ip1 + q_im2) )/12.0 @@ -1999,140 +3383,116 @@ subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCe local_advance_density = .true. end if - call mpas_pool_get_config(configs, 'config_coef_3rd_order', coef_3rd_order) - call mpas_pool_get_config(configs, 'config_h_theta_eddy_visc2', h_theta_eddy_visc2) - call mpas_pool_get_config(configs, 'config_v_theta_eddy_visc2', v_theta_eddy_visc2) - - call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) - call mpas_pool_get_dimension(state, 'num_scalars', num_scalars) - - call mpas_pool_get_array(diag, 'kdiff', kdiff) - call mpas_pool_get_array(diag, 'ruAvg', uhAvg) - call mpas_pool_get_array(diag, 'wwAvg', wwAvg) - call mpas_pool_get_array(diag, 'rho_edge', rho_edge) - - call mpas_pool_get_array(tend, 'scalars_tend', scalar_tend) - - call mpas_pool_get_array(state, 'rho_zz', rho_zz_old, 1) - call mpas_pool_get_array(state, 'rho_zz', rho_zz_new, 2) - call mpas_pool_get_array(state, 'rho_zz', rho_zz, 2) - call mpas_pool_get_array(state, 'scalars', scalars_old, 1) - call mpas_pool_get_array(state, 'scalars', scalars_new, 2) call mpas_pool_get_field(state, 'scalars', scalars_old_field, 1) - call mpas_pool_get_array(mesh, 'areaCell', areaCell) - call mpas_pool_get_array(mesh, 'deriv_two', deriv_two) - call mpas_pool_get_array(mesh, 'dvEdge', dvEdge) - call mpas_pool_get_array(mesh, 'dcEdge', dcEdge) - call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) - call mpas_pool_get_array(mesh, 'cellsOnCell', cellsOnCell) - call mpas_pool_get_array(mesh, 'fzm', fnm) - call mpas_pool_get_array(mesh, 'fzp', 
fnp) - call mpas_pool_get_array(mesh, 'rdzw', rdnw) - call mpas_pool_get_array(mesh, 'meshScalingDel2', meshScalingDel2) - call mpas_pool_get_array(mesh, 'meshScalingDel4', meshScalingDel4) - call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) - call mpas_pool_get_array(mesh, 'nAdvCellsForEdge', nAdvCellsForEdge) - call mpas_pool_get_array(mesh, 'advCellsForEdge', advCellsForEdge) - call mpas_pool_get_array(mesh, 'adv_coefs', adv_coefs) - call mpas_pool_get_array(mesh, 'adv_coefs_3rd', adv_coefs_3rd) - call mpas_pool_get_array(mesh, 'qv_init', qv_init) - call mpas_pool_get_array(mesh, 'zgrid', zgrid) - -#ifndef DO_PHYSICS - scalar_tend = 0. ! testing purposes - we have no sources or sinks -#endif - ! for positive-definite or monotonic option, we first update scalars using the tendency from sources other than ! the resolved transport (these should constitute a positive definite update). ! Note, however, that we enforce positive-definiteness in this update. ! The transport will maintain this positive definite solution and optionally, shape preservation (monotonicity). - do iCell = 1, nCellsSolve - do k = 1, nVertLevels + do iCell=cellSolveStart,cellSolveEnd +!DIR$ IVDEP + do k = 1,nVertLevels +!DIR$ IVDEP do iScalar = 1,num_scalars + +#ifndef DO_PHYSICS +!TBH: Michael, would you please check this change? Our test uses -DDO_PHYSICS +!TBH: so this code is not executed. The change avoids redundant work. + scalar_tend(iScalar,k,iCell) = 0.0 ! testing purposes - we have no sources or sinks +#endif scalars_old(iScalar,k,iCell) = scalars_old(iScalar,k,iCell)+dt*scalar_tend(iScalar,k,iCell) / rho_zz_old(k,iCell) - scalar_tend(iScalar,k,iCell) = 0. + scalar_tend(iScalar,k,iCell) = 0.0 end do end do end do - ! halo exchange +!$OMP BARRIER +!$OMP MASTER call mpas_dmpar_exch_halo_field(scalars_old_field) +!$OMP END MASTER +!$OMP BARRIER ! ! Runge Kutta integration, so we compute fluxes from scalar_new values, update starts from scalar_old ! 
if (local_advance_density) then + if (.not.present(rho_zz_int)) then + call mpas_dmpar_global_abort('Error: rho_zz_int not supplied to atm_advance_scalars_mono_work( ) when advance_density=.true.') + end if + ! begin with update of density - allocate(rho_zz_int(nVertLevels,nCells)) - rho_zz_int(:,:) = 0.0 - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then ! only for owned cells + do iCell=cellStart,cellEnd + rho_zz_int(:,iCell) = 0.0 + end do +!$OMP BARRIER + do iCell=cellSolveStart,cellSolveEnd + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + +!DIR$ IVDEP do k=1,nVertLevels - rho_zz_int(k,cell1) = rho_zz_int(k,cell1) - uhAvg(k,iEdge)*dvEdge(iEdge)/areaCell(cell1) - rho_zz_int(k,cell2) = rho_zz_int(k,cell2) + uhAvg(k,iEdge)*dvEdge(iEdge)/areaCell(cell2) + rho_zz_int(k,iCell) = rho_zz_int(k,iCell) - edgesOnCell_sign(i,iCell) * uhAvg(k,iEdge) * dvEdge(iEdge) * invAreaCell(iCell) end do - end if + + end do end do - do iCell=1,nCellsSolve + do iCell=cellSolveStart,cellSolveEnd +!DIR$ IVDEP do k=1,nVertLevels rho_zz_int(k,iCell) = rho_zz_old(k,iCell) + dt*( rho_zz_int(k,iCell) - rdnw(k)*(wwAvg(k+1,iCell)-wwAvg(k,iCell)) ) end do end do - else - rho_zz_int => rho_zz_new +!$OMP BARRIER end if ! next, do one scalar at a time do iScalar = 1, num_scalars -! 
write(0,*) ' mono transport for scalar ',iScalar - do iCell = 1, nCells - do k = 1, nVertLevels - scalar_old(k,iCell) = scalars_old(iScalar,k,iCell) - scalar_new(k,iCell) = scalars_new(iScalar,k,iCell) - end do + do iCell=cellStart,cellEnd +!DIR$ IVDEP + do k=1,nVertLevels + scalar_old(k,iCell) = scalars_old(iScalar,k,iCell) + scalar_new(k,iCell) = scalars_new(iScalar,k,iCell) + end do end do - if (debug_print) then - scmin = scalar_old(1,1) - scmax = scalar_old(1,1) - do iCell = 1, nCells - do k=1, nVertLevels - scmin = min(scmin,scalar_old(k,iCell)) - scmax = max(scmax,scalar_old(k,iCell)) - end do - end do - write(0,*) ' scmin, scmin old in ',scmin,scmax +!$OMP BARRIER - scmin = scalar_new(1,1) - scmax = scalar_new(1,1) - do iCell = 1, nCells - do k=1, nVertLevels - scmin = min(scmin,scalar_new(k,iCell)) - scmax = max(scmax,scalar_new(k,iCell)) - end do - end do - write(0,*) ' scmin, scmin new in ',scmin,scmax - end if +#ifdef DEBUG_TRANSPORT + scmin = scalar_old(1,1) + scmax = scalar_old(1,1) + do iCell = 1, nCells + do k=1, nVertLevels + scmin = min(scmin,scalar_old(k,iCell)) + scmax = max(scmax,scalar_old(k,iCell)) + end do + end do + write(0,*) ' scmin, scmin old in ',scmin,scmax + scmin = scalar_new(1,1) + scmax = scalar_new(1,1) + do iCell = 1, nCells + do k=1, nVertLevels + scmin = min(scmin,scalar_new(k,iCell)) + scmax = max(scmax,scalar_new(k,iCell)) + end do + end do + write(0,*) ' scmin, scmin new in ',scmin,scmax +#endif - ! - ! vertical flux divergence, and min and max bounds for flux limiter - ! - - do iCell=1,nCellsSolve + ! + ! vertical flux divergence, and min and max bounds for flux limiter + ! + do iCell=cellSolveStart,cellSolveEnd ! zero flux at top and bottom - wdtn(1,iCell) = 0. - wdtn(nVertLevels+1,iCell) = 0. 
+ wdtn(1,iCell) = 0.0 + wdtn(nVertLevels+1,iCell) = 0.0 k = 1 s_max(k,iCell) = max(scalar_old(1,iCell),scalar_old(2,iCell)) @@ -2143,6 +3503,7 @@ subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCe s_max(k,iCell) = max(scalar_old(k-1,iCell),scalar_old(k,iCell),scalar_old(k+1,iCell)) s_min(k,iCell) = min(scalar_old(k-1,iCell),scalar_old(k,iCell),scalar_old(k+1,iCell)) +!DIR$ IVDEP do k=3,nVertLevels-1 wdtn(k,iCell) = flux3( scalar_new(k-2,iCell),scalar_new(k-1,iCell), & scalar_new(k ,iCell),scalar_new(k+1,iCell), & @@ -2156,12 +3517,15 @@ subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCe s_max(k,iCell) = max(scalar_old(k,iCell),scalar_old(k-1,iCell)) s_min(k,iCell) = min(scalar_old(k,iCell),scalar_old(k-1,iCell)) - ! pull s_min and s_max from the (horizontal) surrounding cells + ! + ! pull s_min and s_max from the (horizontal) surrounding cells + ! ! speclal treatment of calculations involving hexagonal cells ! original code retained in select "default" case select case(nEdgesOnCell(iCell)) case(6) +!DIR$ IVDEP do k=1, nVertLevels s_max(k,iCell) = max(s_max(k,iCell), & scalar_old(k, cellsOnCell(1,iCell)), & @@ -2181,6 +3545,7 @@ subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCe case default do i=1, nEdgesOnCell(iCell) +!DIR$ IVDEP do k=1, nVertLevels s_max(k,iCell) = max(s_max(k,iCell),scalar_old(k, cellsOnCell(i,iCell))) s_min(k,iCell) = min(s_min(k,iCell),scalar_old(k, cellsOnCell(i,iCell))) @@ -2190,11 +3555,13 @@ subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCe end do - ! - ! horizontal flux divergence +!$OMP BARRIER + + ! + ! horizontal flux divergence + ! - !flux_arr(:,:) = 0. ! 
Now only initialized as needed (see default case) - do iEdge=1,nEdges + do iEdge=edgeStart,edgeEnd cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) @@ -2202,15 +3569,14 @@ subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCe if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then ! only for owned cells ! speclal treatment of calculations involving edges between hexagonal cells - ! also took advantage of fact that coef_3rd_order is never negative ! original code retained in select "default" case ! be sure to see additional declarations near top of subroutine select case(nAdvCellsForEdge(iEdge)) case(10) do jj=1,10 ica(jj) = advCellsForEdge(jj,iEdge) - swa(jj,1) = adv_coefs(jj,iEdge) + coef_3rd_order*adv_coefs_3rd(jj,iEdge) - swa(jj,2) = adv_coefs(jj,iEdge) - coef_3rd_order*adv_coefs_3rd(jj,iEdge) + swa(jj,1) = adv_coefs(jj,iEdge) + adv_coefs_3rd(jj,iEdge) + swa(jj,2) = adv_coefs(jj,iEdge) - adv_coefs_3rd(jj,iEdge) enddo do k=1,nVertLevels ii = merge(1, 2, uhAvg(k,iEdge) > 0) @@ -2228,8 +3594,9 @@ subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCe enddo do i=1,nAdvCellsForEdge(iEdge) iCell = advCellsForEdge(i,iEdge) +!DIR$ IVDEP do k=1,nVertLevels - scalar_weight = uhAvg(k,iEdge)*(adv_coefs(i,iEdge) + coef_3rd_order*sign(1.0_RKIND,uhAvg(k,iEdge))*adv_coefs_3rd(i,iEdge)) + scalar_weight = uhAvg(k,iEdge)*(adv_coefs(i,iEdge) + sign(1.0_RKIND,uhAvg(k,iEdge))*adv_coefs_3rd(i,iEdge)) flux_arr(k,iEdge) = flux_arr(k,iEdge) + scalar_weight* scalar_new(k,iCell) end do end do @@ -2239,183 +3606,250 @@ subroutine atm_advance_scalars_mono(block, tend, state, diag, mesh, configs, nCe end do -! vertical flux divergence for upwind update, we will put upwind update into scalar_new, and put factor of dt in fluxes +!$OMP BARRIER - do iCell = 1, nCellsSolve + ! + ! vertical flux divergence for upwind update, we will put upwind update into scalar_new, and put factor of dt in fluxes + ! 
+ + do iCell=cellSolveStart,cellSolveEnd k = 1 - scalar_new(k,iCell) = scalar_old(k,iCell)*rho_zz_old(k,iCell) + scalar_new(k,iCell) = scalar_old(k,iCell) * rho_zz_old(k,iCell) +!DIR$ IVDEP do k = 2, nVertLevels scalar_new(k,iCell) = scalar_old(k,iCell)*rho_zz_old(k,iCell) - flux_upwind = dt*(max(0.0_RKIND,wwAvg(k,iCell))*scalar_old(k-1,iCell) + min(0.0_RKIND,wwAvg(k,iCell))*scalar_old(k,iCell)) - scalar_new(k-1,iCell) = scalar_new(k-1,iCell) - flux_upwind*rdnw(k-1) - scalar_new(k ,iCell) = scalar_new(k ,iCell) + flux_upwind*rdnw(k) - wdtn(k,iCell) = dt*wdtn(k,iCell) - flux_upwind + flux_upwind_arr(k) = dt*(max(0.0_RKIND,wwAvg(k,iCell))*scalar_old(k-1,iCell) + min(0.0_RKIND,wwAvg(k,iCell))*scalar_old(k,iCell)) + end do + do k = 1, nVertLevels-1 + scalar_new(k,iCell) = scalar_new(k,iCell) - flux_upwind_arr(k+1)*rdnw(k) + end do +!DIR$ IVDEP + do k = 2, nVertLevels + scalar_new(k ,iCell) = scalar_new(k ,iCell) + flux_upwind_arr(k)*rdnw(k) + wdtn(k,iCell) = dt*wdtn(k,iCell) - flux_upwind_arr(k) end do -! scale_arr(SCALE_IN,:,:) and scale_arr(SCALE_OUT:,:) are used here to store the incoming and outgoing perturbation flux -! contributions to the update: first the vertical flux component, then the horizontal + ! + ! scale_arr(SCALE_IN,:,:) and scale_arr(SCALE_OUT:,:) are used here to store the incoming and outgoing perturbation flux + ! contributions to the update: first the vertical flux component, then the horizontal + ! + +!DIR$ IVDEP do k=1,nVertLevels - scale_arr(SCALE_IN, k,iCell) = - rdnw(k)*(min(0.0_RKIND,wdtn(k+1,iCell))-max(0.0_RKIND,wdtn(k,iCell))) - scale_arr(SCALE_OUT,k,iCell) = - rdnw(k)*(max(0.0_RKIND,wdtn(k+1,iCell))-min(0.0_RKIND,wdtn(k,iCell))) + scale_arr(k,SCALE_IN, iCell) = - rdnw(k)*(min(0.0_RKIND,wdtn(k+1,iCell))-max(0.0_RKIND,wdtn(k,iCell))) + scale_arr(k,SCALE_OUT,iCell) = - rdnw(k)*(max(0.0_RKIND,wdtn(k+1,iCell))-min(0.0_RKIND,wdtn(k,iCell))) end do end do -! horizontal flux divergence for upwind update + ! + ! 
horizontal flux divergence for upwind update + ! ! upwind flux computation - - ! Precompute the flux_arr/areaCell before updating scale_arr - do iEdge=1,nEdges + do iEdge=edgeStart,edgeEnd cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then ! only for owned cells - do k=1, nVertLevels - flux_upwind = dvEdge(iEdge) * dt * & - (max(0.0_RKIND,uhAvg(k,iEdge))*scalar_old(k,cell1) + min(0.0_RKIND,uhAvg(k,iEdge))*scalar_old(k,cell2)) - flux_arr(k,iEdge) = dt*flux_arr(k,iEdge) - flux_upwind - scalar_new(k,cell1) = scalar_new(k,cell1) - flux_upwind / areaCell(cell1) - scalar_new(k,cell2) = scalar_new(k,cell2) + flux_upwind / areaCell(cell2) - - f1 = flux_arr(k,iEdge) / areaCell(cell1) - scale_arr(SCALE_OUT,k,cell1) = scale_arr(SCALE_OUT,k,cell1) - max(0.0_RKIND,f1) - scale_arr(SCALE_IN, k,cell1) = scale_arr(SCALE_IN, k,cell1) - min(0.0_RKIND,f1) - - f2 = flux_arr(k,iEdge) / areaCell(cell2) - scale_arr(SCALE_OUT,k,cell2) = scale_arr(SCALE_OUT,k,cell2) + min(0.0_RKIND,f2) - scale_arr(SCALE_IN, k,cell2) = scale_arr(SCALE_IN, k,cell2) + max(0.0_RKIND,f2) - - ! scale_arr(SCALE_OUT,k,cell1) = scale_arr(SCALE_OUT,k,cell1) - max(0.0_RKIND,flux_arr(k,iEdge)) / areaCell(cell1) - ! scale_arr(SCALE_IN, k,cell1) = scale_arr(SCALE_IN, k,cell1) - min(0.0_RKIND,flux_arr(k,iEdge)) / areaCell(cell1) - ! scale_arr(SCALE_OUT,k,cell2) = scale_arr(SCALE_OUT,k,cell2) + min(0.0_RKIND,flux_arr(k,iEdge)) / areaCell(cell2) - ! 
scale_arr(SCALE_IN, k,cell2) = scale_arr(SCALE_IN, k,cell2) + max(0.0_RKIND,flux_arr(k,iEdge)) / areaCell(cell2) +!DIR$ IVDEP + do k=1, nVertLevels + flux_upwind_tmp(k,iEdge) = dvEdge(iEdge) * dt * & + (max(0.0_RKIND,uhAvg(k,iEdge))*scalar_old(k,cell1) + min(0.0_RKIND,uhAvg(k,iEdge))*scalar_old(k,cell2)) + flux_tmp(k,iEdge) = dt * flux_arr(k,iEdge) - flux_upwind_tmp(k,iEdge) + end do + end do +!$OMP BARRIER + do iCell=cellSolveStart,cellSolveEnd + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) +!DIR$ IVDEP + do k=1, nVertLevels + scalar_new(k,iCell) = scalar_new(k,iCell) - edgesOnCell_sign(i,iCell) * flux_upwind_tmp(k,iEdge) * invAreaCell(iCell) + + scale_arr(k,SCALE_OUT,iCell) = scale_arr(k,SCALE_OUT,iCell) & + - max(0.0_RKIND,edgesOnCell_sign(i,iCell)*flux_tmp(k,iEdge)) * invAreaCell(iCell) + scale_arr(k,SCALE_IN, iCell) = scale_arr(k,SCALE_IN, iCell) & + - min(0.0_RKIND,edgesOnCell_sign(i,iCell)*flux_tmp(k,iEdge)) * invAreaCell(iCell) end do - end if + + end do end do -! next, the limiter + ! + ! next, the limiter + ! ! simplification of limiter calculations ! worked through algebra and found equivalent form ! 
added benefit that it should address ifort single prec overflow issue - do iCell = 1, nCellsSolve + if (local_advance_density) then + do iCell=cellSolveStart,cellSolveEnd +!DIR$ IVDEP do k = 1, nVertLevels scale_factor = (s_max(k,iCell)*rho_zz_int(k,iCell) - scalar_new(k,iCell)) / & - (scale_arr(SCALE_IN,k,iCell) + eps) - scale_arr(SCALE_IN,k,iCell) = min( 1.0_RKIND, max( 0.0_RKIND, scale_factor) ) + (scale_arr(k,SCALE_IN,iCell) + eps) + scale_arr(k,SCALE_IN,iCell) = min( 1.0_RKIND, max( 0.0_RKIND, scale_factor) ) scale_factor = (s_min(k,iCell)*rho_zz_int(k,iCell) - scalar_new(k,iCell)) / & - (scale_arr(SCALE_OUT,k,iCell) - eps) - scale_arr(SCALE_OUT,k,iCell) = min( 1.0_RKIND, max( 0.0_RKIND, scale_factor) ) + (scale_arr(k,SCALE_OUT,iCell) - eps) + scale_arr(k,SCALE_OUT,iCell) = min( 1.0_RKIND, max( 0.0_RKIND, scale_factor) ) end do end do + else + do iCell=cellSolveStart,cellSolveEnd +!DIR$ IVDEP + do k = 1, nVertLevels -! -! communicate scale factors here. -! communicate only first halo row in these next two exchanges -! + scale_factor = (s_max(k,iCell)*rho_zz_new(k,iCell) - scalar_new(k,iCell)) / & + (scale_arr(k,SCALE_IN,iCell) + eps) + scale_arr(k,SCALE_IN,iCell) = min( 1.0_RKIND, max( 0.0_RKIND, scale_factor) ) + + scale_factor = (s_min(k,iCell)*rho_zz_new(k,iCell) - scalar_new(k,iCell)) / & + (scale_arr(k,SCALE_OUT,iCell) - eps) + scale_arr(k,SCALE_OUT,iCell) = min( 1.0_RKIND, max( 0.0_RKIND, scale_factor) ) + end do + end do + end if + + ! + ! communicate scale factors here. + ! communicate only first halo row in these next two exchanges + ! 
+!$OMP BARRIER +!$OMP MASTER tempField => tempFieldTarget tempField % block => block - tempField % dimSizes(1) = 2 - tempField % dimSizes(2) = nVertLevels + tempField % dimSizes(1) = nVertLevels + tempField % dimSizes(2) = 2 tempField % dimSizes(3) = nCells tempField % sendList => block % parinfo % cellsToSend tempField % recvList => block % parinfo % cellsToRecv tempField % copyList => block % parinfo % cellsToCopy tempField % prev => null() tempField % next => null() + tempField % isActive = .true. tempField % array => scale_arr call mpas_dmpar_exch_halo_field(tempField, (/ 1 /)) +!$OMP END MASTER +!$OMP BARRIER + + do iEdge=edgeStart,edgeEnd + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) + if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then ! only for owned cells +!DIR$ IVDEP + do k=1, nVertLevels + flux_upwind = dvEdge(iEdge) * dt * & + (max(0.0_RKIND,uhAvg(k,iEdge))*scalar_old(k,cell1) + min(0.0_RKIND,uhAvg(k,iEdge))*scalar_old(k,cell2)) + flux_arr(k,iEdge) = dt*flux_arr(k,iEdge) - flux_upwind + end do + end if + end do + + ! + ! rescale the fluxes + ! -! -! rescale the fluxes -! ! moved assignment to scalar_new from separate loop (see commented code below) ! into the following loops. Avoids having to save elements of flux array - do iEdge = 1, nEdges + do iEdge=edgeStart,edgeEnd cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then +!DIR$ IVDEP do k = 1, nVertLevels flux = flux_arr(k,iEdge) - flux = max(0.0_RKIND,flux) * min(scale_arr(SCALE_OUT,k,cell1), scale_arr(SCALE_IN, k,cell2)) & - + min(0.0_RKIND,flux) * min(scale_arr(SCALE_IN, k,cell1), scale_arr(SCALE_OUT,k,cell2)) - ! 
flux_arr(k,iEdge) = flux - scalar_new(k,cell1) = scalar_new(k,cell1) - flux/areaCell(cell1) - scalar_new(k,cell2) = scalar_new(k,cell2) + flux/areaCell(cell2) + flux = max(0.0_RKIND,flux) * min(scale_arr(k,SCALE_OUT,cell1), scale_arr(k,SCALE_IN, cell2)) & + + min(0.0_RKIND,flux) * min(scale_arr(k,SCALE_IN, cell1), scale_arr(k,SCALE_OUT,cell2)) + flux_arr(k,iEdge) = flux end do end if end do - ! rescale the vertical flux + ! + ! rescale the vertical flux + ! +!$OMP BARRIER - do iCell=1,nCellsSolve + do iCell=cellSolveStart,cellSolveEnd +!DIR$ IVDEP do k = 2, nVertLevels - flux = wdtn(k,iCell) - flux = max(0.0_RKIND,flux) * min(scale_arr(SCALE_OUT,k-1,iCell), scale_arr(SCALE_IN,k ,iCell)) & - + min(0.0_RKIND,flux) * min(scale_arr(SCALE_OUT,k ,iCell), scale_arr(SCALE_IN,k-1,iCell)) + flux = wdtn(k,iCell) + flux = max(0.0_RKIND,flux) * min(scale_arr(k-1,SCALE_OUT,iCell), scale_arr(k ,SCALE_IN,iCell)) & + + min(0.0_RKIND,flux) * min(scale_arr(k ,SCALE_OUT,iCell), scale_arr(k-1,SCALE_IN,iCell)) wdtn(k,iCell) = flux end do end do -! -! do the scalar update now that we have the fluxes -! - do iCell=1,nCellsSolve - do k=1,nVertLevels - scalar_new(k,iCell) = ( scalar_new(k,iCell) & - + (-rdnw(k)*(wdtn(k+1,iCell)-wdtn(k,iCell)) ) )/rho_zz_int(k,iCell) - end do - end do - if(debug_print) then + ! + ! do the scalar update now that we have the fluxes + ! 
+ do iCell=cellSolveStart,cellSolveEnd + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) +!DIR$ IVDEP + do k=1,nVertLevels + scalar_new(k,iCell) = scalar_new(k,iCell) - edgesOnCell_sign(i,iCell)*flux_arr(k,iEdge) * invAreaCell(iCell) + end do + end do - scmin = scalar_new(1,1) - scmax = scalar_new(1,1) - do iCell = 1, nCellsSolve - do k=1, nVertLevels - scmax = max(scmax,scalar_new(k,iCell)) - scmin = min(scmin,scalar_new(k,iCell)) - if (s_max(k,iCell) < scalar_new(k,iCell)) then - write(32,*) ' over - k,iCell,s_min,s_max,scalar_new ',k,iCell,s_min(k,iCell),s_max(k,iCell),scalar_new(k,iCell) - end if - if (s_min(k,iCell) > scalar_new(k,iCell)) then - write(32,*) ' under - k,iCell,s_min,s_max,scalar_new ',k,iCell,s_min(k,iCell),s_max(k,iCell),scalar_new(k,iCell) - end if + if (local_advance_density) then +!DIR$ IVDEP + do k=1,nVertLevels + scalar_new(k,iCell) = ( scalar_new(k,iCell) + (-rdnw(k)*(wdtn(k+1,iCell)-wdtn(k,iCell)) ) )/rho_zz_int(k,iCell) end do + else +!DIR$ IVDEP + do k=1,nVertLevels + scalar_new(k,iCell) = ( scalar_new(k,iCell) + (-rdnw(k)*(wdtn(k+1,iCell)-wdtn(k,iCell)) ) )/rho_zz_new(k,iCell) end do - write(0,*) ' scmin, scmax new out ',scmin,scmax - write(0,*) ' icell_min, k_min ',icellmax, kmax + end if + end do - end if +#ifdef DEBUG_TRANSPORT + scmin = scalar_new(1,1) + scmax = scalar_new(1,1) + do iCell = 1, nCellsSolve + do k=1, nVertLevels + scmax = max(scmax,scalar_new(k,iCell)) + scmin = min(scmin,scalar_new(k,iCell)) + if (s_max(k,iCell) < scalar_new(k,iCell)) then + write(32,*) ' over - k,iCell,s_min,s_max,scalar_new ',k,iCell,s_min(k,iCell),s_max(k,iCell),scalar_new(k,iCell) + end if + if (s_min(k,iCell) > scalar_new(k,iCell)) then + write(32,*) ' under - k,iCell,s_min,s_max,scalar_new ',k,iCell,s_min(k,iCell),s_max(k,iCell),scalar_new(k,iCell) + end if + end do + end do + write(0,*) ' scmin, scmax new out ',scmin,scmax + write(0,*) ' icell_min, k_min ',icellmax, kmax +#endif ! the update should be positive definite. 
but roundoff can sometimes leave small negative values ! hence the enforcement of PD in the copy back to the model state. +!$OMP BARRIER - do iCell = 1, nCells - do k=1, nVertLevels - scalars_new(iScalar,k,iCell) = max(0.0_RKIND,scalar_new(k,iCell)) - end do + do iCell=cellStart,cellEnd + do k=1, nVertLevels + scalars_new(iScalar,k,iCell) = max(0.0_RKIND,scalar_new(k,iCell)) + end do end do end do ! loop over scalars - if (local_advance_density) then - deallocate(rho_zz_int) - end if - - end subroutine atm_advance_scalars_mono + end subroutine atm_advance_scalars_mono_work -!---- - subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, rk_step, dt) + subroutine atm_compute_dyn_tend(tend, tend_physics, state, diag, mesh, configs, nVertLevels, rk_step, dt, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! Compute height and normal wind tendencies, as well as diagnostic variables ! @@ -2431,7 +3865,11 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r implicit none + ! + ! Dummy arguments + ! type (mpas_pool_type), intent(inout) :: tend + type (mpas_pool_type), intent(inout) :: tend_physics type (mpas_pool_type), intent(in) :: state type (mpas_pool_type), intent(in) :: diag type (mpas_pool_type), intent(in) :: mesh @@ -2439,119 +3877,92 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r integer, intent(in) :: nVertLevels ! for allocating stack variables integer, intent(in) :: rk_step real (kind=RKIND), intent(in) :: dt + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd + + ! + ! Local variables + ! 
+ integer, pointer :: nCells, nEdges, nVertices, nCellsSolve, nEdgesSolve, vertexDegree, maxEdges, maxEdges2 + integer, pointer :: moist_start, moist_end, num_scalars + real (kind=RKIND), dimension(:), pointer :: fEdge, dvEdge, dcEdge, invDcEdge, invDvEdge, invAreaCell, invAreaTriangle, & + meshScalingDel2, meshScalingDel4 + real (kind=RKIND), dimension(:,:), pointer :: weightsOnEdge, zgrid, rho_edge, rho_zz, ru, u, v, tend_u, & + divergence, vorticity, ke, pv_edge, theta_m, rw, tend_rho, & + rt_diabatic_tend, tend_theta, tend_w, w, cqw, rb, rr, pp, pressure_b, zz, zxu, cqu, & + h_divergence, kdiff, edgesOnCell_sign, edgesOnVertex_sign, rw_save, ru_save - logical, parameter :: rk_diffusion = .false. + real (kind=RKIND), dimension(:,:), pointer :: theta_m_save - integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j, iq - real (kind=RKIND) :: flux, workpv + real (kind=RKIND), dimension(:,:), pointer :: exner + + real (kind=RKIND), dimension(:,:), pointer :: rr_save + + + real (kind=RKIND), dimension(:,:), pointer :: tend_rtheta_adv ! needed for Tiedtke convection scheme + real (kind=RKIND), dimension(:,:), pointer :: rthdynten ! 
needed for Grell-Freitas convection scheme - integer, pointer :: nCells, nEdges, nVertices, nCellsSolve, nEdgesSolve - integer, pointer :: moist_start, moist_end - real (kind=RKIND), pointer :: h_mom_eddy_visc2, v_mom_eddy_visc2 - real (kind=RKIND), pointer :: h_theta_eddy_visc2, v_theta_eddy_visc2 - real (kind=RKIND) :: h_mom_eddy_visc4 - real (kind=RKIND) :: h_theta_eddy_visc4 - real (kind=RKIND) :: u_diffusion - real (kind=RKIND), dimension(:), pointer :: fEdge, dvEdge, dcEdge, areaCell, areaTriangle, meshScalingDel2, meshScalingDel4 - real (kind=RKIND), dimension(:,:), pointer :: weightsOnEdge, zgrid, rho_edge, rho_zz, ru, u, v, tend_u, & - circulation, divergence, vorticity, ke, pv_edge, theta_m, rw, tend_rho, & - rt_diabatic_tend, tend_theta, tend_w, w, cqw, rb, rr, pp, pressure_b, zz, zx, cqu, & - h_divergence, kdiff real (kind=RKIND), dimension(:,:,:), pointer :: scalars real (kind=RKIND), dimension(:,:), pointer :: tend_u_euler, tend_w_euler, tend_theta_euler real (kind=RKIND), dimension(:,:,:), pointer :: deriv_two - integer, dimension(:,:), pointer :: cellsOnEdge, verticesOnEdge, edgesOnCell, edgesOnEdge, cellsOnCell + integer, dimension(:,:), pointer :: cellsOnEdge, verticesOnEdge, edgesOnCell, edgesOnEdge, cellsOnCell, edgesOnVertex integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge real (kind=RKIND), dimension(:), pointer :: latCell, latEdge, angleEdge, u_init - real (kind=RKIND), dimension( nVertLevels + 1 ) :: wduz, wdwz, wdtz, dpzx - real (kind=RKIND), dimension( nVertLevels ) :: u_mix, ru_edge_w, q - real (kind=RKIND) :: theta_turb_flux, z1, z2, z3, z4, zm, z0, zp, r - real (kind=RKIND) :: d2fdx2_cell1, d2fdx2_cell2 - integer, dimension(:,:), pointer :: advCellsForEdge integer, dimension(:), pointer :: nAdvCellsForEdge real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_3rd - real (kind=RKIND) :: scalar_weight real (kind=RKIND), dimension(:), pointer :: rdzu, rdzw, fzm, fzp, qv_init real (kind=RKIND), dimension(:,:), 
pointer :: t_init - real (kind=RKIND), dimension(:,:), pointer :: cpr, cpl, pzp, pzm - integer :: kr, kl - - real (kind=RKIND), allocatable, dimension(:,:) :: divergence_ru, qtot - real (kind=RKIND), allocatable, dimension(:,:) :: delsq_theta, delsq_divergence - real (kind=RKIND), allocatable, dimension(:,:) :: delsq_u - real (kind=RKIND), allocatable, dimension(:,:) :: delsq_circulation, delsq_vorticity real (kind=RKIND), pointer :: cf1, cf2, cf3 - real (kind=RKIND) :: pr, pl - real (kind=RKIND) :: prandtl_inv - logical, parameter :: debug = .false. - - logical, parameter :: curvature = .true. real (kind=RKIND), pointer :: r_earth real (kind=RKIND), dimension(:,:), pointer :: ur_cell, vr_cell - real (kind=RKIND), parameter :: c_s = 0.125 -! real (kind=RKIND), parameter :: c_s = 0.25 - real (kind=RKIND), dimension( nVertLevels ) :: d_diag, d_off_diag, flux_arr real (kind=RKIND), dimension(:,:), pointer :: defc_a, defc_b - logical :: delsq_horiz_mixing real(kind=RKIND), dimension(:,:), pointer :: tend_w_pgf, tend_w_buoy - real (kind=RKIND), pointer :: coef_3rd_order - logical, pointer :: newpx + real (kind=RKIND), pointer :: coef_3rd_order, c_s, smdiv logical, pointer :: config_mix_full - integer, pointer :: config_u_vadv_order - integer, pointer :: config_theta_vadv_order character (len=StrKIND), pointer :: config_horiz_mixing - integer, pointer :: config_theta_adv_order - integer, pointer :: config_w_vadv_order - integer, pointer :: config_w_adv_order real (kind=RKIND), pointer :: config_del4u_div_factor real (kind=RKIND), pointer :: config_h_theta_eddy_visc4 real (kind=RKIND), pointer :: config_h_mom_eddy_visc4 real (kind=RKIND), pointer :: config_visc4_2dsmag real (kind=RKIND), pointer :: config_len_disp + real (kind=RKIND), pointer :: config_h_mom_eddy_visc2, config_v_mom_eddy_visc2 + real (kind=RKIND), pointer :: config_h_theta_eddy_visc2, config_v_theta_eddy_visc2 - real (kind=RKIND) :: flux3, flux4 - real (kind=RKIND) :: q_im2, q_im1, q_i, q_ip1, ua, coef3 - 
- flux4(q_im2, q_im1, q_i, q_ip1, ua) = & - ua*( 7.*(q_i + q_im1) - (q_ip1 + q_im2) )/12.0 - - flux3(q_im2, q_im1, q_i, q_ip1, ua, coef3) = & - flux4(q_im2, q_im1, q_i, q_ip1, ua) + & - coef3*abs(ua)*((q_ip1 - q_im2)-3.*(q_i-q_im1))/12.0 + logical :: inactive_rthdynten -!----------- call mpas_pool_get_config(mesh, 'sphere_radius', r_earth) - call mpas_pool_get_config(configs, 'config_newpx', newpx) call mpas_pool_get_config(configs, 'config_coef_3rd_order', coef_3rd_order) call mpas_pool_get_config(configs, 'config_mix_full', config_mix_full) - call mpas_pool_get_config(configs, 'config_u_vadv_order', config_u_vadv_order) - call mpas_pool_get_config(configs, 'config_theta_vadv_order', config_theta_vadv_order) call mpas_pool_get_config(configs, 'config_horiz_mixing', config_horiz_mixing) - call mpas_pool_get_config(configs, 'config_theta_adv_order', config_theta_adv_order) - call mpas_pool_get_config(configs, 'config_w_vadv_order', config_w_vadv_order) - call mpas_pool_get_config(configs, 'config_w_adv_order', config_w_adv_order) call mpas_pool_get_config(configs, 'config_del4u_div_factor', config_del4u_div_factor) call mpas_pool_get_config(configs, 'config_h_theta_eddy_visc4', config_h_theta_eddy_visc4) call mpas_pool_get_config(configs, 'config_h_mom_eddy_visc4', config_h_mom_eddy_visc4) + call mpas_pool_get_config(configs, 'config_h_theta_eddy_visc2', config_h_theta_eddy_visc2) + call mpas_pool_get_config(configs, 'config_h_mom_eddy_visc2', config_h_mom_eddy_visc2) + call mpas_pool_get_config(configs, 'config_v_theta_eddy_visc2', config_v_theta_eddy_visc2) + call mpas_pool_get_config(configs, 'config_v_mom_eddy_visc2', config_v_mom_eddy_visc2) call mpas_pool_get_config(configs, 'config_visc4_2dsmag', config_visc4_2dsmag) call mpas_pool_get_config(configs, 'config_len_disp', config_len_disp) + call mpas_pool_get_config(configs, 'config_smagorinsky_coef', c_s) + call mpas_pool_get_config(configs, 'config_smdiv', smdiv) call mpas_pool_get_array(state, 'rho_zz', 
rho_zz, 2) call mpas_pool_get_array(state, 'u', u, 2) call mpas_pool_get_array(state, 'w', w, 2) call mpas_pool_get_array(state, 'theta_m', theta_m, 2) + call mpas_pool_get_array(state, 'theta_m', theta_m_save, 1) call mpas_pool_get_array(state, 'scalars', scalars, 2) call mpas_pool_get_array(diag, 'uReconstructZonal', ur_cell) @@ -2559,11 +3970,13 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r call mpas_pool_get_array(diag, 'rho_edge', rho_edge) call mpas_pool_get_array(diag, 'rho_base', rb) call mpas_pool_get_array(diag, 'rho_p', rr) + call mpas_pool_get_array(diag, 'rho_p_save', rr_save) call mpas_pool_get_array(diag, 'v', v) call mpas_pool_get_array(diag, 'kdiff', kdiff) call mpas_pool_get_array(diag, 'ru', ru) + call mpas_pool_get_array(diag, 'ru_save', ru_save) call mpas_pool_get_array(diag, 'rw', rw) - call mpas_pool_get_array(diag, 'circulation', circulation) + call mpas_pool_get_array(diag, 'rw_save', rw_save) call mpas_pool_get_array(diag, 'divergence', divergence) call mpas_pool_get_array(diag, 'vorticity', vorticity) call mpas_pool_get_array(diag, 'ke', ke) @@ -2571,9 +3984,11 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r call mpas_pool_get_array(diag, 'pressure_p', pp) call mpas_pool_get_array(diag, 'pressure_base', pressure_b) call mpas_pool_get_array(diag, 'h_divergence', h_divergence) + call mpas_pool_get_array(diag, 'exner', exner) + + call mpas_pool_get_array(diag, 'tend_rtheta_adv', tend_rtheta_adv) + call mpas_pool_get_array(tend_physics, 'rthdynten', rthdynten) - call mpas_pool_get_array(mesh, 'pzp', pzp) - call mpas_pool_get_array(mesh, 'pzm', pzm) call mpas_pool_get_array(mesh, 'weightsOnEdge', weightsOnEdge) call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) call mpas_pool_get_array(mesh, 'cellsOnCell', cellsOnCell) @@ -2581,14 +3996,19 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r call mpas_pool_get_array(mesh, 
'nEdgesOnEdge', nEdgesOnEdge) call mpas_pool_get_array(mesh, 'edgesOnEdge', edgesOnEdge) call mpas_pool_get_array(mesh, 'edgesOnCell', edgesOnCell) + call mpas_pool_get_array(mesh, 'edgesOnCell_sign', edgesOnCell_sign) + call mpas_pool_get_array(mesh, 'edgesOnVertex', edgesOnVertex) + call mpas_pool_get_array(mesh, 'edgesOnVertex_sign', edgesOnVertex_sign) call mpas_pool_get_array(mesh, 'dcEdge', dcEdge) call mpas_pool_get_array(mesh, 'dvEdge', dvEdge) - call mpas_pool_get_array(mesh, 'areaCell', areaCell) - call mpas_pool_get_array(mesh, 'areaTriangle', areaTriangle) + call mpas_pool_get_array(mesh, 'invDcEdge', invDcEdge) + call mpas_pool_get_array(mesh, 'invDvEdge', invDvEdge) + call mpas_pool_get_array(mesh, 'invAreaCell', invAreaCell) + call mpas_pool_get_array(mesh, 'invAreaTriangle', invAreaTriangle) call mpas_pool_get_array(mesh, 'fEdge', fEdge) call mpas_pool_get_array(mesh, 'deriv_two', deriv_two) call mpas_pool_get_array(mesh, 'zz', zz) - call mpas_pool_get_array(mesh, 'zx', zx) + call mpas_pool_get_array(mesh, 'zxu', zxu) call mpas_pool_get_array(mesh, 'latCell', latCell) call mpas_pool_get_array(mesh, 'latEdge', latEdge) call mpas_pool_get_array(mesh, 'angleEdge', angleEdge) @@ -2605,8 +4025,6 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r call mpas_pool_get_array(mesh, 'fzm', fzm) call mpas_pool_get_array(mesh, 'fzp', fzp) call mpas_pool_get_array(mesh, 'zgrid', zgrid) - call mpas_pool_get_array(mesh, 'cpr', cpr) - call mpas_pool_get_array(mesh, 'cpl', cpl) call mpas_pool_get_array(tend, 'u', tend_u) call mpas_pool_get_array(tend, 'theta_m', tend_theta) @@ -2627,15 +4045,14 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r call mpas_pool_get_dimension(mesh, 'nVertices', nVertices) call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) call mpas_pool_get_dimension(mesh, 'nEdgesSolve', nEdgesSolve) + call mpas_pool_get_dimension(mesh, 'vertexDegree', vertexDegree) + call 
mpas_pool_get_dimension(mesh, 'maxEdges', maxEdges) + call mpas_pool_get_dimension(mesh, 'maxEdges2', maxEdges2) + call mpas_pool_get_dimension(state, 'num_scalars', num_scalars) call mpas_pool_get_dimension(state, 'moist_start', moist_start) call mpas_pool_get_dimension(state, 'moist_end', moist_end) - call mpas_pool_get_config(configs, 'config_h_mom_eddy_visc2', h_mom_eddy_visc2) - call mpas_pool_get_config(configs, 'config_v_mom_eddy_visc2', v_mom_eddy_visc2) - call mpas_pool_get_config(configs, 'config_h_theta_eddy_visc2', h_theta_eddy_visc2) - call mpas_pool_get_config(configs, 'config_v_theta_eddy_visc2', v_theta_eddy_visc2) - call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) call mpas_pool_get_array(mesh, 'nAdvCellsForEdge', nAdvCellsForEdge) call mpas_pool_get_array(mesh, 'advCellsForEdge', advCellsForEdge) @@ -2646,188 +4063,347 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r call mpas_pool_get_array(mesh, 'cf2', cf2) call mpas_pool_get_array(mesh, 'cf3', cf3) + ! + ! rthdynten is currently associated with packages, and if those packages + ! are not active at run-time, we need to produce an rthdynten array for + ! use in the atm_compute_dyn_tend_work routine + ! + inactive_rthdynten = .false. + if (.not. associated(rthdynten)) then + allocate(rthdynten(nVertLevels,nCells+1)) + rthdynten(:,nCells+1) = 0.0_RKIND + inactive_rthdynten = .true. + end if - prandtl_inv = 1.0_RKIND/prandtl - -! write(0,*) ' rk_step in compute_dyn_tend ',rk_step - - - delsq_horiz_mixing = .false. - if (config_horiz_mixing == "2d_smagorinsky" .and. (rk_step == 1 .or. 
rk_diffusion)) then + call atm_compute_dyn_tend_work(nCells, nEdges, nVertices, nVertLevels, & + nCellsSolve, nEdgesSolve, vertexDegree, maxEdges, maxEdges2, num_scalars, moist_start, moist_end, & + fEdge, dvEdge, dcEdge, invDcEdge, invDvEdge, invAreaCell, invAreaTriangle, meshScalingDel2, meshScalingDel4, & + weightsOnEdge, zgrid, rho_edge, rho_zz, ru, u, v, tend_u, & + divergence, vorticity, ke, pv_edge, theta_m, rw, tend_rho, & + rt_diabatic_tend, tend_theta, tend_w, w, cqw, rb, rr, pp, pressure_b, zz, zxu, cqu, & + h_divergence, kdiff, edgesOnCell_sign, edgesOnVertex_sign, rw_save, ru_save, & + theta_m_save, exner, rr_save, scalars, tend_u_euler, tend_w_euler, tend_theta_euler, deriv_two, & + cellsOnEdge, verticesOnEdge, edgesOnCell, edgesOnEdge, cellsOnCell, edgesOnVertex, nEdgesOnCell, nEdgesOnEdge, & + latCell, latEdge, angleEdge, u_init, advCellsForEdge, nAdvCellsForEdge, adv_coefs, adv_coefs_3rd, & + rdzu, rdzw, fzm, fzp, qv_init, t_init, cf1, cf2, cf3, r_earth, ur_cell, vr_cell, defc_a, defc_b, & + tend_w_pgf, tend_w_buoy, coef_3rd_order, c_s, smdiv, config_mix_full, config_horiz_mixing, config_del4u_div_factor, & + config_h_mom_eddy_visc2, config_v_mom_eddy_visc2, config_h_theta_eddy_visc2, config_v_theta_eddy_visc2, & + config_h_theta_eddy_visc4, config_h_mom_eddy_visc4, config_visc4_2dsmag, config_len_disp, rk_step, dt, & + tend_rtheta_adv, rthdynten, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd) + + if (inactive_rthdynten) then + deallocate(rthdynten) + end if - ! Smagorinsky eddy viscosity, based on horizontal deformation (in this case on model coordinate surfaces). - ! The integration coefficients were precomputed and stored in defc_a and defc_b + end subroutine atm_compute_dyn_tend - do iCell = 1, nCells - d_diag(:) = 0. - d_off_diag(:) = 0. 
- do iEdge = 1, nEdgesOnCell(iCell) - do k=1, nVertLevels - d_diag(k) = d_diag(k) + defc_a(iEdge,iCell)*u(k,EdgesOnCell(iEdge,iCell)) & - - defc_b(iEdge,iCell)*v(k,EdgesOnCell(iEdge,iCell)) - d_off_diag(k) = d_off_diag(k) + defc_b(iEdge,iCell)*u(k,EdgesOnCell(iEdge,iCell)) & - + defc_a(iEdge,iCell)*v(k,EdgesOnCell(iEdge,iCell)) - end do - end do - do k=1, nVertLevels - ! here is the Smagorinsky formulation, - ! followed by imposition of an upper bound on the eddy viscosity - kdiff(k,iCell) = (c_s * config_len_disp)**2 * sqrt(d_diag(k)**2 + d_off_diag(k)**2) - kdiff(k,iCell) = min(kdiff(k,iCell),(0.01*config_len_disp**2)/dt) - end do - end do -!ldf (2012-10-10): - h_mom_eddy_visc4 = config_visc4_2dsmag * config_len_disp**3 - h_theta_eddy_visc4 = h_mom_eddy_visc4 - delsq_horiz_mixing = .true. -! write(0,*) '... config_visc4_2dsmag = ', config_visc4_2dsmag -! write(0,*) '... h_mom_eddy_visc4 = ', h_mom_eddy_visc4 -! write(0,*) '... h_theta_eddy_visc4 = ', h_theta_eddy_visc4 - else if ( config_horiz_mixing == "2d_fixed") then - h_mom_eddy_visc4 = config_h_mom_eddy_visc4 - h_theta_eddy_visc4 = config_h_theta_eddy_visc4 - delsq_horiz_mixing = .true. 
-!ldf (2012-10-10): - end if - tend_u(:,:) = 0.0 + subroutine atm_compute_dyn_tend_work(nCells, nEdges, nVertices, nVertLevels_dummy, & + nCellsSolve, nEdgesSolve, vertexDegree, maxEdges_dummy, maxEdges2_dummy, num_scalars_dummy, moist_start, moist_end, & + fEdge, dvEdge, dcEdge, invDcEdge, invDvEdge, invAreaCell, invAreaTriangle, meshScalingDel2, meshScalingDel4, & + weightsOnEdge, zgrid, rho_edge, rho_zz, ru, u, v, tend_u, & + divergence, vorticity, ke, pv_edge, theta_m, rw, tend_rho, & + rt_diabatic_tend, tend_theta, tend_w, w, cqw, rb, rr, pp, pressure_b, zz, zxu, cqu, & + h_divergence, kdiff, edgesOnCell_sign, edgesOnVertex_sign, rw_save, ru_save, & + theta_m_save, exner, rr_save, scalars, tend_u_euler, tend_w_euler, tend_theta_euler, deriv_two, & + cellsOnEdge, verticesOnEdge, edgesOnCell, edgesOnEdge, cellsOnCell, edgesOnVertex, nEdgesOnCell, nEdgesOnEdge, & + latCell, latEdge, angleEdge, u_init, advCellsForEdge, nAdvCellsForEdge, adv_coefs, adv_coefs_3rd, & + rdzu, rdzw, fzm, fzp, qv_init, t_init, cf1, cf2, cf3, r_earth, ur_cell, vr_cell, defc_a, defc_b, & + tend_w_pgf, tend_w_buoy, coef_3rd_order, c_s, smdiv, config_mix_full, config_horiz_mixing, config_del4u_div_factor, & + config_h_mom_eddy_visc2, config_v_mom_eddy_visc2, config_h_theta_eddy_visc2, config_v_theta_eddy_visc2, & + config_h_theta_eddy_visc4, config_h_mom_eddy_visc4, config_visc4_2dsmag, config_len_disp, rk_step, dt, & + tend_rtheta_adv, rthdynten, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd) - ! tendency for density. - ! accumulate total water here for later use in w tendency calculation. - allocate(divergence_ru(nVertLevels, nCells+1)) - allocate(qtot(nVertLevels, nCells+1)) + use mpas_atm_dimensions - divergence_ru(:,:) = 0.0 - h_divergence(:,:) = 0. - ! 
accumulate horizontal mass-flux + implicit none - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - do k=1,nVertLevels - flux = ru(k,iEdge)*dvEdge(iEdge) - divergence_ru(k,cell1) = divergence_ru(k,cell1) + flux - divergence_ru(k,cell2) = divergence_ru(k,cell2) - flux - end do - end do - qtot(:,:)=0. + ! + ! Dummy arguments + ! + integer :: nCells, nEdges, nVertices, nVertLevels_dummy, nCellsSolve, nEdgesSolve, vertexDegree, & + maxEdges_dummy, maxEdges2_dummy, num_scalars_dummy, moist_start, moist_end + + real (kind=RKIND), dimension(nEdges+1) :: fEdge + real (kind=RKIND), dimension(nEdges+1) :: dvEdge + real (kind=RKIND), dimension(nEdges+1) :: dcEdge + real (kind=RKIND), dimension(nEdges+1) :: invDcEdge + real (kind=RKIND), dimension(nEdges+1) :: invDvEdge + real (kind=RKIND), dimension(nCells+1) :: invAreaCell + real (kind=RKIND), dimension(nVertices+1) :: invAreaTriangle + real (kind=RKIND), dimension(nEdges+1) :: meshScalingDel2 + real (kind=RKIND), dimension(nEdges+1) :: meshScalingDel4 + real (kind=RKIND), dimension(maxEdges2,nEdges+1) :: weightsOnEdge + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: zgrid + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: rho_edge + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rho_zz + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ru + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: u + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: v + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: tend_u + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: divergence + real (kind=RKIND), dimension(nVertLevels,nVertices+1) :: vorticity + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: ke + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: pv_edge + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: theta_m + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: rw + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: tend_rho + real 
(kind=RKIND), dimension(nVertLevels,nCells+1) :: rt_diabatic_tend + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: tend_theta + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: tend_w + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: w + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: cqw + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rb + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rr + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: pp + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: pressure_b + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: zz + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: zxu + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: cqu + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: h_divergence + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: kdiff + real (kind=RKIND), dimension(maxEdges,nCells+1) :: edgesOnCell_sign + real (kind=RKIND), dimension(vertexDegree,nVertices+1) :: edgesOnVertex_sign + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: rw_save + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: ru_save + + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: theta_m_save + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: exner + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rr_save + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1) :: scalars + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: tend_u_euler + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: tend_w_euler + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: tend_theta_euler + real (kind=RKIND), dimension(15,2,nEdges+1) :: deriv_two + integer, dimension(2,nEdges+1) :: cellsOnEdge + integer, dimension(2,nEdges+1) :: verticesOnEdge + integer, dimension(maxEdges,nCells+1) :: edgesOnCell + integer, dimension(maxEdges2,nEdges+1) :: edgesOnEdge + integer, dimension(maxEdges,nCells+1) :: cellsOnCell + integer, 
dimension(vertexDegree,nVertices+1) :: edgesOnVertex + integer, dimension(nCells+1) :: nEdgesOnCell + integer, dimension(nEdges+1) :: nEdgesOnEdge + real (kind=RKIND), dimension(nCells+1) :: latCell + real (kind=RKIND), dimension(nEdges+1) :: latEdge + real (kind=RKIND), dimension(nEdges+1) :: angleEdge + real (kind=RKIND), dimension(nVertLevels) :: u_init + + integer, dimension(15,nEdges+1) :: advCellsForEdge + integer, dimension(nEdges+1) :: nAdvCellsForEdge + real (kind=RKIND), dimension(15,nEdges+1) :: adv_coefs + real (kind=RKIND), dimension(15,nEdges+1) :: adv_coefs_3rd + + real (kind=RKIND), dimension(nVertLevels) :: rdzu + real (kind=RKIND), dimension(nVertLevels) :: rdzw + real (kind=RKIND), dimension(nVertLevels) :: fzm + real (kind=RKIND), dimension(nVertLevels) :: fzp + real (kind=RKIND), dimension(nVertLevels) :: qv_init + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: t_init + + real (kind=RKIND) :: cf1, cf2, cf3 + real (kind=RKIND) :: prandtl_inv, r_areaCell, rgas_cprcv + + real (kind=RKIND) :: r_earth + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: ur_cell + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: vr_cell + + real (kind=RKIND), dimension(maxEdges,nCells+1) :: defc_a + real (kind=RKIND), dimension(maxEdges,nCells+1) :: defc_b + + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: tend_w_pgf + real (kind=RKIND), dimension(nVertLevels+1,nCells+1) :: tend_w_buoy + + real (kind=RKIND) :: coef_3rd_order, c_s, smdiv + logical :: config_mix_full + character (len=StrKIND) :: config_horiz_mixing + real (kind=RKIND) :: config_del4u_div_factor + real (kind=RKIND) :: config_h_theta_eddy_visc4 + real (kind=RKIND) :: config_h_mom_eddy_visc4 + real (kind=RKIND) :: config_visc4_2dsmag + real (kind=RKIND) :: config_len_disp + real (kind=RKIND) :: config_h_mom_eddy_visc2, config_v_mom_eddy_visc2, config_h_theta_eddy_visc2, config_v_theta_eddy_visc2 - ! 
compute horiontal mass-flux divergence, add vertical mass flux divergence to complete tend_rho + integer, intent(in) :: rk_step + real (kind=RKIND), intent(in) :: dt - do iCell = 1,nCells - r = 1.0 / areaCell(iCell) - do k = 1,nVertLevels - divergence_ru(k,iCell) = divergence_ru(k,iCell) * r - h_divergence(k,iCell) = divergence_ru(k,iCell) - tend_rho(k,iCell) = -divergence_ru(k,iCell)-rdzw(k)*(rw(k+1,iCell)-rw(k,iCell)) + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: tend_rtheta_adv + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: rthdynten - do iq = moist_start, moist_end - qtot(k,iCell) = qtot(k,iCell) + scalars(iq, k, iCell) - end do + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd - end do - end do ! - ! Compute u (normal) velocity tendency for each edge (cell face) + ! Local variables ! + integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j, iq, iAdvCell - do iEdge=1,nEdgesSolve + !real (kind=RKIND), parameter :: c_s = 0.125 + real (kind=RKIND), dimension( nVertLevels+1 ) :: d_diag, d_off_diag, flux_arr + real (kind=RKIND), dimension( nVertLevels + 1 ) :: wduz, wdwz, wdtz, dpzx + real (kind=RKIND), dimension( nVertLevels ) :: ru_edge_w, q, u_mix + real (kind=RKIND) :: theta_turb_flux, w_turb_flux, r + real (kind=RKIND) :: scalar_weight + real (kind=RKIND) :: inv_r_earth - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) + real (kind=RKIND) :: invDt, flux, workpv + real (kind=RKIND) :: edge_sign, pr_scale, r_dc, r_dv, u_mix_scale + real (kind=RKIND) :: h_mom_eddy_visc4, v_mom_eddy_visc2 + real (kind=RKIND) :: h_theta_eddy_visc4, v_theta_eddy_visc2 + real (kind=RKIND) :: u_diffusion - ! 
horizontal pressure gradient + real (kind=RKIND) :: kdiffu, z1, z2, z3, z4, zm, z0, zp + + - if (newpx) then + real (kind=RKIND) :: flux3, flux4 + real (kind=RKIND) :: q_im2, q_im1, q_i, q_ip1, ua, coef3 - k = 1 - pr = cpr(k,iEdge)*pp(k,cell2)+cpr(k+1,iEdge)*pp(k+1,cell2)+cpr(k+2,iEdge)*pp(k+2,cell2) - pl = cpl(k,iEdge)*pp(k,cell1)+cpl(k+1,iEdge)*pp(k+1,cell1)+cpl(k+2,iEdge)*pp(k+2,cell1) - tend_u(k,iEdge) = - cqu(k,iEdge)*2./(zz(k,cell1)+zz(k,cell2))*(pr-pl)/dcEdge(iEdge) + flux4(q_im2, q_im1, q_i, q_ip1, ua) = & + ua*( 7.*(q_i + q_im1) - (q_ip1 + q_im2) )/12.0 - do k=2,nVertLevels + flux3(q_im2, q_im1, q_i, q_ip1, ua, coef3) = & + flux4(q_im2, q_im1, q_i, q_ip1, ua) + & + coef3*abs(ua)*((q_ip1 - q_im2)-3.*(q_i-q_im1))/12.0 - kr = min(nVertLevels,k+ nint(.5-sign(0.5_RKIND,zx(k,iEdge)+zx(k+1,iEdge)))) - kl = min(nVertLevels,2*k+1-kr) - pr = pp(k,cell2)+.5*(zgrid(k ,cell1)+zgrid(k +1,cell1)-zgrid(k ,cell2)-zgrid(k +1,cell2)) & - /(zgrid(kr+1,cell2)-zgrid(kr-1,cell2))*( pp(kr,cell2)-pp (kr-1,cell2)) - pl = pp(k,cell1)+.5*(zgrid(k ,cell2)+zgrid(k +1,cell2)-zgrid(k ,cell1)-zgrid(k +1,cell1)) & - /(zgrid(kl+1,cell1)-zgrid(kl-1,cell1))*( pp(kl,cell1)-pp (kl-1,cell1)) - tend_u(k,iEdge) = - cqu(k,iEdge)*2./(zz(k,cell1)+zz(k,cell2))*(pr-pl)/dcEdge(iEdge) + prandtl_inv = 1.0_RKIND / prandtl + invDt = 1.0_RKIND / dt + inv_r_earth = 1.0_RKIND / r_earth - end do + v_mom_eddy_visc2 = config_v_mom_eddy_visc2 + v_theta_eddy_visc2 = config_v_theta_eddy_visc2 - else - k = 1 + if (rk_step == 1) then - dpzx(k) = .25*(zx(k,iEdge)+zx(k+1,iEdge)) & - *(pzm(k,cell2)*(pp(k+1,cell2)-pp(k,cell2)) & - +pzm(k,cell1)*(pp(k+1,cell1)-pp(k,cell1)) & - +pzp(k,cell2)*(pp(k+2,cell2)-pp(k,cell2)) & - +pzp(k,cell1)*(pp(k+2,cell1)-pp(k,cell1))) - - do k = 2, nVertLevels-1 +! 
tend_u_euler(1:nVertLevels,edgeStart:edgeEnd) = 0.0 - dpzx(k) = .25*(zx(k,iEdge)+zx(k+1,iEdge)) & - *(pzp(k,cell2)*(pp(k+1,cell2)-pp(k ,cell2)) & - +pzm(k,cell2)*(pp(k ,cell2)-pp(k-1,cell2)) & - +pzp(k,cell1)*(pp(k+1,cell1)-pp(k ,cell1)) & - +pzm(k,cell1)*(pp(k ,cell1)-pp(k-1,cell1))) + ! Smagorinsky eddy viscosity, based on horizontal deformation (in this case on model coordinate surfaces). + ! The integration coefficients were precomputed and stored in defc_a and defc_b + if(config_horiz_mixing == "2d_smagorinsky") then + do iCell = cellStart,cellEnd + d_diag(1:nVertLevels) = 0.0 + d_off_diag(1:nVertLevels) = 0.0 + do iEdge=1,nEdgesOnCell(iCell) + do k=1,nVertLevels + d_diag(k) = d_diag(k) + defc_a(iEdge,iCell)*u(k,EdgesOnCell(iEdge,iCell)) & + - defc_b(iEdge,iCell)*v(k,EdgesOnCell(iEdge,iCell)) + d_off_diag(k) = d_off_diag(k) + defc_b(iEdge,iCell)*u(k,EdgesOnCell(iEdge,iCell)) & + + defc_a(iEdge,iCell)*v(k,EdgesOnCell(iEdge,iCell)) + end do + end do +!DIR$ IVDEP + do k=1, nVertLevels + ! here is the Smagorinsky formulation, + ! followed by imposition of an upper bound on the eddy viscosity + kdiff(k,iCell) = min((c_s * config_len_disp)**2 * sqrt(d_diag(k)**2 + d_off_diag(k)**2),(0.01*config_len_disp**2) * invDt) + end do end do - k = nVertLevels - dpzx(k) = .25*(zx(k,iEdge)+zx(k+1,iEdge)) & - *(pzm(k,cell2)*(pp(k ,cell2)-pp(k-1,cell2)) & - +pzm(k,cell1)*(pp(k ,cell1)-pp(k-1,cell1))) + h_mom_eddy_visc4 = config_visc4_2dsmag * config_len_disp**3 + h_theta_eddy_visc4 = h_mom_eddy_visc4 - do k=1,nVertLevels + else if(config_horiz_mixing == "2d_fixed") then - tend_u(k,iEdge) = - cqu(k,iEdge)*((pp(k,cell2)-pp(k,cell1))/dcEdge(iEdge) & - - dpzx(k) ) / (.5*(zz(k,cell2)+zz(k,cell1))) - end do + kdiff(1:nVertLevels,cellStart:cellEnd) = config_h_theta_eddy_visc2 + h_mom_eddy_visc4 = config_h_mom_eddy_visc4 + h_theta_eddy_visc4 = config_h_theta_eddy_visc4 end if + + end if - ! vertical transport of u + ! tendency for density. + ! 
accumulate total water here for later use in w tendency calculation. - wduz(1) = 0. - if (config_u_vadv_order == 2) then + ! accumulate horizontal mass-flux - do k=2,nVertLevels - wduz(k) = 0.5*(rw(k,cell1)+rw(k,cell2))*(fzm(k)*u(k,iEdge)+fzp(k)*u(k-1,iEdge)) + do iCell=cellStart,cellEnd + h_divergence(1:nVertLevels,iCell) = 0.0 + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + edge_sign = edgesOnCell_sign(i,iCell) * dvEdge(iEdge) +!DIR$ IVDEP + do k=1,nVertLevels + h_divergence(k,iCell) = h_divergence(k,iCell) + edge_sign * ru(k,iEdge) end do + end do + end do - else if (config_u_vadv_order == 3) then + ! compute horizontal mass-flux divergence - k = 2 - wduz(k) = 0.5*( rw(k,cell1)+rw(k,cell2))*(fzm(k)*u(k,iEdge)+fzp(k)*u(k-1,iEdge)) - do k=3,nVertLevels-1 - wduz(k) = flux3( u(k-2,iEdge),u(k-1,iEdge),u(k,iEdge),u(k+1,iEdge),0.5*(rw(k,cell1)+rw(k,cell2)), 1.0_RKIND ) - end do - k = nVertLevels - wduz(k) = 0.5*( rw(k,cell1)+rw(k,cell2))*(fzm(k)*u(k,iEdge)+fzp(k)*u(k-1,iEdge)) + do iCell = cellStart,cellEnd + r = invAreaCell(iCell) + do k = 1,nVertLevels + h_divergence(k,iCell) = h_divergence(k,iCell) * r + end do + end do - else if (config_u_vadv_order == 4) then + ! + ! dp / dz and tend_rho + ! + ! only needed on first rk_step with pert variables defined as a pert from time t + ! + if(rk_step == 1) then - k = 2 - wduz(k) = 0.5*( rw(k,cell1)+rw(k,cell2))*(fzm(k)*u(k,iEdge)+fzp(k)*u(k-1,iEdge)) - do k=3,nVertLevels-1 - wduz(k) = flux4( u(k-2,iEdge),u(k-1,iEdge),u(k,iEdge),u(k+1,iEdge),0.5*(rw(k,cell1)+rw(k,cell2)) + rgas_cprcv = rgas*cp/cv + do iCell = cellStart,cellEnd + +!DIR$ IVDEP + do k = 1,nVertLevels + tend_rho(k,iCell) = -h_divergence(k,iCell)-rdzw(k)*(rw(k+1,iCell)-rw(k,iCell)) + tend_rho_physics(k,iCell) + dpdz(k,iCell) = -gravity*(rb(k,iCell)*(qtot(k,iCell)) + rr_save(k,iCell)*(1.+qtot(k,iCell))) + end do + end do + end if + +!$OMP BARRIER + + ! + !
Compute u (normal) velocity tendency for each edge (cell face) + ! + + do iEdge=edgeSolveStart,edgeSolveEnd + + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) + + ! horizontal pressure gradient + + if(rk_step == 1) then +!DIR$ IVDEP + do k=1,nVertLevels + tend_u_euler(k,iEdge) = - cqu(k,iEdge)*( (pp(k,cell2)-pp(k,cell1))*invDcEdge(iEdge)/(.5*(zz(k,cell2)+zz(k,cell1))) & + -0.5*zxu(k,iEdge)*(dpdz(k,cell1)+dpdz(k,cell2)) ) end do - k = nVertLevels - wduz(k) = 0.5*( rw(k,cell1)+rw(k,cell2))*(fzm(k)*u(k,iEdge)+fzp(k)*u(k-1,iEdge)) end if + + ! vertical transport of u + + wduz(1) = 0. + + k = 2 + wduz(k) = 0.5*( rw(k,cell1)+rw(k,cell2))*(fzm(k)*u(k,iEdge)+fzp(k)*u(k-1,iEdge)) + do k=3,nVertLevels-1 + wduz(k) = flux3( u(k-2,iEdge),u(k-1,iEdge),u(k,iEdge),u(k+1,iEdge),0.5*(rw(k,cell1)+rw(k,cell2)), 1.0_RKIND ) + end do + k = nVertLevels + wduz(k) = 0.5*( rw(k,cell1)+rw(k,cell2))*(fzm(k)*u(k,iEdge)+fzp(k)*u(k-1,iEdge)) + wduz(nVertLevels+1) = 0. +!DIR$ IVDEP do k=1,nVertLevels - tend_u(k,iEdge) = tend_u(k,iEdge) - rdzw(k)*(wduz(k+1)-wduz(k)) + tend_u(k,iEdge) = - rdzw(k)*(wduz(k+1)-wduz(k)) ! first use of tend_u end do ! Next, nonlinear Coriolis term (q) following Ringler et al JCP 2009 @@ -2837,31 +4413,32 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r eoe = edgesOnEdge(j,iEdge) do k=1,nVertLevels workpv = 0.5 * (pv_edge(k,iEdge) + pv_edge(k,eoe)) - q(k) = q(k) + weightsOnEdge(j,iEdge) * u(k,eoe) * workpv * rho_edge(k,eoe) +! the original definition of pv_edge had a factor of 1/density. We have removed that factor +! given that it was not integral to any conservation property of the system + q(k) = q(k) + weightsOnEdge(j,iEdge) * u(k,eoe) * workpv end do end do +!DIR$ IVDEP do k=1,nVertLevels ! horizontal ke gradient and vorticity terms in the vector invariant formulation ! 
of the horizontal momentum equation tend_u(k,iEdge) = tend_u(k,iEdge) + rho_edge(k,iEdge)* (q(k) - (ke(k,cell2) - ke(k,cell1)) & - / dcEdge(iEdge)) & - - u(k,iEdge)*0.5*(divergence_ru(k,cell1)+divergence_ru(k,cell2)) - if (curvature) then - - ! curvature terms for the sphere - - tend_u(k,iEdge) = tend_u(k,iEdge) & - - 2.*omega*cos(angleEdge(iEdge))*cos(latEdge(iEdge)) & - *rho_edge(k,iEdge)*.25*(w(k,cell1)+w(k+1,cell1)+w(k,cell2)+w(k+1,cell2)) & - - u(k,iEdge)*.25*(w(k+1,cell1)+w(k,cell1)+w(k,cell2)+w(k+1,cell2)) & - *rho_edge(k,iEdge)/r_earth - end if + * invDcEdge(iEdge)) & + - u(k,iEdge)*0.5*(h_divergence(k,cell1)+h_divergence(k,cell2)) +#ifdef CURVATURE + ! curvature terms for the sphere + tend_u(k,iEdge) = tend_u(k,iEdge) & + - 2.*omega*cos(angleEdge(iEdge))*cos(latEdge(iEdge)) & + *rho_edge(k,iEdge)*.25*(w(k,cell1)+w(k+1,cell1)+w(k,cell2)+w(k+1,cell2)) & + - u(k,iEdge)*.25*(w(k+1,cell1)+w(k,cell1)+w(k,cell2)+w(k+1,cell2)) & + *rho_edge(k,iEdge) * inv_r_earth +#endif end do + end do - deallocate(divergence_ru) ! ! horizontal mixing for u @@ -2869,611 +4446,410 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r ! first Runge-Kutta substep and saved for use in later RK substeps 2 and 3. ! - if (rk_step == 1 .or. rk_diffusion) then - - tend_u_euler = 0. - - if (delsq_horiz_mixing) then - - if ((h_mom_eddy_visc2 > 0.0) .and. (config_horiz_mixing == "2d_fixed")) then - do iEdge=1, nEdgesSolve - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - vertex1 = verticesOnEdge(1,iEdge) - vertex2 = verticesOnEdge(2,iEdge) - - do k=1,nVertLevels - - ! - ! Compute diffusion, computed as \nabla divergence - k \times \nabla vorticity - ! only valid for h_mom_eddy_visc2 == constant - ! - ! Note that we impose a lower bound on the edge length used in the derivative of the vorticity; - ! this is done to avoid an overly stringent stability constraint for small edge lengths that can - ! occur on some variable-resolution meshes. - ! 
- u_diffusion = ( divergence(k,cell2) - divergence(k,cell1) ) / dcEdge(iEdge) & - -( vorticity(k,vertex2) - vorticity(k,vertex1) ) / max(dvEdge(iEdge),0.25*dcEdge(iEdge)) - u_diffusion = rho_edge(k,iEdge)*h_mom_eddy_visc2 * u_diffusion - u_diffusion = u_diffusion * meshScalingDel2(iEdge) - - tend_u_euler(k,iEdge) = tend_u_euler(k,iEdge) + u_diffusion - end do - end do - - else if ( config_horiz_mixing == "2d_smagorinsky") then - - do iEdge=1, nEdgesSolve - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - vertex1 = verticesOnEdge(1,iEdge) - vertex2 = verticesOnEdge(2,iEdge) - - do k=1,nVertLevels - ! - ! Compute diffusion, computed as \nabla divergence - k \times \nabla vorticity - ! only valid for h_mom_eddy_visc2 == constant - ! - u_diffusion = ( divergence(k,cell2) - divergence(k,cell1) ) / dcEdge(iEdge) & - -( vorticity(k,vertex2) - vorticity(k,vertex1) ) / max(dvEdge(iEdge),0.25*dcEdge(iEdge)) - u_diffusion = rho_edge(k,iEdge)* 0.5*(kdiff(k,cell1)+kdiff(k,cell2)) * u_diffusion - u_diffusion = u_diffusion * meshScalingDel2(iEdge) - - tend_u_euler(k,iEdge) = tend_u_euler(k,iEdge) + u_diffusion - end do - end do - end if - - end if ! delsq_horiz_mixing for u + if (rk_step == 1) then - if ((h_mom_eddy_visc4 > 0.0 .and. config_horiz_mixing == "2d_fixed") .or. & - (h_mom_eddy_visc4 > 0.0 .and. config_horiz_mixing == "2d_smagorinsky")) then +!$OMP BARRIER ! del^4 horizontal filter. We compute this as del^2 ( del^2 (u) ). ! First, storage to hold the result from the first del^2 computation. 
- allocate(delsq_divergence(nVertLevels, nCells+1)) - allocate(delsq_u(nVertLevels, nEdges+1)) - allocate(delsq_circulation(nVertLevels, nVertices+1)) - allocate(delsq_vorticity(nVertLevels, nVertices+1)) + delsq_u(1:nVertLevels,edgeStart:edgeEnd) = 0.0 - delsq_u(:,:) = 0.0 - - do iEdge=1, nEdges + do iEdge=edgeStart,edgeEnd cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) vertex1 = verticesOnEdge(1,iEdge) vertex2 = verticesOnEdge(2,iEdge) + r_dc = invDcEdge(iEdge) + r_dv = min(invDvEdge(iEdge), 4*invDcEdge(iEdge)) +!DIR$ IVDEP do k=1,nVertLevels - ! ! Compute diffusion, computed as \nabla divergence - k \times \nabla vorticity ! only valid for h_mom_eddy_visc4 == constant - ! - u_diffusion = ( divergence(k,cell2) - divergence(k,cell1) ) / dcEdge(iEdge) & - -( vorticity(k,vertex2) - vorticity(k,vertex1) ) / max(dvEdge(iEdge), 0.25*dcEdge(iEdge)) + u_diffusion = ( divergence(k,cell2) - divergence(k,cell1) ) * r_dc & + -( vorticity(k,vertex2) - vorticity(k,vertex1) ) * r_dv delsq_u(k,iEdge) = delsq_u(k,iEdge) + u_diffusion - end do - end do - delsq_circulation(:,:) = 0.0 - do iEdge=1,nEdges - do k=1,nVertLevels - delsq_circulation(k,verticesOnEdge(1,iEdge)) = delsq_circulation(k,verticesOnEdge(1,iEdge)) - dcEdge(iEdge) * delsq_u(k,iEdge) - delsq_circulation(k,verticesOnEdge(2,iEdge)) = delsq_circulation(k,verticesOnEdge(2,iEdge)) + dcEdge(iEdge) * delsq_u(k,iEdge) - end do - end do - do iVertex=1,nVertices - r = 1.0 / areaTriangle(iVertex) - do k=1,nVertLevels - delsq_vorticity(k,iVertex) = delsq_circulation(k,iVertex) * r - end do - end do + kdiffu = 0.5*(kdiff(k,cell1)+kdiff(k,cell2)) + + ! 
include 2nd-orer diffusion here + tend_u_euler(k,iEdge) = tend_u_euler(k,iEdge) & + + rho_edge(k,iEdge)* kdiffu * u_diffusion * meshScalingDel2(iEdge) - delsq_divergence(:,:) = 0.0 - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - do k=1,nVertLevels - delsq_divergence(k,cell1) = delsq_divergence(k,cell1) + delsq_u(k,iEdge)*dvEdge(iEdge) - delsq_divergence(k,cell2) = delsq_divergence(k,cell2) - delsq_u(k,iEdge)*dvEdge(iEdge) - end do - end do - do iCell = 1,nCells - r = 1.0 / areaCell(iCell) - do k = 1,nVertLevels - delsq_divergence(k,iCell) = delsq_divergence(k,iCell) * r end do end do - do iEdge=1,nEdgesSolve - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - vertex1 = verticesOnEdge(1,iEdge) - vertex2 = verticesOnEdge(2,iEdge) + if (h_mom_eddy_visc4 > 0.0) then ! 4th order mixing is active - do k=1,nVertLevels +!$OMP BARRIER - ! - ! Compute diffusion, computed as \nabla divergence - k \times \nabla vorticity - ! only valid for h_mom_eddy_visc4 == constant - ! - ! Here, we scale the diffusion on the divergence part a factor of config_del4u_div_factor - ! relative to the rotational part. The stability constraint on the divergence component is much less - ! stringent than the rotational part, and this flexibility may be useful. - ! 
- u_diffusion = rho_edge(k,iEdge) * ( config_del4u_div_factor * ( delsq_divergence(k,cell2) - delsq_divergence(k,cell1) ) / dcEdge(iEdge) & - -( delsq_vorticity(k,vertex2) - delsq_vorticity(k,vertex1) ) / max(dvEdge(iEdge), 0.25*dcEdge(iEdge)) & - ) + do iVertex=vertexStart,vertexEnd + delsq_vorticity(1:nVertLevels,iVertex) = 0.0 + do i=1,vertexDegree + iEdge = edgesOnVertex(i,iVertex) + edge_sign = invAreaTriangle(iVertex) * dcEdge(iEdge) * edgesOnVertex_sign(i,iVertex) + do k=1,nVertLevels + delsq_vorticity(k,iVertex) = delsq_vorticity(k,iVertex) + edge_sign * delsq_u(k,iEdge) + end do + end do + end do - u_diffusion = u_diffusion * meshScalingDel4(iEdge) - tend_u_euler(k,iEdge) = tend_u_euler(k,iEdge) - h_mom_eddy_visc4 * u_diffusion + do iCell=cellStart,cellEnd + delsq_divergence(1:nVertLevels,iCell) = 0.0 + r = invAreaCell(iCell) + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + edge_sign = r * dvEdge(iEdge) * edgesOnCell_sign(i,iCell) + do k=1,nVertLevels + delsq_divergence(k,iCell) = delsq_divergence(k,iCell) + edge_sign * delsq_u(k,iEdge) + end do + end do end do - end do - deallocate(delsq_divergence) - deallocate(delsq_u) - deallocate(delsq_circulation) - deallocate(delsq_vorticity) +!$OMP BARRIER + + do iEdge=edgeSolveStart,edgeSolveEnd + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) + vertex1 = verticesOnEdge(1,iEdge) + vertex2 = verticesOnEdge(2,iEdge) + + u_mix_scale = meshScalingDel4(iEdge)*h_mom_eddy_visc4 + r_dc = u_mix_scale * config_del4u_div_factor * invDcEdge(iEdge) + r_dv = u_mix_scale * min(invDvEdge(iEdge), 4*invDcEdge(iEdge)) - end if +!DIR$ IVDEP + do k=1,nVertLevels + + ! Compute diffusion, computed as \nabla divergence - k \times \nabla vorticity + ! only valid for h_mom_eddy_visc4 == constant + ! + ! Here, we scale the diffusion on the divergence part a factor of config_del4u_div_factor + ! relative to the rotational part. The stability constraint on the divergence component is much less + ! 
stringent than the rotational part, and this flexibility may be useful. + ! + u_diffusion = rho_edge(k,iEdge) * ( ( delsq_divergence(k,cell2) - delsq_divergence(k,cell1) ) * r_dc & + -( delsq_vorticity(k,vertex2) - delsq_vorticity(k,vertex1) ) * r_dv ) + tend_u_euler(k,iEdge) = tend_u_euler(k,iEdge) - u_diffusion + + end do + end do + + end if ! 4th order mixing is active ! ! vertical mixing for u - 2nd order filter in physical (z) space ! - if ( v_mom_eddy_visc2 > 0.0 ) then + if ( v_mom_eddy_visc2 > 0.0 ) then - if (config_mix_full) then + if (config_mix_full) then ! mix full state - do iEdge=1,nEdgesSolve + do iEdge=edgeSolveStart,edgeSolveEnd - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) - do k=2,nVertLevels-1 + do k=2,nVertLevels-1 - z1 = 0.5*(zgrid(k-1,cell1)+zgrid(k-1,cell2)) - z2 = 0.5*(zgrid(k ,cell1)+zgrid(k ,cell2)) - z3 = 0.5*(zgrid(k+1,cell1)+zgrid(k+1,cell2)) - z4 = 0.5*(zgrid(k+2,cell1)+zgrid(k+2,cell2)) + z1 = 0.5*(zgrid(k-1,cell1)+zgrid(k-1,cell2)) + z2 = 0.5*(zgrid(k ,cell1)+zgrid(k ,cell2)) + z3 = 0.5*(zgrid(k+1,cell1)+zgrid(k+1,cell2)) + z4 = 0.5*(zgrid(k+2,cell1)+zgrid(k+2,cell2)) - zm = 0.5*(z1+z2) - z0 = 0.5*(z2+z3) - zp = 0.5*(z3+z4) + zm = 0.5*(z1+z2) + z0 = 0.5*(z2+z3) + zp = 0.5*(z3+z4) - tend_u_euler(k,iEdge) = tend_u_euler(k,iEdge) + rho_edge(k,iEdge) * v_mom_eddy_visc2*( & - (u(k+1,iEdge)-u(k ,iEdge))/(zp-z0) & - -(u(k ,iEdge)-u(k-1,iEdge))/(z0-zm) )/(0.5*(zp-zm)) + tend_u_euler(k,iEdge) = tend_u_euler(k,iEdge) + rho_edge(k,iEdge) * v_mom_eddy_visc2*( & + (u(k+1,iEdge)-u(k ,iEdge))/(zp-z0) & + -(u(k ,iEdge)-u(k-1,iEdge))/(z0-zm) )/(0.5*(zp-zm)) + end do end do - end do - else ! idealized cases where we mix on the perturbation from the initial 1-D state + else ! 
idealized cases where we mix on the perturbation from the initial 1-D state - do iEdge=1,nEdgesSolve + do iEdge=edgeSolveStart,edgeSolveEnd - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) - do k=1,nVertLevels + do k=1,nVertLevels #ifdef ROTATED_GRID - u_mix(k) = u(k,iEdge) - u_init(k) * sin( angleEdge(iEdge) ) + u_mix(k) = u(k,iEdge) - u_init(k) * sin( angleEdge(iEdge) ) #else - u_mix(k) = u(k,iEdge) - u_init(k) * cos( angleEdge(iEdge) ) + u_mix(k) = u(k,iEdge) - u_init(k) * cos( angleEdge(iEdge) ) #endif - end do + end do - do k=2,nVertLevels-1 + do k=2,nVertLevels-1 - z1 = 0.5*(zgrid(k-1,cell1)+zgrid(k-1,cell2)) - z2 = 0.5*(zgrid(k ,cell1)+zgrid(k ,cell2)) - z3 = 0.5*(zgrid(k+1,cell1)+zgrid(k+1,cell2)) - z4 = 0.5*(zgrid(k+2,cell1)+zgrid(k+2,cell2)) + z1 = 0.5*(zgrid(k-1,cell1)+zgrid(k-1,cell2)) + z2 = 0.5*(zgrid(k ,cell1)+zgrid(k ,cell2)) + z3 = 0.5*(zgrid(k+1,cell1)+zgrid(k+1,cell2)) + z4 = 0.5*(zgrid(k+2,cell1)+zgrid(k+2,cell2)) - zm = 0.5*(z1+z2) - z0 = 0.5*(z2+z3) - zp = 0.5*(z3+z4) + zm = 0.5*(z1+z2) + z0 = 0.5*(z2+z3) + zp = 0.5*(z3+z4) - tend_u_euler(k,iEdge) = tend_u_euler(k,iEdge) + rho_edge(k,iEdge) * v_mom_eddy_visc2*( & - (u_mix(k+1)-u_mix(k ))/(zp-z0) & - -(u_mix(k )-u_mix(k-1))/(z0-zm) )/(0.5*(zp-zm)) + tend_u_euler(k,iEdge) = tend_u_euler(k,iEdge) + rho_edge(k,iEdge) * v_mom_eddy_visc2*( & + (u_mix(k+1)-u_mix(k ))/(zp-z0) & + -(u_mix(k )-u_mix(k-1))/(z0-zm) )/(0.5*(zp-zm)) + end do end do - end do - end if + end if ! mix perturbation state - end if + end if ! vertical mixing of horizontal momentum end if ! (rk_step 1 test for computing mixing terms) +!$OMP BARRIER + ! add in mixing for u - do iEdge=1,nEdgesSolve + do iEdge=edgeSolveStart,edgeSolveEnd +!DIR$ IVDEP do k=1,nVertLevels - tend_u(k,iEdge) = tend_u(k,iEdge) + tend_u_euler(k,iEdge) +! 
tend_u(k,iEdge) = tend_u(k,iEdge) + tend_u_euler(k,iEdge) + tend_u(k,iEdge) = tend_u(k,iEdge) + tend_u_euler(k,iEdge) + tend_ru_physics(k,iEdge) end do end do + !----------- rhs for w - tend_w(:,:) = 0. - if(rk_step .eq. 1) then - tend_w_pgf(:,:) = 0. - tend_w_buoy(:,:) = 0. - endif ! ! horizontal advection for w ! - if (config_w_adv_order == 2) then - - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then - do k=2,nVertLevels - flux = dvEdge(iEdge) * (fzm(k)*ru(k,iEdge) + fzp(k)*ru(k-1,iEdge) ) & - *(w(k,cell1) + w(k,cell2))*0.5 - tend_w(k,cell1) = tend_w(k,cell1) - flux - tend_w(k,cell2) = tend_w(k,cell2) + flux - end do - end if - end do - - else if (config_w_adv_order == 3) then - - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then - - do k=2,nVertLevels - ru_edge_w(k) = fzm(k)*ru(k,iEdge) + fzp(k)*ru(k-1,iEdge) - end do - - flux_arr(:) = 0. - - ! flux_arr stores the value of w at the cell edge used in the horizontal transport - - do i=1,nAdvCellsForEdge(iEdge) - iCell = advCellsForEdge(i,iEdge) - do k=2,nVertLevels - scalar_weight = adv_coefs(i,iEdge) + coef_3rd_order*sign(1.0_RKIND,ru_edge_w(k))*adv_coefs_3rd(i,iEdge) - flux_arr(k) = flux_arr(k) + scalar_weight* w(k,iCell) - end do - end do - - do k=2,nVertLevels - tend_w(k,cell1) = tend_w(k,cell1) - ru_edge_w(k)*flux_arr(k) - tend_w(k,cell2) = tend_w(k,cell2) + ru_edge_w(k)*flux_arr(k) - end do + do iCell=cellSolveStart,cellSolveEnd ! Technically updating fewer cells than before... 
+ tend_w(1:nVertLevels+1,iCell) = 0.0 + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + edge_sign = edgesOnCell_sign(i,iCell) * dvEdge(iEdge) * 0.5 - end if - end do + do k=2,nVertLevels + ru_edge_w(k) = fzm(k)*ru(k,iEdge) + fzp(k)*ru(k-1,iEdge) + end do - else if (config_w_adv_order == 4) then + flux_arr(1:nVertLevels) = 0.0 - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then + ! flux_arr stores the value of w at the cell edge used in the horizontal transport + do j=1,nAdvCellsForEdge(iEdge) + iAdvCell = advCellsForEdge(j,iEdge) do k=2,nVertLevels - - d2fdx2_cell1 = deriv_two(1,1,iEdge) * w(k,cell1) - d2fdx2_cell2 = deriv_two(1,2,iEdge) * w(k,cell2) - do i=1, nEdgesOnCell(cell1) - if ( cellsOnCell(i,cell1) <= nCells) & - d2fdx2_cell1 = d2fdx2_cell1 + deriv_two(i+1,1,iEdge) * w(k,cellsOnCell(i,cell1)) - end do - do i=1, nEdgesOnCell(cell2) - if ( cellsOnCell(i,cell2) <= nCells) & - d2fdx2_cell2 = d2fdx2_cell2 + deriv_two(i+1,2,iEdge) * w(k,cellsOnCell(i,cell2)) - end do - - flux = dvEdge(iEdge) * (fzm(k)*ru(k,iEdge) + fzp(k)*ru(k-1,iEdge)) * ( & - 0.5*(w(k,cell1) + w(k,cell2)) & - -(dcEdge(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12. ) - - tend_w(k,cell1) = tend_w(k,cell1) - flux - tend_w(k,cell2) = tend_w(k,cell2) + flux + scalar_weight = adv_coefs(j,iEdge) + sign(1.0_RKIND,ru_edge_w(k)) * adv_coefs_3rd(j,iEdge) + flux_arr(k) = flux_arr(k) + scalar_weight * w(k,iAdvCell) end do + end do - end if +!DIR$ IVDEP + do k=2,nVertLevels + tend_w(k,iCell) = tend_w(k,iCell) - edgesOnCell_sign(i,iCell) * ru_edge_w(k)*flux_arr(k) + end do end do - end if - - if (curvature) then + end do - do iCell = 1, nCellsSolve - do k=2,nVertLevels - tend_w(k,iCell) = tend_w(k,iCell) + (rho_zz(k,iCell)*fzm(k)+rho_zz(k-1,iCell)*fzp(k))* & - ( (fzm(k)*ur_cell(k,iCell)+fzp(k)*ur_cell(k-1,iCell))**2. & - +(fzm(k)*vr_cell(k,iCell)+fzp(k)*vr_cell(k-1,iCell))**2. 
)/r_earth & - + 2.*omega*cos(latCell(iCell)) & - *(fzm(k)*ur_cell(k,iCell)+fzp(k)*ur_cell(k-1,iCell)) & - *(rho_zz(k,iCell)*fzm(k)+rho_zz(k-1,iCell)*fzp(k)) +#ifdef CURVATURE + do iCell = cellSolveStart, cellSolveEnd +!DIR$ IVDEP + do k=2,nVertLevels + tend_w(k,iCell) = tend_w(k,iCell) + (rho_zz(k,iCell)*fzm(k)+rho_zz(k-1,iCell)*fzp(k))* & + ( (fzm(k)*ur_cell(k,iCell)+fzp(k)*ur_cell(k-1,iCell))**2. & + +(fzm(k)*vr_cell(k,iCell)+fzp(k)*vr_cell(k-1,iCell))**2. )/r_earth & + + 2.*omega*cos(latCell(iCell)) & + *(fzm(k)*ur_cell(k,iCell)+fzp(k)*ur_cell(k-1,iCell)) & + *(rho_zz(k,iCell)*fzm(k)+rho_zz(k-1,iCell)*fzp(k)) - end do end do + end do +#endif - end if ! ! horizontal mixing for w - we could combine this with advection directly (i.e. as a turbulent flux), ! but here we can also code in hyperdiffusion if we wish (2nd order at present) ! - if (rk_step == 1 .or. rk_diffusion) then - - tend_w_euler = 0. - - if (delsq_horiz_mixing) then - - if ((h_mom_eddy_visc2 > 0.0) .and. (config_horiz_mixing == "2d_fixed")) then - - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then - - ! horizontal flux divergence of the gradient (i.e. del^2) - ! note, for w, even though we use theta_* local scratch variables - do k=2,nVertLevels - theta_turb_flux = h_mom_eddy_visc2*(w(k,cell2) - w(k,cell1))/dcEdge(iEdge) - theta_turb_flux = theta_turb_flux * meshScalingDel2(iEdge) - flux = 0.5*dvEdge (iEdge) * (rho_edge(k,iEdge)+rho_edge(k-1,iEdge)) * theta_turb_flux - tend_w_euler(k,cell1) = tend_w_euler(k,cell1) + flux/areaCell(cell1) - tend_w_euler(k,cell2) = tend_w_euler(k,cell2) - flux/areaCell(cell2) - end do - - end if - end do - - else if (config_horiz_mixing == "2d_smagorinsky") then - - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. 
cell2 <= nCellsSolve) then - - do k=2,nVertLevels - theta_turb_flux = 0.25*(kdiff(k,cell1)+kdiff(k,cell2)+kdiff(k-1,cell1)+kdiff(k-1,cell2)) & - *(w(k,cell2) - w(k,cell1))/dcEdge(iEdge) - theta_turb_flux = theta_turb_flux * meshScalingDel2(iEdge) - flux = 0.5*dvEdge (iEdge) * (rho_edge(k,iEdge)+rho_edge(k-1,iEdge)) * theta_turb_flux - tend_w_euler(k,cell1) = tend_w_euler(k,cell1) + flux/areaCell(cell1) - tend_w_euler(k,cell2) = tend_w_euler(k,cell2) - flux/areaCell(cell2) - end do + if (rk_step == 1) then - end if - end do - end if - end if ! delsq_horiz_mixing +! !OMP BARRIER why is this openmp barrier here??? - if ((h_mom_eddy_visc4 > 0.0 .and. config_horiz_mixing == "2d_fixed") .or. & - (h_mom_eddy_visc4 > 0.0 .and. config_horiz_mixing == "2d_smagorinsky")) then + ! del^4 horizontal filter. We compute this as del^2 ( del^2 (u) ). + ! + ! First, storage to hold the result from the first del^2 computation. + ! we copied code from the theta mixing, hence the theta* names. - ! del^4 horizontal filter. We compute this as del^2 ( del^2 (u) ). - ! - ! First, storage to hold the result from the first del^2 computation. - ! we copied code from the theta mixing, hence the theta* names. + delsq_w(1:nVertLevels,cellStart:cellEnd) = 0.0 - allocate(delsq_theta(nVertLevels, nCells+1)) + do iCell=cellStart,cellEnd + tend_w_euler(1:nVertLevels+1,iCell) = 0.0 + r_areaCell = invAreaCell(iCell) + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) - delsq_theta(:,:) = 0. 
+ edge_sign = 0.5 * r_areaCell*edgesOnCell_sign(i,iCell) * dvEdge(iEdge) * invDcEdge(iEdge) - do iEdge=1,nEdges cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) - do k=2,nVertLevels - delsq_theta(k,cell1) = delsq_theta(k,cell1) + dvEdge(iEdge)*0.5*(rho_edge(k,iEdge)+rho_edge(k-1,iEdge))*(w(k,cell2) - w(k,cell1))/dcEdge(iEdge) - delsq_theta(k,cell2) = delsq_theta(k,cell2) - dvEdge(iEdge)*0.5*(rho_edge(k,iEdge)+rho_edge(k-1,iEdge))*(w(k,cell2) - w(k,cell1))/dcEdge(iEdge) - end do - end do - do iCell = 1, nCells - r = 1.0 / areaCell(iCell) - do k=2,nVertLevels - delsq_theta(k,iCell) = delsq_theta(k,iCell) * r +!DIR$ IVDEP + do k=2,nVertLevels + + w_turb_flux = edge_sign*(rho_edge(k,iEdge)+rho_edge(k-1,iEdge))*(w(k,cell2) - w(k,cell1)) + delsq_w(k,iCell) = delsq_w(k,iCell) + w_turb_flux + w_turb_flux = w_turb_flux * meshScalingDel2(iEdge) * 0.25 * & + (kdiff(k,cell1)+kdiff(k,cell2)+kdiff(k-1,cell1)+kdiff(k-1,cell2)) + tend_w_euler(k,iCell) = tend_w_euler(k,iCell) + w_turb_flux end do end do + end do - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then +!$OMP BARRIER + + if (h_mom_eddy_visc4 > 0.0) then ! 4th order mixing is active + + do iCell=cellSolveStart,cellSolveEnd ! Technically updating fewer cells than before... 
+ r_areaCell = h_mom_eddy_visc4 * invAreaCell(iCell) + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) + + edge_sign = meshScalingDel4(iEdge)*r_areaCell*dvEdge(iEdge)*edgesOnCell_sign(i,iCell) * invDcEdge(iEdge) do k=2,nVertLevels - theta_turb_flux = h_mom_eddy_visc4*(delsq_theta(k,cell2) - delsq_theta(k,cell1))/dcEdge(iEdge) - theta_turb_flux = theta_turb_flux * meshScalingDel4(iEdge) - flux = dvEdge (iEdge) * theta_turb_flux - tend_w_euler(k,cell1) = tend_w_euler(k,cell1) - flux/areaCell(cell1) - tend_w_euler(k,cell2) = tend_w_euler(k,cell2) + flux/areaCell(cell2) + tend_w_euler(k,iCell) = tend_w_euler(k,iCell) - edge_sign * (delsq_w(k,cell2) - delsq_w(k,cell1)) end do - - end if + + end do end do - deallocate(delsq_theta) - - end if + end if ! 4th order mixing is active end if ! horizontal mixing for w computed in first rk_step +! Note for OpenMP parallelization: We could avoid allocating the delsq_w scratch +! array, and just use the delsq_theta array as was previously done; however, +! particularly when oversubscribing cores with threads, there is the risk that +! some threads may reach code further below that re-uses the delsq_theta array, +! in which case we would need a barrier somewhere between here and that code +! below to ensure correct behavior. + ! ! vertical advection, pressure gradient and buoyancy for w ! - do iCell = 1, nCells - - wdwz(1) = 0. 
- if (config_w_vadv_order == 2) then - - do k=2,nVertLevels - wdwz(k) = 0.25*(rw(k,icell)+rw(k-1,iCell))*(w(k,iCell)+w(k-1,iCell)) - end do - - else if (config_w_vadv_order == 3) then - - k = 2 - wdwz(k) = 0.25*(rw(k,icell)+rw(k-1,iCell))*(w(k,iCell)+w(k-1,iCell)) - do k=3,nVertLevels-1 - wdwz(k) = flux3( w(k-2,iCell),w(k-1,iCell),w(k,iCell),w(k+1,iCell),0.5*(rw(k,iCell)+rw(k-1,iCell)), 1.0_RKIND ) - end do - k = nVertLevels - wdwz(k) = 0.25*(rw(k,icell)+rw(k-1,iCell))*(w(k,iCell)+w(k-1,iCell)) + do iCell=cellSolveStart,cellSolveEnd - else if (config_w_vadv_order == 4) then + wdwz(1) = 0.0 - k = 2 - wdwz(k) = 0.25*(rw(k,icell)+rw(k-1,iCell))*(w(k,iCell)+w(k-1,iCell)) - do k=3,nVertLevels-1 - wdwz(k) = flux4( w(k-2,iCell),w(k-1,iCell),w(k,iCell),w(k+1,iCell),0.5*(rw(k,iCell)+rw(k-1,iCell)) ) - end do - k = nVertLevels - wdwz(k) = 0.25*(rw(k,icell)+rw(k-1,iCell))*(w(k,iCell)+w(k-1,iCell)) - - end if + k = 2 + wdwz(k) = 0.25*(rw(k,icell)+rw(k-1,iCell))*(w(k,iCell)+w(k-1,iCell)) + do k=3,nVertLevels-1 + wdwz(k) = flux3( w(k-2,iCell),w(k-1,iCell),w(k,iCell),w(k+1,iCell),0.5*(rw(k,iCell)+rw(k-1,iCell)), 1.0_RKIND ) + end do + k = nVertLevels + wdwz(k) = 0.25*(rw(k,icell)+rw(k-1,iCell))*(w(k,iCell)+w(k-1,iCell)) - wdwz(nVertLevels+1) = 0. + wdwz(nVertLevels+1) = 0.0 ! 
Note: next we are also dividing through by the cell area after the horizontal flux divergence +!DIR$ IVDEP do k=2,nVertLevels + tend_w(k,iCell) = tend_w(k,iCell) * invAreaCell(iCell) -rdzu(k)*(wdwz(k+1)-wdwz(k)) + end do - tend_w(k,iCell) = tend_w(k,iCell)/areaCell(iCell) -rdzu(k)*(wdwz(k+1)-wdwz(k)) & - - cqw(k,iCell)*( rdzu(k)*(pp(k,iCell)-pp(k-1,iCell)) & - + gravity* & - ( fzm(k)*(rb(k,iCell)*(qtot(k,iCell)) + & - rr(k,iCell)*(1.+qtot(k,iCell))) & - +fzp(k)*(rb(k-1,iCell)*(qtot(k-1,iCell)) + & - rr(k-1,iCell)*(1.+qtot(k-1,iCell))) )) - - if(rk_step == 1) then - tend_w_pgf(k,iCell) = cqw(k,iCell)*(rdzu(k)*(pp(k,iCell)-pp(k-1,iCell))) - tend_w_buoy(k,iCell) = cqw(k,iCell)*gravity* & - ( fzm(k)*(rb(k,iCell)*(qtot(k,iCell)) + & - rr(k,iCell)*(1.+qtot(k,iCell))) & - +fzp(k)*(rb(k-1,iCell)*(qtot(k-1,iCell)) + & - rr(k-1,iCell)*(1.+qtot(k-1,iCell))) ) - endif + if(rk_step == 1) then +!DIR$ IVDEP + do k=2,nVertLevels + tend_w_euler(k,iCell) = tend_w_euler(k,iCell) - cqw(k,iCell)*( & + rdzu(k)*(pp(k,iCell)-pp(k-1,iCell)) & + - (fzm(k)*dpdz(k,iCell) + fzp(k)*dpdz(k-1,iCell)) ) ! dpdz is the buoyancy term here. + end do + end if - end do end do - ! - ! vertical mixing for w - 2nd order - ! - - if (rk_step == 1 .or. rk_diffusion) then + if (rk_step == 1) then if ( v_mom_eddy_visc2 > 0.0 ) then - do iCell = 1, nCellsSolve - do k=2,nVertLevels - tend_w_euler(k,iCell) = tend_w_euler(k,iCell) + v_mom_eddy_visc2*0.5*(rho_zz(k,iCell)+rho_zz(k-1,iCell))*( & - (w(k+1,iCell)-w(k ,iCell))*rdzw(k) & - -(w(k ,iCell)-w(k-1,iCell))*rdzw(k-1) )*rdzu(k) - end do + do iCell=cellSolveStart,cellSolveEnd +!DIR$ IVDEP + do k=2,nVertLevels + tend_w_euler(k,iCell) = tend_w_euler(k,iCell) + v_mom_eddy_visc2*0.5*(rho_zz(k,iCell)+rho_zz(k-1,iCell))*( & + (w(k+1,iCell)-w(k ,iCell))*rdzw(k) & + -(w(k ,iCell)-w(k-1,iCell))*rdzw(k-1) )*rdzu(k) + end do end do end if end if ! mixing term computed first rk_step + ! add in mixing terms for w -! 
add in mixing terms for w - - do iCell = 1, nCellsSolve + do iCell = cellSolveStart,cellSolveEnd +!DIR$ IVDEP do k=2,nVertLevels tend_w(k,iCell) = tend_w(k,iCell) + tend_w_euler(k,iCell) end do end do - deallocate(qtot) - !----------- rhs for theta - tend_theta(:,:) = 0. - ! ! horizontal advection for theta ! - if (config_theta_adv_order == 2) then - - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then - do k=1,nVertLevels - flux = dvEdge(iEdge) * ru(k,iEdge) * ( 0.5*(theta_m(k,cell1) + theta_m(k,cell2)) ) - tend_theta(k,cell1) = tend_theta(k,cell1) - flux - tend_theta(k,cell2) = tend_theta(k,cell2) + flux - end do - end if - end do - - else if (config_theta_adv_order == 3) then - - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then + do iCell=cellSolveStart,cellSolveEnd ! Technically updating fewer cells than before... + tend_theta(1:nVertLevels,iCell) = 0.0 + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) - flux_arr(:) = 0. 
- do i=1,nAdvCellsForEdge(iEdge) - iCell = advCellsForEdge(i,iEdge) - do k=1,nVertLevels - scalar_weight = adv_coefs(i,iEdge) + coef_3rd_order*sign(1.0_RKIND,ru(k,iEdge))*adv_coefs_3rd(i,iEdge) - flux_arr(k) = flux_arr(k) + scalar_weight* theta_m(k,iCell) - end do - end do + flux_arr(1:nVertLevels) = 0.0 + do j=1,nAdvCellsForEdge(iEdge) + iAdvCell = advCellsForEdge(j,iEdge) do k=1,nVertLevels - tend_theta(k,cell1) = tend_theta(k,cell1) - ru(k,iEdge)*flux_arr(k) - tend_theta(k,cell2) = tend_theta(k,cell2) + ru(k,iEdge)*flux_arr(k) + scalar_weight = adv_coefs(j,iEdge) + sign(1.0_RKIND,ru(k,iEdge))*adv_coefs_3rd(j,iEdge) + flux_arr(k) = flux_arr(k) + scalar_weight* theta_m(k,iAdvCell) end do + end do + +!DIR$ IVDEP + do k=1,nVertLevels + tend_theta(k,iCell) = tend_theta(k,iCell) - edgesOnCell_sign(i,iCell) * ru(k,iEdge) * flux_arr(k) + end do - end if end do + end do - else if (config_theta_adv_order == 4) then +! addition to pick up perturbation flux for rtheta_pp equation - do iEdge=1,nEdges + if(rk_step > 1) then + do iCell=cellSolveStart,cellSolveEnd + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCells .and. cell2 <= nCells) then - - do k=1,nVertLevels - - d2fdx2_cell1 = deriv_two(1,1,iEdge) * theta_m(k,cell1) - d2fdx2_cell2 = deriv_two(1,2,iEdge) * theta_m(k,cell2) - do i=1, nEdgesOnCell(cell1) - if ( cellsOnCell(i,cell1) <= nCells) & - d2fdx2_cell1 = d2fdx2_cell1 + deriv_two(i+1,1,iEdge) * theta_m(k,cellsOnCell(i,cell1)) - end do - do i=1, nEdgesOnCell(cell2) - if ( cellsOnCell(i,cell2) <= nCells) & - d2fdx2_cell2 = d2fdx2_cell2 + deriv_two(i+1,2,iEdge) * theta_m(k,cellsOnCell(i,cell2)) - end do - - flux = dvEdge(iEdge) * ru(k,iEdge) * ( & - 0.5*(theta_m(k,cell1) + theta_m(k,cell2)) & - -(dcEdge(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12. 
) - - tend_theta(k,cell1) = tend_theta(k,cell1) - flux - tend_theta(k,cell2) = tend_theta(k,cell2) + flux - end do - - end if - - end do +!DIR$ IVDEP + do k=1,nVertLevels + flux = edgesOnCell_sign(i,iCell)*dvEdge(iEdge)*(ru_save(k,iEdge)-ru(k,iEdge))*0.5*(theta_m_save(k,cell2)+theta_m_save(k,cell1)) + tend_theta(k,iCell) = tend_theta(k,iCell)-flux ! division by areaCell picked up down below + end do + end do + end do end if ! @@ -3481,136 +4857,82 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r ! but here we can also code in hyperdiffusion if we wish (2nd order at present) ! - if (rk_step == 1 .or. rk_diffusion) then - - tend_theta_euler = 0. - - if (delsq_horiz_mixing) then - if ( (h_theta_eddy_visc2 > 0.0) .and. (config_horiz_mixing == "2d_fixed") ) then - - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then - - do k=1,nVertLevels - theta_turb_flux = h_theta_eddy_visc2*prandtl_inv*(theta_m(k,cell2) - theta_m(k,cell1))/dcEdge(iEdge) - theta_turb_flux = theta_turb_flux * meshScalingDel2(iEdge) - flux = dvEdge (iEdge) * rho_edge(k,iEdge) * theta_turb_flux - tend_theta_euler(k,cell1) = tend_theta_euler(k,cell1) + flux/areaCell(cell1) - tend_theta_euler(k,cell2) = tend_theta_euler(k,cell2) - flux/areaCell(cell2) - end do - - end if - end do + if (rk_step == 1) then - else if ( ( config_horiz_mixing == "2d_smagorinsky") ) then + delsq_theta(1:nVertLevels,cellStart:cellEnd) = 0.0 - do iEdge=1,nEdges + do iCell=cellStart,cellEnd + tend_theta_euler(1:nVertLevels,iCell) = 0.0 + r_areaCell = invAreaCell(iCell) + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + edge_sign = r_areaCell*edgesOnCell_sign(i,iCell) * dvEdge(iEdge) * invDcEdge(iEdge) + pr_scale = prandtl_inv * meshScalingDel2(iEdge) cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. 
cell2 <= nCellsSolve) then - - do k=1,nVertLevels - theta_turb_flux = 0.5*(kdiff(k,cell1)+kdiff(k,cell2))*prandtl_inv & - *(theta_m(k,cell2) - theta_m(k,cell1))/dcEdge(iEdge) - theta_turb_flux = theta_turb_flux * meshScalingDel2(iEdge) - flux = dvEdge (iEdge) * rho_edge(k,iEdge) * theta_turb_flux - tend_theta_euler(k,cell1) = tend_theta_euler(k,cell1) + flux/areaCell(cell1) - tend_theta_euler(k,cell2) = tend_theta_euler(k,cell2) - flux/areaCell(cell2) - end do - - end if - end do - end if - - end if - - if ((h_theta_eddy_visc4 > 0.0 .and. config_horiz_mixing == "2d_fixed") .or. & - (h_theta_eddy_visc4 > 0.0 .and. config_horiz_mixing == "2d_smagorinsky")) then - - allocate(delsq_theta(nVertLevels, nCells+1)) - - delsq_theta(:,:) = 0. - - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - do k=1,nVertLevels - delsq_theta(k,cell1) = delsq_theta(k,cell1) + dvEdge(iEdge)*rho_edge(k,iEdge)*(theta_m(k,cell2) - theta_m(k,cell1))/dcEdge(iEdge) - delsq_theta(k,cell2) = delsq_theta(k,cell2) - dvEdge(iEdge)*rho_edge(k,iEdge)*(theta_m(k,cell2) - theta_m(k,cell1))/dcEdge(iEdge) - end do - end do - - do iCell = 1, nCells - r = 1.0 / areaCell(iCell) - do k=1,nVertLevels - delsq_theta(k,iCell) = delsq_theta(k,iCell) * r - end do - end do - - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then - +!DIR$ IVDEP do k=1,nVertLevels - theta_turb_flux = h_theta_eddy_visc4*prandtl_inv*(delsq_theta(k,cell2) - delsq_theta(k,cell1))/dcEdge(iEdge) - theta_turb_flux = theta_turb_flux * meshScalingDel4(iEdge) - flux = dvEdge (iEdge) * theta_turb_flux - tend_theta_euler(k,cell1) = tend_theta_euler(k,cell1) - flux/areaCell(cell1) - tend_theta_euler(k,cell2) = tend_theta_euler(k,cell2) + flux/areaCell(cell2) - end do - - end if - end do - deallocate(delsq_theta) +! 
we are computing the Smagorinsky filter at more points than needed here so as to pick up the delsq_theta for 4th order filter below - end if + theta_turb_flux = edge_sign*(theta_m(k,cell2) - theta_m(k,cell1))*rho_edge(k,iEdge) + delsq_theta(k,iCell) = delsq_theta(k,iCell) + theta_turb_flux + theta_turb_flux = theta_turb_flux*0.5*(kdiff(k,cell1)+kdiff(k,cell2)) * pr_scale + tend_theta_euler(k,iCell) = tend_theta_euler(k,iCell) + theta_turb_flux - end if ! theta mixing calculated first rk_step + end do + end do + end do - ! - ! vertical advection plus diabatic term - ! Note: we are also dividing through by the cell area after the horizontal flux divergence - ! - do iCell = 1, nCells +!$OMP BARRIER + + if (h_theta_eddy_visc4 > 0.0) then ! 4th order mixing is active - wdtz(1) = 0. - if (config_theta_vadv_order == 2) then + do iCell=cellSolveStart,cellSolveEnd ! Technically updating fewer cells than before... + r_areaCell = h_theta_eddy_visc4 * prandtl_inv * invAreaCell(iCell) + do i=1,nEdgesOnCell(iCell) - do k=2,nVertLevels - wdtz(k) = rw(k,icell)*(fzm(k)*theta_m(k,iCell)+fzp(k)*theta_m(k-1,iCell)) - end do + iEdge = edgesOnCell(i,iCell) + edge_sign = meshScalingDel4(iEdge)*r_areaCell*dvEdge(iEdge)*edgesOnCell_sign(i,iCell)*invDcEdge(iEdge) - else if (config_theta_vadv_order == 3) then + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) - k = 2 - wdtz(k) = rw(k,icell)*(fzm(k)*theta_m(k,iCell)+fzp(k)*theta_m(k-1,iCell)) - do k=3,nVertLevels-1 - wdtz(k) = flux3( theta_m(k-2,iCell),theta_m(k-1,iCell),theta_m(k,iCell),theta_m(k+1,iCell), rw(k,iCell), coef_3rd_order ) + do k=1,nVertLevels + tend_theta_euler(k,iCell) = tend_theta_euler(k,iCell) - edge_sign*(delsq_theta(k,cell2) - delsq_theta(k,cell1)) + end do + end do end do - k = nVertLevels - wdtz(k) = rw(k,icell)*(fzm(k)*theta_m(k,iCell)+fzp(k)*theta_m(k-1,iCell)) - else if (config_theta_vadv_order == 4) then + end if ! 
4th order mixing is active - k = 2 - wdtz(k) = rw(k,icell)*(fzm(k)*theta_m(k,iCell)+fzp(k)*theta_m(k-1,iCell)) - do k=3,nVertLevels-1 - wdtz(k) = flux4( theta_m(k-2,iCell),theta_m(k-1,iCell),theta_m(k,iCell),theta_m(k+1,iCell), rw(k,iCell) ) - end do - k = nVertLevels - wdtz(k) = rw(k,icell)*(fzm(k)*theta_m(k,iCell)+fzp(k)*theta_m(k-1,iCell)) + end if ! theta mixing calculated first rk_step - end if + ! + ! vertical advection plus diabatic term + ! Note: we are also dividing through by the cell area after the horizontal flux divergence + ! + do iCell = cellSolveStart,cellSolveEnd + wdtz(1) = 0.0 - wdtz(nVertLevels+1) = 0. + k = 2 + wdtz(k) = rw(k,icell)*(fzm(k)*theta_m(k,iCell)+fzp(k)*theta_m(k-1,iCell)) + wdtz(k) = wdtz(k)+(rw_save(k,icell)-rw(k,icell))*(fzm(k)*theta_m_save(k,iCell)+fzp(k)*theta_m_save(k-1,iCell)) + do k=3,nVertLevels-1 + wdtz(k) = flux3( theta_m(k-2,iCell),theta_m(k-1,iCell),theta_m(k,iCell),theta_m(k+1,iCell), rw(k,iCell), coef_3rd_order ) + wdtz(k) = wdtz(k) + (rw_save(k,icell)-rw(k,iCell))*(fzm(k)*theta_m_save(k,iCell)+fzp(k)*theta_m_save(k-1,iCell)) ! rtheta_pp redefinition + end do + k = nVertLevels + wdtz(k) = rw_save(k,icell)*(fzm(k)*theta_m(k,iCell)+fzp(k)*theta_m(k-1,iCell)) ! rtheta_pp redefinition + + wdtz(nVertLevels+1) = 0.0 +!DIR$ IVDEP do k=1,nVertLevels - tend_theta(k,iCell) = tend_theta(k,iCell)/areaCell(iCell) -rdzw(k)*(wdtz(k+1)-wdtz(k)) + tend_theta(k,iCell) = tend_theta(k,iCell)*invAreaCell(iCell) -rdzw(k)*(wdtz(k+1)-wdtz(k)) + tend_rtheta_adv(k,iCell) = tend_theta(k,iCell) ! this is for the Tiedke scheme + rthdynten(k,iCell) = tend_theta(k,iCell)/rho_zz(k,iCell) ! this is for the Grell-Freitas scheme tend_theta(k,iCell) = tend_theta(k,iCell) + rho_zz(k,iCell)*rt_diabatic_tend(k,iCell) end do end do @@ -3619,65 +4941,68 @@ subroutine atm_compute_dyn_tend(tend, state, diag, mesh, configs, nVertLevels, r ! vertical mixing for theta - 2nd order ! - if (rk_step == 1 .or. 
rk_diffusion) then + if (rk_step == 1) then - if ( v_theta_eddy_visc2 > 0.0 ) then + if ( v_theta_eddy_visc2 > 0.0 ) then ! vertical mixing for theta_m - if (config_mix_full) then + if (config_mix_full) then - do iCell = 1, nCellsSolve - do k=2,nVertLevels-1 - z1 = zgrid(k-1,iCell) - z2 = zgrid(k ,iCell) - z3 = zgrid(k+1,iCell) - z4 = zgrid(k+2,iCell) - - zm = 0.5*(z1+z2) - z0 = 0.5*(z2+z3) - zp = 0.5*(z3+z4) - - tend_theta_euler(k,iCell) = tend_theta_euler(k,iCell) + v_theta_eddy_visc2*prandtl_inv*rho_zz(k,iCell)*(& - (theta_m(k+1,iCell)-theta_m(k ,iCell))/(zp-z0) & - -(theta_m(k ,iCell)-theta_m(k-1,iCell))/(z0-zm) )/(0.5*(zp-zm)) + do iCell = cellSolveStart,cellSolveEnd + do k=2,nVertLevels-1 + z1 = zgrid(k-1,iCell) + z2 = zgrid(k ,iCell) + z3 = zgrid(k+1,iCell) + z4 = zgrid(k+2,iCell) + + zm = 0.5*(z1+z2) + z0 = 0.5*(z2+z3) + zp = 0.5*(z3+z4) + + tend_theta_euler(k,iCell) = tend_theta_euler(k,iCell) + v_theta_eddy_visc2*prandtl_inv*rho_zz(k,iCell)*(& + (theta_m(k+1,iCell)-theta_m(k ,iCell))/(zp-z0) & + -(theta_m(k ,iCell)-theta_m(k-1,iCell))/(z0-zm) )/(0.5*(zp-zm)) + end do end do - end do else ! 
idealized cases where we mix on the perturbation from the initial 1-D state - do iCell = 1, nCellsSolve - do k=2,nVertLevels-1 - z1 = zgrid(k-1,iCell) - z2 = zgrid(k ,iCell) - z3 = zgrid(k+1,iCell) - z4 = zgrid(k+2,iCell) - - zm = 0.5*(z1+z2) - z0 = 0.5*(z2+z3) - zp = 0.5*(z3+z4) - - tend_theta_euler(k,iCell) = tend_theta_euler(k,iCell) + v_theta_eddy_visc2*prandtl_inv*rho_zz(k,iCell)*(& - ((theta_m(k+1,iCell)-t_init(k+1,iCell))-(theta_m(k ,iCell)-t_init(k,iCell)))/(zp-z0) & - -((theta_m(k ,iCell)-t_init(k,iCell))-(theta_m(k-1,iCell)-t_init(k-1,iCell)))/(z0-zm) )/(0.5*(zp-zm)) + do iCell = cellSolveStart,cellSolveEnd + do k=2,nVertLevels-1 + z1 = zgrid(k-1,iCell) + z2 = zgrid(k ,iCell) + z3 = zgrid(k+1,iCell) + z4 = zgrid(k+2,iCell) + + zm = 0.5*(z1+z2) + z0 = 0.5*(z2+z3) + zp = 0.5*(z3+z4) + + tend_theta_euler(k,iCell) = tend_theta_euler(k,iCell) + v_theta_eddy_visc2*prandtl_inv*rho_zz(k,iCell)*(& + ((theta_m(k+1,iCell)-t_init(k+1,iCell))-(theta_m(k ,iCell)-t_init(k,iCell)))/(zp-z0) & + -((theta_m(k ,iCell)-t_init(k,iCell))-(theta_m(k-1,iCell)-t_init(k-1,iCell)))/(z0-zm) )/(0.5*(zp-zm)) + end do end do - end do - end if + end if - end if + end if - end if ! compute theta mixing on first rk_step + end if ! compute vertical theta mixing on first rk_step - do iCell = 1, nCellsSolve + do iCell = cellSolveStart,cellSolveEnd +!DIR$ IVDEP do k=1,nVertLevels - tend_theta(k,iCell) = tend_theta(k,iCell) + tend_theta_euler(k,iCell) +! 
tend_theta(k,iCell) = tend_theta(k,iCell) + tend_theta_euler(k,iCell) + tend_theta(k,iCell) = tend_theta(k,iCell) + tend_theta_euler(k,iCell) + tend_rtheta_physics(k,iCell) end do end do - end subroutine atm_compute_dyn_tend + end subroutine atm_compute_dyn_tend_work -!------- - subroutine atm_compute_solve_diagnostics(dt, state, time_lev, diag, mesh, configs) + subroutine atm_compute_solve_diagnostics(dt, state, time_lev, diag, mesh, configs, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + rk_step ) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! Compute diagnostic fields used in the tendency computations ! @@ -3691,26 +5016,24 @@ subroutine atm_compute_solve_diagnostics(dt, state, time_lev, diag, mesh, config real (kind=RKIND), intent(in) :: dt type (mpas_pool_type), intent(inout) :: state integer, intent(in) :: time_lev ! which time level of state to use + integer, intent(in), optional :: rk_step ! which rk_step type (mpas_pool_type), intent(inout) :: diag type (mpas_pool_type), intent(in) :: mesh type (mpas_pool_type), intent(in) :: configs + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd - integer :: iEdge, iCell, iVertex, k, cell1, cell2, eoe, i - real (kind=RKIND) :: h_vertex, r - real (kind=RKIND) :: invAreaTriangle, r1, r2 - integer, pointer :: nCells, nEdges, nVertices, nVertLevels, vertexDegree - real (kind=RKIND), dimension(:), pointer :: fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, invAreaCell - real (kind=RKIND), dimension(:,:), pointer :: vh, weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, & - circulation, vorticity, ke, pv_edge, pv_vertex, pv_cell, gradPVn, gradPVt, & + real (kind=RKIND), dimension(:), pointer :: fVertex, fEdge, invAreaTriangle, invAreaCell + real (kind=RKIND), dimension(:), pointer :: dvEdge, dcEdge, invDvEdge, invDcEdge + real (kind=RKIND), dimension(:,:), pointer :: weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, & + 
vorticity, ke, pv_edge, pv_vertex, pv_cell, gradPVn, gradPVt, & divergence - integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, verticesOnEdge, edgesOnCell, edgesOnEdge, edgesOnVertex + integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, verticesOnEdge, edgesOnCell, edgesOnEdge, edgesOnVertex, & + kiteForCell, verticesOnCell + real (kind=RKIND), dimension(:,:), pointer :: edgesOnVertex_sign, edgesOnCell_sign integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge - logical, parameter :: hollingsworth=.true. - real (kind=RKIND), allocatable, dimension(:,:) :: ke_vertex - real (kind=RKIND) :: ke_fact real (kind=RKIND), pointer :: config_apvm_upwinding @@ -3720,9 +5043,7 @@ subroutine atm_compute_solve_diagnostics(dt, state, time_lev, diag, mesh, config call mpas_pool_get_array(state, 'u', u, time_lev) call mpas_pool_get_array(diag, 'v', v) - call mpas_pool_get_array(diag, 'rv', vh) call mpas_pool_get_array(diag, 'rho_edge', h_edge) - call mpas_pool_get_array(diag, 'circulation', circulation) call mpas_pool_get_array(diag, 'vorticity', vorticity) call mpas_pool_get_array(diag, 'divergence', divergence) call mpas_pool_get_array(diag, 'ke', ke) @@ -3737,16 +5058,21 @@ subroutine atm_compute_solve_diagnostics(dt, state, time_lev, diag, mesh, config call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) call mpas_pool_get_array(mesh, 'cellsOnVertex', cellsOnVertex) call mpas_pool_get_array(mesh, 'verticesOnEdge', verticesOnEdge) + call mpas_pool_get_array(mesh, 'verticesOnCell', verticesOnCell) call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) call mpas_pool_get_array(mesh, 'edgesOnCell', edgesOnCell) call mpas_pool_get_array(mesh, 'nEdgesOnEdge', nEdgesOnEdge) call mpas_pool_get_array(mesh, 'edgesOnEdge', edgesOnEdge) call mpas_pool_get_array(mesh, 'edgesOnVertex', edgesOnVertex) + call mpas_pool_get_array(mesh, 'edgesOnVertex_sign', edgesOnVertex_sign) + call mpas_pool_get_array(mesh, 'edgesOnCell_sign', edgesOnCell_sign) + 
call mpas_pool_get_array(mesh, 'kiteForCell', kiteForCell) call mpas_pool_get_array(mesh, 'dcEdge', dcEdge) call mpas_pool_get_array(mesh, 'dvEdge', dvEdge) - call mpas_pool_get_array(mesh, 'areaCell', areaCell) + call mpas_pool_get_array(mesh, 'invDcEdge', invDcEdge) + call mpas_pool_get_array(mesh, 'invDvEdge', invDvEdge) call mpas_pool_get_array(mesh, 'invAreaCell', invAreaCell) - call mpas_pool_get_array(mesh, 'areaTriangle', areaTriangle) + call mpas_pool_get_array(mesh, 'invAreaTriangle', invAreaTriangle) call mpas_pool_get_array(mesh, 'fVertex', fVertex) call mpas_pool_get_array(mesh, 'fEdge', fEdge) @@ -3756,33 +5082,124 @@ subroutine atm_compute_solve_diagnostics(dt, state, time_lev, diag, mesh, config call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) call mpas_pool_get_dimension(mesh, 'vertexDegree', vertexDegree) + call atm_compute_solve_diagnostics_work(nCells, nEdges, nVertices, & + vertexDegree, dt, config_apvm_upwinding, & + fVertex, fEdge, invAreaTriangle, invAreaCell, dvEdge, dcEdge, invDvEdge, invDcEdge, & + weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, vorticity, ke, pv_edge, pv_vertex, pv_cell, & + gradPVn, gradPVt, divergence, cellsOnEdge, cellsOnVertex, verticesOnEdge, edgesOnCell, edgesOnEdge, & + edgesOnVertex, kiteForCell, verticesOnCell, edgesOnVertex_sign, edgesOnCell_sign, nEdgesOnCell, nEdgesOnEdge, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + rk_step) + + end subroutine atm_compute_solve_diagnostics + + + subroutine atm_compute_solve_diagnostics_work(nCells, nEdges, nVertices, & + vertexDegree, dt, config_apvm_upwinding, & + fVertex, fEdge, invAreaTriangle, invAreaCell, dvEdge, dcEdge, invDvEdge, invDcEdge, & + weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, vorticity, ke, pv_edge, pv_vertex, pv_cell, & + gradPVn, gradPVt, divergence, cellsOnEdge, cellsOnVertex, verticesOnEdge, edgesOnCell, edgesOnEdge, & + edgesOnVertex, kiteForCell, verticesOnCell, edgesOnVertex_sign, edgesOnCell_sign, 
nEdgesOnCell, nEdgesOnEdge, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + rk_step) + + use mpas_atm_dimensions + + implicit none + + ! + ! Dummy arguments + ! + integer, intent(in) :: nCells, nEdges, nVertices, vertexDegree + real (kind=RKIND), intent(in) :: dt, config_apvm_upwinding + real (kind=RKIND), dimension(nVertices+1) :: fVertex + real (kind=RKIND), dimension(nEdges+1) :: fEdge + real (kind=RKIND), dimension(nVertices+1) :: invAreaTriangle + real (kind=RKIND), dimension(nCells+1) :: invAreaCell + real (kind=RKIND), dimension(nEdges+1) :: dvEdge + real (kind=RKIND), dimension(nEdges+1) :: dcEdge + real (kind=RKIND), dimension(nEdges+1) :: invDvEdge + real (kind=RKIND), dimension(nEdges+1) :: invDcEdge + real (kind=RKIND), dimension(maxEdges2,nEdges+1) :: weightsOnEdge + real (kind=RKIND), dimension(3,nVertices+1) :: kiteAreasOnVertex + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: h_edge + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: h + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: u + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: v + real (kind=RKIND), dimension(nVertLevels,nVertices+1) :: vorticity + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: ke + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: pv_edge + real (kind=RKIND), dimension(nVertLevels,nVertices+1) :: pv_vertex + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: pv_cell + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: gradPVn + real (kind=RKIND), dimension(nVertLevels,nEdges+1) :: gradPVt + real (kind=RKIND), dimension(nVertLevels,nCells+1) :: divergence + integer, dimension(2,nEdges+1) :: cellsOnEdge + integer, dimension(3,nVertices+1) :: cellsOnVertex + integer, dimension(2,nEdges+1) :: verticesOnEdge + integer, dimension(maxEdges,nCells+1) :: edgesOnCell + integer, dimension(maxEdges2,nEdges+1) :: edgesOnEdge + integer, dimension(3,nVertices+1) :: edgesOnVertex + integer, dimension(maxEdges,nCells+1) :: 
kiteForCell + integer, dimension(maxEdges,nCells+1) :: verticesOnCell + real (kind=RKIND), dimension(3,nVertices+1) :: edgesOnVertex_sign + real (kind=RKIND), dimension(maxEdges,nCells+1) :: edgesOnCell_sign + integer, dimension(nCells+1) :: nEdgesOnCell + integer, dimension(nEdges+1) :: nEdgesOnEdge + + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + + integer, intent(in), optional :: rk_step + + ! + ! Local variables + ! + integer :: iEdge, iCell, iVertex, k, cell1, cell2, eoe, i, j + real (kind=RKIND) :: h_vertex, r, s + real (kind=RKIND) :: r1, r2 + + logical, parameter :: hollingsworth=.true. + real (kind=RKIND) :: ke_fact, efac + logical :: reconstruct_v + ! ! Compute height on cell edges at velocity locations ! - do iEdge=1,nEdges + do iEdge=edgeStart,edgeEnd cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) +!DIR$ IVDEP do k=1,nVertLevels h_edge(k,iEdge) = 0.5 * (h(k,cell1) + h(k,cell2)) end do - end do +! the first openmp barrier below is set so that ke_edge is computed +! it would be good to move this somewhere else? + + efac = dcEdge(iEdge)*dvEdge(iEdge) + do k=1,nVertLevels + ke_edge(k,iEdge) = efac*u(k,iEdge)**2 + end do + + end do ! ! Compute circulation and relative vorticity at each vertex ! 
- circulation(:,:) = 0.0 - do iEdge=1,nEdges - do k=1,nVertLevels - circulation(k,verticesOnEdge(1,iEdge)) = circulation(k,verticesOnEdge(1,iEdge)) - dcEdge(iEdge) * u(k,iEdge) - circulation(k,verticesOnEdge(2,iEdge)) = circulation(k,verticesOnEdge(2,iEdge)) + dcEdge(iEdge) * u(k,iEdge) + do iVertex=vertexStart,vertexEnd + vorticity(1:nVertLevels,iVertex) = 0.0 + do i=1,vertexDegree + iEdge = edgesOnVertex(i,iVertex) + s = edgesOnVertex_sign(i,iVertex) * dcEdge(iEdge) +!DIR$ IVDEP + do k=1,nVertLevels + vorticity(k,iVertex) = vorticity(k,iVertex) + s * u(k,iEdge) + end do end do - end do - do iVertex=1,nVertices - invAreaTriangle = 1.0_RKIND / areaTriangle(iVertex) +!DIR$ IVDEP do k=1,nVertLevels - vorticity(k,iVertex) = circulation(k,iVertex) * invAreaTriangle + vorticity(k,iVertex) = vorticity(k,iVertex) * invAreaTriangle(iVertex) end do end do @@ -3790,102 +5207,117 @@ subroutine atm_compute_solve_diagnostics(dt, state, time_lev, diag, mesh, config ! ! Compute the divergence at each cell center ! - divergence(:,:) = 0.0 - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - do k=1,nVertLevels - divergence(k,cell1) = divergence(k,cell1) + u(k,iEdge)*dvEdge(iEdge) - divergence(k,cell2) = divergence(k,cell2) - u(k,iEdge)*dvEdge(iEdge) + do iCell=cellStart,cellEnd + divergence(1:nVertLevels,iCell) = 0.0 + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + s = edgesOnCell_sign(i,iCell) * dvEdge(iEdge) +!DIR$ IVDEP + do k=1,nVertLevels + divergence(k,iCell) = divergence(k,iCell) + s * u(k,iEdge) + end do end do - end do - do iCell = 1,nCells - r = 1.0 / areaCell(iCell) + r = invAreaCell(iCell) do k = 1,nVertLevels divergence(k,iCell) = divergence(k,iCell) * r end do end do +!$OMP BARRIER + ! ! Compute kinetic energy in each cell (Ringler et al JCP 2009) ! - ke(:,:) = 0.0 ! Replace 2.0 with 2 in exponentiation to avoid outside chance that ! 
compiler will actually allow "float raised to float" operation - do iCell=1,nCells + do iCell=cellStart,cellEnd + ke(1:nVertLevels,iCell) = 0.0 do i=1,nEdgesOnCell(iCell) iEdge = edgesOnCell(i,iCell) do k=1,nVertLevels - ke(k,iCell) = ke(k,iCell) + 0.25 * dcEdge(iEdge) * dvEdge(iEdge) * u(k,iEdge)**2 +! ke(k,iCell) = ke(k,iCell) + 0.25 * dcEdge(iEdge) * dvEdge(iEdge) * u(k,iEdge)**2 + ke(k,iCell) = ke(k,iCell) + 0.25 * ke_edge(k,iEdge) end do end do +!DIR$ IVDEP do k=1,nVertLevels ke(k,iCell) = ke(k,iCell) * invAreaCell(iCell) end do end do + if (hollingsworth) then ! Compute ke at cell vertices - AG's new KE construction, part 1 ! *** approximation here because we don't have inner triangle areas ! - allocate (ke_vertex(nVertLevels,nVertices)) - ! Precalculate inverse area triangle to avoid repeated divisions ! Replace 2.0 with 2 in exponentiation to avoid outside chance that ! compiler will actually allow "float raised to float" operation - do iVertex=1,nVertices - invAreaTriangle = 1.0_RKIND / areaTriangle(iVertex) + do iVertex=vertexStart,vertexEnd + r = 0.25 * invAreaTriangle(iVertex) do k=1,nVertLevels - ke_vertex(k,iVertex) = ( dcEdge(EdgesOnVertex(1,iVertex))*dvEdge(EdgesOnVertex(1,iVertex))*u(k,EdgesOnVertex(1,iVertex))**2 & - +dcEdge(EdgesOnVertex(2,iVertex))*dvEdge(EdgesOnVertex(2,iVertex))*u(k,EdgesOnVertex(2,iVertex))**2 & - +dcEdge(EdgesOnVertex(3,iVertex))*dvEdge(EdgesOnVertex(3,iVertex))*u(k,EdgesOnVertex(3,iVertex))**2 & - ) * 0.25 * invAreaTriangle + +! ke_vertex(k,iVertex) = ( dcEdge(EdgesOnVertex(1,iVertex))*dvEdge(EdgesOnVertex(1,iVertex))*u(k,EdgesOnVertex(1,iVertex))**2 & +! +dcEdge(EdgesOnVertex(2,iVertex))*dvEdge(EdgesOnVertex(2,iVertex))*u(k,EdgesOnVertex(2,iVertex))**2 & +! +dcEdge(EdgesOnVertex(3,iVertex))*dvEdge(EdgesOnVertex(3,iVertex))*u(k,EdgesOnVertex(3,iVertex))**2 & +! 
) * r + + ke_vertex(k,iVertex) = ( ke_edge(k,EdgesOnVertex(1,iVertex))+ke_edge(k,EdgesOnVertex(2,iVertex))+ke_edge(k,EdgesOnVertex(3,iVertex)) )*r end do end do +!$OMP BARRIER + ! adjust ke at cell vertices - AG's new KE construction, part 2 ! ke_fact = 1.0 - .375 - do iCell=1,nCells + do iCell=cellStart,cellEnd do k=1,nVertLevels - ke(k,iCell) = ke_fact*ke(k,iCell) + ke(k,iCell) = ke_fact * ke(k,iCell) end do end do - ! Avoid FP errors caused by a potential division by zero below by - ! initializing the "garbage cell" of areaCell to a non-zero value - areaCell(nCells+1) = 1.0 - do iVertex = 1, nVertices - do i=1,vertexDegree - iCell = cellsOnVertex(i,iVertex) + do iCell=cellStart,cellEnd + r = invAreaCell(iCell) + do i=1,nEdgesOnCell(iCell) + iVertex = verticesOnCell(i,iCell) + j = kiteForCell(i,iCell) +!DIR$ IVDEP do k = 1,nVertLevels - ke(k,iCell) = ke(k,iCell) + (1.-ke_fact)*kiteAreasOnVertex(i, iVertex) * ke_vertex(k, iVertex) * invAreaCell(iCell) + ke(k,iCell) = ke(k,iCell) + (1.-ke_fact)*kiteAreasOnVertex(j,iVertex) * ke_vertex(k,iVertex) * r end do end do end do - deallocate (ke_vertex) end if ! ! Compute v (tangential) velocities following Thuburn et al JCP 2009 - ! - v(:,:) = 0.0 - do iEdge = 1,nEdges - do i=1,nEdgesOnEdge(iEdge) + ! The tangential velocity is only used to compute the Smagorinsky coefficient + + reconstruct_v = .true. + if(present(rk_step)) then + if(rk_step /= 3) reconstruct_v = .false. + end if + + if (reconstruct_v) then + do iEdge = edgeStart,edgeEnd + v(1:nVertLevels,iEdge) = 0.0 + do i=1,nEdgesOnEdge(iEdge) eoe = edgesOnEdge(i,iEdge) +!DIR$ IVDEP do k = 1,nVertLevels - v(k,iEdge) = v(k,iEdge) + weightsOnEdge(i,iEdge) * u(k, eoe) + v(k,iEdge) = v(k,iEdge) + weightsOnEdge(i,iEdge) * u(k, eoe) end do - end do - end do - + end do + end do + end if ! ! Compute height at vertices, pv at vertices, and average pv to edge locations @@ -3893,47 +5325,55 @@ subroutine atm_compute_solve_diagnostics(dt, state, time_lev, diag, mesh, config ! ! 
Avoid dividing h_vertex by areaTriangle and move areaTriangle into ! numerator for the pv_vertex calculation - do iVertex = 1,nVertices + do iVertex = vertexStart,vertexEnd +!DIR$ IVDEP do k=1,nVertLevels - h_vertex = 0.0 - do i=1,vertexDegree - h_vertex = h_vertex + h(k,cellsOnVertex(i,iVertex)) * kiteAreasOnVertex(i,iVertex) - end do - pv_vertex(k,iVertex) = (fVertex(iVertex) + vorticity(k,iVertex)) * areaTriangle(iVertex) / h_vertex +! +! the following commented code is for the PV conserving shallow water solver. +! h_vertex = 0.0 +! do i=1,vertexDegree +! h_vertex = h_vertex + h(k,cellsOnVertex(i,iVertex)) * kiteAreasOnVertex(i,iVertex) +! end do +! pv_vertex(k,iVertex) = (fVertex(iVertex) + vorticity(k,iVertex)) * areaTriangle(iVertex) / h_vertex + pv_vertex(k,iVertex) = (fVertex(iVertex) + vorticity(k,iVertex)) end do end do +!$OMP BARRIER ! ! Compute pv at the edges ! ( this computes pv_edge at all edges bounding real cells ) ! - pv_edge(:,:) = 0.0 - do iVertex = 1,nVertices - do i=1,vertexDegree - iEdge = edgesOnVertex(i,iVertex) - do k=1,nVertLevels - pv_edge(k,iEdge) = pv_edge(k,iEdge) + 0.5 * pv_vertex(k,iVertex) - end do + do iEdge = edgeStart,edgeEnd +!DIR$ IVDEP + do k=1,nVertLevels + pv_edge(k,iEdge) = 0.5 * (pv_vertex(k,verticesOnEdge(1,iEdge)) + pv_vertex(k,verticesOnEdge(2,iEdge))) end do end do + if (config_apvm_upwinding > 0.0) then + ! ! Compute pv at cell centers ! ( this computes pv_cell for all real cells ) + ! only needed for APVM upwinding ! 
- pv_cell(:,:) = 0.0 - do iVertex = 1, nVertices - do i=1,vertexDegree - iCell = cellsOnVertex(i,iVertex) + do iCell=cellStart,cellEnd + pv_cell(1:nVertLevels,iCell) = 0.0 + r = invAreaCell(iCell) + do i=1,nEdgesOnCell(iCell) + iVertex = verticesOnCell(i,iCell) + j = kiteForCell(i,iCell) +!DIR$ IVDEP do k = 1,nVertLevels - pv_cell(k,iCell) = pv_cell(k,iCell) + kiteAreasOnVertex(i, iVertex) * pv_vertex(k, iVertex) * invAreaCell(iCell) + pv_cell(k,iCell) = pv_cell(k,iCell) + kiteAreasOnVertex(j,iVertex) * pv_vertex(k,iVertex) * r end do end do end do - if (config_apvm_upwinding > 0.0) then +!$OMP BARRIER ! ! Modify PV edge with upstream bias. @@ -3949,25 +5389,26 @@ subroutine atm_compute_solve_diagnostics(dt, state, time_lev, diag, mesh, config ! Merged loops for calculating gradPVt, gradPVn and pv_edge ! Also precomputed inverses of dvEdge and dcEdge to avoid repeated divisions ! - do iEdge = 1,nEdges - r1 = 1.0_RKIND / dvEdge(iEdge) - r2 = 1.0_RKIND / dcEdge(iEdge) + r = config_apvm_upwinding * dt + do iEdge = edgeStart,edgeEnd + r1 = 1.0_RKIND * invDvEdge(iEdge) + r2 = 1.0_RKIND * invDcEdge(iEdge) +!DIR$ IVDEP do k = 1,nVertLevels gradPVt(k,iEdge) = (pv_vertex(k,verticesOnEdge(2,iEdge)) - pv_vertex(k,verticesOnEdge(1,iEdge))) * r1 gradPVn(k,iEdge) = (pv_cell(k,cellsOnEdge(2,iEdge)) - pv_cell(k,cellsOnEdge(1,iEdge))) * r2 - pv_edge(k,iEdge) = pv_edge(k,iEdge) - config_apvm_upwinding * dt * & - (v(k,iEdge) * gradPVt(k,iEdge) + u(k,iEdge) * gradPVn(k,iEdge)) + pv_edge(k,iEdge) = pv_edge(k,iEdge) - r * (v(k,iEdge) * gradPVt(k,iEdge) + u(k,iEdge) * gradPVn(k,iEdge)) end do end do end if ! 
apvm upwinding + end subroutine atm_compute_solve_diagnostics_work - end subroutine atm_compute_solve_diagnostics - -!---------- - subroutine atm_init_coupled_diagnostics( state, time_lev, diag, mesh, configs ) + subroutine atm_init_coupled_diagnostics(state, time_lev, diag, mesh, configs, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd) implicit none @@ -3976,14 +5417,16 @@ subroutine atm_init_coupled_diagnostics( state, time_lev, diag, mesh, configs ) type (mpas_pool_type), intent(inout) :: diag type (mpas_pool_type), intent(inout) :: mesh type (mpas_pool_type), intent(in) :: configs + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd - integer :: k, iCell, iEdge, iCell1, iCell2, cell1, cell2 - real (kind=RKIND), pointer :: coef_3rd_order - integer, pointer :: config_theta_adv_order + integer :: i, k, iCell, iEdge, cell1, cell2 integer, pointer :: nCells, nEdges, nVertLevels integer, pointer :: index_qv real (kind=RKIND) :: p0, rcv, flux - integer, dimension(:,:), pointer :: cellsOnEdge + integer, dimension(:), pointer :: nEdgesOnCell + integer, dimension(:,:), pointer :: cellsOnEdge, edgesOnCell + real (kind=RKIND), dimension(:,:), pointer :: edgesOnCell_sign real (kind=RKIND), dimension(:,:), pointer :: theta_m real (kind=RKIND), dimension(:,:), pointer :: theta real (kind=RKIND), dimension(:,:), pointer :: rho_zz @@ -4003,7 +5446,7 @@ subroutine atm_init_coupled_diagnostics( state, time_lev, diag, mesh, configs ) real (kind=RKIND), dimension(:,:), pointer :: exner real (kind=RKIND), dimension(:,:), pointer :: exner_base real (kind=RKIND), dimension(:), pointer :: fzm, fzp - real (kind=RKIND), dimension(:,:,:), pointer :: zb, zb3 + real (kind=RKIND), dimension(:,:,:), pointer :: zb, zb3, 
zb_cell, zb3_cell call mpas_pool_get_dimension(mesh, 'nCells', nCells) @@ -4012,9 +5455,9 @@ subroutine atm_init_coupled_diagnostics( state, time_lev, diag, mesh, configs ) call mpas_pool_get_dimension(state, 'index_qv', index_qv) call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) - - call mpas_pool_get_config(configs, 'config_coef_3rd_order', coef_3rd_order) - call mpas_pool_get_config(configs, 'config_theta_adv_order', config_theta_adv_order) + call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) + call mpas_pool_get_array(mesh, 'edgesOnCell', edgesOnCell) + call mpas_pool_get_array(mesh, 'edgesOnCell_sign', edgesOnCell_sign) call mpas_pool_get_array(state, 'theta_m', theta_m, time_lev) call mpas_pool_get_array(diag, 'theta', theta) @@ -4038,31 +5481,36 @@ subroutine atm_init_coupled_diagnostics( state, time_lev, diag, mesh, configs ) call mpas_pool_get_array(mesh, 'fzp', fzp) call mpas_pool_get_array(mesh, 'zb', zb) call mpas_pool_get_array(mesh, 'zb3', zb3) + call mpas_pool_get_array(mesh, 'zb_cell', zb_cell) + call mpas_pool_get_array(mesh, 'zb3_cell', zb3_cell) - if (config_theta_adv_order /= 3) coef_3rd_order = 0.0 rcv = rgas / (cp-rgas) p0 = 1.e5 ! this should come from somewhere else... - do iCell=1,nCells + do iCell=cellStart,cellEnd do k=1,nVertLevels theta_m(k,iCell) = theta(k,iCell) * (1._RKIND + rvord * scalars(index_qv,k,iCell)) rho_zz(k,iCell) = rho(k,iCell) / zz(k,iCell) end do end do - do iEdge = 1, nEdges - iCell1 = cellsOnEdge(1,iEdge) - iCell2 = cellsOnEdge(2,iEdge) +!$OMP BARRIER + + do iEdge=edgeStart,edgeEnd + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) do k=1,nVertLevels - ru(k,iEdge) = 0.5 * u(k,iEdge) * (rho_zz(k,iCell1) + rho_zz(k,iCell2)) + ru(k,iEdge) = 0.5 * u(k,iEdge) * (rho_zz(k,cell1) + rho_zz(k,cell2)) end do end do +!$OMP BARRIER + ! Compute rw (i.e. rho_zz * omega) from rho_zz, w, and ru. ! We are reversing the procedure we use in subroutine atm_recover_large_step_variables. ! 
first, the piece that depends on w. - do iCell=1,nCells + do iCell=cellStart,cellEnd rw(1,iCell) = 0.0 rw(nVertLevels+1,iCell) = 0.0 do k=2,nVertLevels @@ -4073,46 +5521,44 @@ subroutine atm_init_coupled_diagnostics( state, time_lev, diag, mesh, configs ) end do ! next, the piece that depends on ru - do iEdge=1,nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - do k = 2, nVertLevels + do iCell=cellStart,cellEnd + do i=1,nEdgesOnCell(iCell) + iEdge = edgesOnCell(i,iCell) + do k = 2,nVertLevels flux = (fzm(k)*ru(k,iEdge) + fzp(k)*ru(k-1,iEdge)) - rw(k,cell2) = rw(k,cell2) & - + (zb(k,2,iEdge) + coef_3rd_order * sign(1.0_RKIND,flux) * zb3(k,2,iEdge))*flux & - * (fzp(k) * zz(k-1,cell2) + fzm(k) * zz(k,cell2)) - rw(k,cell1) = rw(k,cell1) & - - (zb(k,1,iEdge) + coef_3rd_order * sign(1.0_RKIND,flux) * zb3(k,1,iEdge))*flux & - * (fzp(k) * zz(k-1,cell1) + fzm(k) * zz(k,cell1)) + rw(k,iCell) = rw(k,iCell) & + - edgesOnCell_sign(i,iCell) * (zb_cell(k,i,iCell) + sign(1.0_RKIND,flux) * zb3_cell(k,i,iCell))*flux & + * (fzp(k) * zz(k-1,iCell) + fzm(k) * zz(k,iCell)) + end do end do end do - do iCell = 1, nCells + do iCell=cellStart,cellEnd do k=1,nVertLevels rho_p(k,iCell) = rho_zz(k,iCell) - rho_base(k,iCell) end do end do - do iCell = 1, nCells + do iCell=cellStart,cellEnd do k=1,nVertLevels rtheta_base(k,iCell) = theta_base(k,iCell) * rho_base(k,iCell) end do end do - do iCell = 1, nCells + do iCell=cellStart,cellEnd do k=1,nVertLevels rtheta_p(k,iCell) = theta_m(k,iCell) * rho_p(k,iCell) & + rho_base(k,iCell) * (theta_m(k,iCell) - theta_base(k,iCell)) end do end do - do iCell=1,nCells + do iCell=cellStart,cellEnd do k=1,nVertLevels exner(k,iCell) = (zz(k,iCell) * (rgas/p0) * (rtheta_p(k,iCell) + rtheta_base(k,iCell)))**rcv end do end do - do iCell=1,nCells + do iCell=cellStart,cellEnd do k=1,nVertLevels pressure_p(k,iCell) = zz(k,iCell) * rgas & * ( exner(k,iCell) * rtheta_p(k,iCell) & @@ -4123,9 +5569,10 @@ subroutine atm_init_coupled_diagnostics( state, 
time_lev, diag, mesh, configs ) end subroutine atm_init_coupled_diagnostics -!--------------------------------------------------------------------------------------- - subroutine atm_rk_dynamics_substep_finish( state, diag, dynamics_substep, dynamics_split ) + subroutine atm_rk_dynamics_substep_finish( state, diag, dynamics_substep, dynamics_split, & + cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd, & + cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd) implicit none @@ -4138,7 +5585,11 @@ subroutine atm_rk_dynamics_substep_finish( state, diag, dynamics_substep, dynami type (mpas_pool_type), intent(inout) :: state type (mpas_pool_type), intent(inout) :: diag integer, intent(in) :: dynamics_substep, dynamics_split + integer, intent(in) :: cellStart, cellEnd, vertexStart, vertexEnd, edgeStart, edgeEnd + integer, intent(in) :: cellSolveStart, cellSolveEnd, vertexSolveStart, vertexSolveEnd, edgeSolveStart, edgeSolveEnd + real (kind=RKIND) :: inv_dynamics_split + real (kind=RKIND), dimension(:,:), pointer :: ru real (kind=RKIND), dimension(:,:), pointer :: ru_save real (kind=RKIND), dimension(:,:), pointer :: rw @@ -4177,35 +5628,375 @@ subroutine atm_rk_dynamics_substep_finish( state, diag, dynamics_substep, dynami call mpas_pool_get_array(state, 'rho_zz', rho_zz_1, 1) call mpas_pool_get_array(state, 'rho_zz', rho_zz_2, 2) + inv_dynamics_split = 1.0_RKIND / real(dynamics_split) + if (dynamics_substep < dynamics_split) then - ru_save(:,:) = ru(:,:) - rw_save(:,:) = rw(:,:) - rtheta_p_save(:,:) = rtheta_p(:,:) - rho_p_save(:,:) = rho_p(:,:) + ru_save(:,edgeStart:edgeEnd) = ru(:,edgeStart:edgeEnd) + rw_save(:,cellStart:cellEnd) = rw(:,cellStart:cellEnd) + rtheta_p_save(:,cellStart:cellEnd) = rtheta_p(:,cellStart:cellEnd) + rho_p_save(:,cellStart:cellEnd) = rho_p(:,cellStart:cellEnd) - u_1(:,:) = u_2(:,:) - w_1(:,:) = w_2(:,:) - theta_m_1(:,:) = theta_m_2(:,:) - rho_zz_1(:,:) = rho_zz_2(:,:) + 
u_1(:,edgeStart:edgeEnd) = u_2(:,edgeStart:edgeEnd) + w_1(:,cellStart:cellEnd) = w_2(:,cellStart:cellEnd) + theta_m_1(:,cellStart:cellEnd) = theta_m_2(:,cellStart:cellEnd) + rho_zz_1(:,cellStart:cellEnd) = rho_zz_2(:,cellStart:cellEnd) end if if (dynamics_substep == 1) then - ruAvg_split(:,:) = ruAvg(:,:) - wwAvg_split(:,:) = wwAvg(:,:) + ruAvg_split(:,edgeStart:edgeEnd) = ruAvg(:,edgeStart:edgeEnd) + wwAvg_split(:,cellStart:cellEnd) = wwAvg(:,cellStart:cellEnd) else - ruAvg_split(:,:) = ruAvg(:,:)+ruAvg_split(:,:) - wwAvg_split(:,:) = wwAvg(:,:)+wwAvg_split(:,:) + ruAvg_split(:,edgeStart:edgeEnd) = ruAvg(:,edgeStart:edgeEnd)+ruAvg_split(:,edgeStart:edgeEnd) + wwAvg_split(:,cellStart:cellEnd) = wwAvg(:,cellStart:cellEnd)+wwAvg_split(:,cellStart:cellEnd) end if if (dynamics_substep == dynamics_split) then - ruAvg(:,:) = ruAvg_split(:,:)/real(dynamics_split) - wwAvg(:,:) = wwAvg_split(:,:)/real(dynamics_split) - rho_zz_1(:,:) = rho_zz_old_split(:,:) + ruAvg(:,edgeStart:edgeEnd) = ruAvg_split(:,edgeStart:edgeEnd) * inv_dynamics_split + wwAvg(:,cellStart:cellEnd) = wwAvg_split(:,cellStart:cellEnd) * inv_dynamics_split + rho_zz_1(:,cellStart:cellEnd) = rho_zz_old_split(:,cellStart:cellEnd) end if end subroutine atm_rk_dynamics_substep_finish + subroutine summarize_timestep(domain) + + use ieee_arithmetic, only : ieee_is_nan + + implicit none + + type (domain_type), intent(inout) :: domain + + real (kind=RKIND), parameter :: pi_const = 2.0_RKIND*asin(1.0_RKIND) + + logical, pointer :: config_print_global_minmax_vel + logical, pointer :: config_print_detailed_minmax_vel + logical, pointer :: config_print_global_minmax_sca + + type (block_type), pointer :: block + + integer :: iCell, k, iEdge, iScalar + integer, pointer :: num_scalars, nCellsSolve, nEdgesSolve, nVertLevels + + type (mpas_pool_type), pointer :: state + type (mpas_pool_type), pointer :: diag + type (mpas_pool_type), pointer :: mesh + + real (kind=RKIND) :: scalar_min, scalar_max + real (kind=RKIND) :: 
global_scalar_min, global_scalar_max + + real (kind=RKIND), dimension(:), pointer :: latCell + real (kind=RKIND), dimension(:), pointer :: lonCell + real (kind=RKIND), dimension(:), pointer :: latEdge + real (kind=RKIND), dimension(:), pointer :: lonEdge + integer, dimension(:), pointer :: indexToCellID + integer :: indexMax, indexMax_global + integer :: kMax, kMax_global + real (kind=RKIND) :: latMax, latMax_global + real (kind=RKIND) :: lonMax, lonMax_global + real (kind=RKIND), dimension(5) :: localVals, globalVals + + real (kind=RKIND) :: spd + real (kind=RKIND), dimension(:,:), pointer :: w + real (kind=RKIND), dimension(:,:), pointer :: u, v, uReconstructZonal, uReconstructMeridional, uReconstructX, uReconstructY, uReconstructZ + real (kind=RKIND), dimension(:,:,:), pointer :: scalars, scalars_1, scalars_2 + + call mpas_pool_get_config(domain % blocklist % configs, 'config_print_global_minmax_vel', config_print_global_minmax_vel) + call mpas_pool_get_config(domain % blocklist % configs, 'config_print_detailed_minmax_vel', config_print_detailed_minmax_vel) + call mpas_pool_get_config(domain % blocklist % configs, 'config_print_global_minmax_sca', config_print_global_minmax_sca) + + if (config_print_detailed_minmax_vel) then + write(0,*) ' ' + + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'state', state) + call mpas_pool_get_subpool(block % structs, 'diag', diag) + call mpas_pool_get_subpool(block % structs, 'mesh', mesh) + + call mpas_pool_get_array(state, 'w', w, 2) + call mpas_pool_get_array(state, 'u', u, 2) + call mpas_pool_get_array(diag, 'v', v) + call mpas_pool_get_array(mesh, 'indexToCellID', indexToCellID) + call mpas_pool_get_array(mesh, 'latCell', latCell) + call mpas_pool_get_array(mesh, 'lonCell', lonCell) + call mpas_pool_get_array(mesh, 'latEdge', latEdge) + call mpas_pool_get_array(mesh, 'lonEdge', lonEdge) + call mpas_pool_get_dimension(state, 'nCellsSolve', nCellsSolve) + call 
mpas_pool_get_dimension(state, 'nEdgesSolve', nEdgesSolve) + call mpas_pool_get_dimension(state, 'nVertLevels', nVertLevels) + + scalar_min = 1.0e20 + indexMax = -1 + kMax = -1 + latMax = 0.0 + lonMax = 0.0 + do iCell = 1, nCellsSolve + do k = 1, nVertLevels + if (w(k,iCell) < scalar_min) then + scalar_min = w(k,iCell) + indexMax = iCell + kMax = k + latMax = latCell(iCell) + lonMax = lonCell(iCell) + end if + end do + end do + localVals(1) = scalar_min + localVals(2) = real(indexMax,kind=RKIND) + localVals(3) = real(kMax,kind=RKIND) + localVals(4) = latMax + localVals(5) = lonMax + call mpas_dmpar_minattributes_real(domain % dminfo, scalar_min, localVals, globalVals) + global_scalar_min = globalVals(1) + indexMax_global = int(globalVals(2)) + kMax_global = int(globalVals(3)) + latMax_global = globalVals(4) + lonMax_global = globalVals(5) + latMax_global = latMax_global * 180.0_RKIND / pi_const + lonMax_global = lonMax_global * 180.0_RKIND / pi_const + if (lonMax_global > 180.0) then + lonMax_global = lonMax_global - 360.0 + end if + write(0,'(a,f9.4,a,i4,a,f7.3,a,f8.3,a)') ' global min w: ', global_scalar_min, & + ' k=', kMax_global, ', ', latMax_global, ' lat ', lonMax_global, ' lon' + + scalar_max = -1.0e20 + indexMax = -1 + kMax = -1 + latMax = 0.0 + lonMax = 0.0 + do iCell = 1, nCellsSolve + do k = 1, nVertLevels + if (w(k,iCell) > scalar_max) then + scalar_max = w(k,iCell) + indexMax = iCell + kMax = k + latMax = latCell(iCell) + lonMax = lonCell(iCell) + end if + end do + end do + localVals(1) = scalar_max + localVals(2) = real(indexMax,kind=RKIND) + localVals(3) = real(kMax,kind=RKIND) + localVals(4) = latMax + localVals(5) = lonMax + call mpas_dmpar_maxattributes_real(domain % dminfo, scalar_max, localVals, globalVals) + global_scalar_max = globalVals(1) + indexMax_global = int(globalVals(2)) + kMax_global = int(globalVals(3)) + latMax_global = globalVals(4) + lonMax_global = globalVals(5) + latMax_global = latMax_global * 180.0_RKIND / pi_const + 
lonMax_global = lonMax_global * 180.0_RKIND / pi_const + if (lonMax_global > 180.0) then + lonMax_global = lonMax_global - 360.0 + end if + write(0,'(a,f9.4,a,i4,a,f7.3,a,f8.3,a)') ' global max w: ', global_scalar_max, & + ' k=', kMax_global, ', ', latMax_global, ' lat ', lonMax_global, ' lon' + + scalar_min = 1.0e20 + indexMax = -1 + kMax = -1 + latMax = 0.0 + lonMax = 0.0 + do iEdge = 1, nEdgesSolve + do k = 1, nVertLevels + if (u(k,iEdge) < scalar_min) then + scalar_min = u(k,iEdge) + indexMax = iEdge + kMax = k + latMax = latEdge(iEdge) + lonMax = lonEdge(iEdge) + end if + end do + end do + localVals(1) = scalar_min + localVals(2) = real(indexMax,kind=RKIND) + localVals(3) = real(kMax,kind=RKIND) + localVals(4) = latMax + localVals(5) = lonMax + call mpas_dmpar_minattributes_real(domain % dminfo, scalar_min, localVals, globalVals) + global_scalar_min = globalVals(1) + indexMax_global = int(globalVals(2)) + kMax_global = int(globalVals(3)) + latMax_global = globalVals(4) + lonMax_global = globalVals(5) + latMax_global = latMax_global * 180.0_RKIND / pi_const + lonMax_global = lonMax_global * 180.0_RKIND / pi_const + if (lonMax_global > 180.0) then + lonMax_global = lonMax_global - 360.0 + end if + write(0,'(a,f9.4,a,i4,a,f7.3,a,f8.3,a)') ' global min u: ', global_scalar_min, & + ' k=', kMax_global, ', ', latMax_global, ' lat ', lonMax_global, ' lon' + + scalar_max = -1.0e20 + indexMax = -1 + kMax = -1 + latMax = 0.0 + lonMax = 0.0 + do iEdge = 1, nEdgesSolve + do k = 1, nVertLevels + if (u(k,iEdge) > scalar_max) then + scalar_max = u(k,iEdge) + indexMax = iEdge + kMax = k + latMax = latEdge(iEdge) + lonMax = lonEdge(iEdge) + end if + end do + end do + localVals(1) = scalar_max + localVals(2) = real(indexMax,kind=RKIND) + localVals(3) = real(kMax,kind=RKIND) + localVals(4) = latMax + localVals(5) = lonMax + call mpas_dmpar_maxattributes_real(domain % dminfo, scalar_max, localVals, globalVals) + global_scalar_max = globalVals(1) + indexMax_global = 
int(globalVals(2)) + kMax_global = int(globalVals(3)) + latMax_global = globalVals(4) + lonMax_global = globalVals(5) + latMax_global = latMax_global * 180.0_RKIND / pi_const + lonMax_global = lonMax_global * 180.0_RKIND / pi_const + if (lonMax_global > 180.0) then + lonMax_global = lonMax_global - 360.0 + end if + write(0,'(a,f9.4,a,i4,a,f7.3,a,f8.3,a)') ' global max u: ', global_scalar_max, & + ' k=', kMax_global, ', ', latMax_global, ' lat ', lonMax_global, ' lon' + + scalar_max = -1.0e20 + indexMax = -1 + kMax = -1 + latMax = 0.0 + lonMax = 0.0 + do iEdge = 1, nEdgesSolve + do k = 1, nVertLevels + spd = sqrt(u(k,iEdge)*u(k,iEdge) + v(k,iEdge)*v(k,iEdge)) + if (spd > scalar_max) then + scalar_max = spd + indexMax = iEdge + kMax = k + latMax = latEdge(iEdge) + lonMax = lonEdge(iEdge) + end if + end do + end do + localVals(1) = scalar_max + localVals(2) = real(indexMax,kind=RKIND) + localVals(3) = real(kMax,kind=RKIND) + localVals(4) = latMax + localVals(5) = lonMax + call mpas_dmpar_maxattributes_real(domain % dminfo, scalar_max, localVals, globalVals) + global_scalar_max = globalVals(1) + indexMax_global = int(globalVals(2)) + kMax_global = int(globalVals(3)) + latMax_global = globalVals(4) + lonMax_global = globalVals(5) + latMax_global = latMax_global * 180.0_RKIND / pi_const + lonMax_global = lonMax_global * 180.0_RKIND / pi_const + if (lonMax_global > 180.0) then + lonMax_global = lonMax_global - 360.0 + end if + write(0,'(a,f9.4,a,i4,a,f7.3,a,f8.3,a)') ' global max wsp: ', global_scalar_max, & + ' k=', kMax_global, ', ', latMax_global, ' lat ', lonMax_global, ' lon' + + ! + ! Check for NaNs + ! 
+ do iCell = 1, nCellsSolve + do k = 1, nVertLevels + if (ieee_is_nan(w(k,iCell))) then + call mpas_dmpar_global_abort('NaN detected in ''w'' field.') + end if + end do + end do + + do iEdge = 1, nEdgesSolve + do k = 1, nVertLevels + if (ieee_is_nan(u(k,iEdge))) then + call mpas_dmpar_global_abort('NaN detected in ''u'' field.') + end if + end do + end do + + block => block % next + end do + + else if (config_print_global_minmax_vel) then + write(0,*) ' ' + + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'state', state) + + call mpas_pool_get_array(state, 'w', w, 2) + call mpas_pool_get_array(state, 'u', u, 2) + call mpas_pool_get_dimension(state, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(state, 'nEdgesSolve', nEdgesSolve) + call mpas_pool_get_dimension(state, 'nVertLevels', nVertLevels) + + scalar_min = 0.0 + scalar_max = 0.0 + do iCell = 1, nCellsSolve + do k = 1, nVertLevels + scalar_min = min(scalar_min, w(k,iCell)) + scalar_max = max(scalar_max, w(k,iCell)) + end do + end do + call mpas_dmpar_min_real(domain % dminfo, scalar_min, global_scalar_min) + call mpas_dmpar_max_real(domain % dminfo, scalar_max, global_scalar_max) + write(0,*) 'global min, max w ', global_scalar_min, global_scalar_max + + scalar_min = 0.0 + scalar_max = 0.0 + do iEdge = 1, nEdgesSolve + do k = 1, nVertLevels + scalar_min = min(scalar_min, u(k,iEdge)) + scalar_max = max(scalar_max, u(k,iEdge)) + end do + end do + call mpas_dmpar_min_real(domain % dminfo, scalar_min, global_scalar_min) + call mpas_dmpar_max_real(domain % dminfo, scalar_max, global_scalar_max) + write(0,*) 'global min, max u ', global_scalar_min, global_scalar_max + + block => block % next + end do + end if + + if (config_print_global_minmax_sca) then + if (.not. (config_print_global_minmax_vel .or. 
config_print_detailed_minmax_vel)) write(0,*) ' ' + + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'state', state) + + call mpas_pool_get_array(state, 'scalars', scalars, 2) + call mpas_pool_get_dimension(state, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(state, 'nVertLevels', nVertLevels) + call mpas_pool_get_dimension(state, 'num_scalars', num_scalars) + + do iScalar = 1, num_scalars + scalar_min = 0.0 + scalar_max = 0.0 + do iCell = 1, nCellsSolve + do k = 1, nVertLevels + scalar_min = min(scalar_min, scalars(iScalar,k,iCell)) + scalar_max = max(scalar_max, scalars(iScalar,k,iCell)) + end do + end do + call mpas_dmpar_min_real(domain % dminfo, scalar_min, global_scalar_min) + call mpas_dmpar_max_real(domain % dminfo, scalar_max, global_scalar_max) + write(0,'(a,i4,2(1x,e17.10))') ' global min, max scalar ', iScalar, global_scalar_min, global_scalar_max + end do + + block => block % next + end do + end if + + end subroutine summarize_timestep + end module atm_time_integration diff --git a/src/core_atmosphere/mpas_atm_core.F b/src/core_atmosphere/mpas_atm_core.F index f4d6d2b827..66a2d90242 100644 --- a/src/core_atmosphere/mpas_atm_core.F +++ b/src/core_atmosphere/mpas_atm_core.F @@ -10,6 +10,7 @@ module atm_core use mpas_derived_types use mpas_pool_routines use mpas_dmpar + use mpas_abort, only : mpas_dmpar_global_abort type (MPAS_Clock_type), pointer :: clock @@ -22,6 +23,9 @@ function atm_core_init(domain, startTimeStamp) result(ierr) use mpas_timekeeping use mpas_kind_types use mpas_stream_manager + use mpas_atm_dimensions, only : mpas_atm_set_dims + use mpas_atm_diagnostics_manager, only : mpas_atm_diag_setup + use mpas_atm_threading, only : mpas_atm_threading_init implicit none @@ -35,7 +39,7 @@ function atm_core_init(domain, startTimeStamp) result(ierr) character(len=StrKIND) :: timeStamp integer :: i logical, pointer :: config_do_restart - + type (mpas_pool_type), pointer :: state 
type (mpas_pool_type), pointer :: mesh type (mpas_pool_type), pointer :: diag @@ -43,9 +47,30 @@ function atm_core_init(domain, startTimeStamp) result(ierr) character (len=StrKIND), pointer :: xtime type (MPAS_Time_Type) :: startTime + integer, pointer :: nVertLevels, maxEdges, maxEdges2, num_scalars + ierr = 0 + ! + ! Setup threading + ! + call mpas_atm_threading_init(domain % blocklist, ierr) + if ( ierr /= 0 ) then + call mpas_dmpar_global_abort('ERROR: Threading setup failed for core '//trim(domain % core % coreName)) + end if + + + ! + ! Set up inner dimensions used by arrays in optimized dynamics routines + ! + call mpas_pool_get_subpool(domain % blocklist % structs, 'state', state) + call mpas_pool_get_dimension(state, 'nVertLevels', nVertLevels) + call mpas_pool_get_dimension(state, 'maxEdges', maxEdges) + call mpas_pool_get_dimension(state, 'maxEdges2', maxEdges2) + call mpas_pool_get_dimension(state, 'num_scalars', num_scalars) + call mpas_atm_set_dims(nVertLevels, maxEdges, maxEdges2, num_scalars) + ! ! Set "local" clock to point to the clock contained in the domain type ! @@ -55,7 +80,7 @@ function atm_core_init(domain, startTimeStamp) result(ierr) call mpas_pool_get_config(domain % blocklist % configs, 'config_do_restart', config_do_restart) call mpas_pool_get_config(domain % blocklist % configs, 'config_dt', dt) - + ! ! If this is a restart run, read the restart stream, else read the input ! stream. @@ -69,9 +94,10 @@ function atm_core_init(domain, startTimeStamp) result(ierr) call MPAS_stream_mgr_read(domain % streamManager, streamID='input', ierr=ierr) end if if (ierr /= MPAS_STREAM_MGR_NOERR) then - write(0,*) ' ' - write(0,*) '********************************************************************************' - write(0,*) 'Error reading initial conditions' + call mpas_dmpar_global_abort('********************************************************************************', & + deferredAbort=.true.) 
+ call mpas_dmpar_global_abort('Error reading initial conditions', & + deferredAbort=.true.) call mpas_dmpar_global_abort('********************************************************************************') end if call MPAS_stream_mgr_reset_alarms(domain % streamManager, streamID='input', direction=MPAS_STREAM_INPUT, ierr=ierr) @@ -94,6 +120,21 @@ function atm_core_init(domain, startTimeStamp) result(ierr) end do end if + ! + ! Read a new data stream for Incremental Analysis Update (IAU), if config_IAU_option /= 'off' : HA (June-15-2016) + ! FIXME: should I check xtime for the IAU fields? Maybe not. + ! Note: Because the 'iau' stream has the 'iau' package attached to it, the MPAS_stream_mgr_read( ) + ! call here will actually try to read the stream only if IAU is being used in the run. + ! + call MPAS_stream_mgr_read(domain % streamManager, streamID='iau', whence=MPAS_STREAM_NEAREST, ierr=ierr) + if (ierr /= MPAS_STREAM_MGR_NOERR) then + call mpas_dmpar_global_abort('********************************************************************************', & + deferredAbort=.true.) + call mpas_dmpar_global_abort('Error reading IAU files', & + deferredAbort=.true.) + call mpas_dmpar_global_abort('********************************************************************************') + end if + call MPAS_stream_mgr_reset_alarms(domain % streamManager, streamID='iau', ierr=ierr) ! ! 
Set startTimeStamp based on the start time of the simulation clock @@ -129,6 +170,9 @@ function atm_core_init(domain, startTimeStamp) result(ierr) call mpas_pool_get_field(diag, 'rw', rw_field) call mpas_dmpar_exch_halo_field(rw_field) + call mpas_atm_diag_setup(domain % streamManager, domain % blocklist % configs, & + domain % blocklist % structs, domain % clock, domain % dminfo) + end function atm_core_init @@ -178,7 +222,7 @@ subroutine atm_simulation_clock_init(core_clock, configs, ierr) if (trim(config_stop_time) /= "none") then call mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=local_err) if(startTime + runduration /= stopTime) then - write(0,*) 'Warning: config_run_duration and config_stop_time are inconsitent: using config_run_duration.' + write(0,*) 'Warning: config_run_duration and config_stop_time are inconsistent: using config_run_duration.' end if end if else if (trim(config_stop_time) /= "none") then @@ -208,7 +252,7 @@ subroutine atm_mpas_init_block(dminfo, stream_manager, block, mesh, dt) use mpas_atmphys_init use mpas_atmphys_manager #endif - + implicit none type (dm_info), intent(in) :: dminfo @@ -224,35 +268,124 @@ subroutine atm_mpas_init_block(dminfo, stream_manager, block, mesh, dt) type (mpas_pool_type), pointer :: diag_physics type (mpas_pool_type), pointer :: atm_input - integer :: iCell + integer :: iCell,iEdge,iVertex real (kind=RKIND), dimension(:,:), pointer :: u, uReconstructX, uReconstructY, uReconstructZ, uReconstructZonal, uReconstructMeridional - real (kind=RKIND), dimension(:), pointer :: meshScalingDel2, meshScalingDel4, areaCell, invAreaCell + real (kind=RKIND), dimension(:), pointer :: meshScalingDel2, meshScalingDel4 + real (kind=RKIND), dimension(:), pointer :: areaCell, invAreaCell + real (kind=RKIND), dimension(:), pointer :: dvEdge, invDvEdge + real (kind=RKIND), dimension(:), pointer :: dcEdge, invDcEdge + real (kind=RKIND), dimension(:), pointer :: areaTriangle, invAreaTriangle + integer, 
pointer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve + integer :: thread character(len=StrKIND), pointer :: mminlu - integer, pointer :: nEdgesSolve, nCells - + integer, pointer :: nThreads + integer, dimension(:), pointer :: cellThreadStart, cellThreadEnd + integer, dimension(:), pointer :: cellSolveThreadStart, cellSolveThreadEnd + integer, dimension(:), pointer :: edgeThreadStart, edgeThreadEnd + integer, dimension(:), pointer :: edgeSolveThreadStart, edgeSolveThreadEnd + integer, dimension(:), pointer :: vertexThreadStart, vertexThreadEnd + integer, dimension(:), pointer :: vertexSolveThreadStart, vertexSolveThreadEnd + + logical, pointer :: config_do_restart, config_do_DAcycling + call atm_compute_signs(mesh) call mpas_pool_get_subpool(block % structs, 'diag', diag) call mpas_pool_get_subpool(block % structs, 'state', state) + call mpas_pool_get_subpool(block % structs, 'state', state) + call mpas_pool_get_config(block % configs, 'config_do_restart', config_do_restart) call mpas_pool_get_config(block % configs, 'config_do_DAcycling', config_do_DAcycling) + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !!!! 
Compute inverses to avoid divides later + call mpas_pool_get_array(mesh, 'areaCell', areaCell) call mpas_pool_get_array(mesh, 'invAreaCell', invAreaCell) - call mpas_pool_get_dimension(mesh, 'nCells', nCells) - + + call mpas_pool_get_array(mesh, 'dvEdge', dvEdge) + call mpas_pool_get_array(mesh, 'dcEdge', dcEdge) + call mpas_pool_get_array(mesh, 'invDvEdge', invDvEdge) + call mpas_pool_get_array(mesh, 'invDcEdge', invDcEdge) + + call mpas_pool_get_array(mesh, 'areaTriangle', areaTriangle) + call mpas_pool_get_array(mesh, 'invAreaTriangle', invAreaTriangle) + + call mpas_pool_get_dimension(mesh, 'nCells', nCells) + call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) + call mpas_pool_get_dimension(mesh, 'nVertices', nVertices) + do iCell=1,nCells invAreaCell(iCell) = 1.0_RKIND / areaCell(iCell) end do + + do iEdge=1,nEdges + invDvEdge(iEdge) = 1.0_RKIND / dvEdge(iEdge) + end do + + do iEdge=1,nEdges + invDcEdge(iEdge) = 1.0_RKIND / dcEdge(iEdge) + end do + + do iVertex=1,nVertices + invAreaTriangle(iVertex) = 1.0_RKIND / areaTriangle(iVertex) + end do + + !!!!! End compute inverses + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - if (.not. config_do_restart .or. (config_do_restart .and. config_do_DAcycling)) then - call atm_init_coupled_diagnostics( state, 1, diag, mesh, block % configs) - end if - call atm_compute_solve_diagnostics(dt, state, 1, diag, mesh, block % configs) + call atm_adv_coef_compression(mesh) + + call atm_couple_coef_3rd_order(mesh, block % configs) + + call mpas_pool_get_dimension(state, 'nVertices', nVertices) + call mpas_pool_get_dimension(state, 'nVertLevels', nVertLevels) + + allocate(ke_vertex(nVertLevels,nVertices+1)) ! ke_vertex is a module variable defined in mpas_atm_time_integration.F + allocate(ke_edge(nVertLevels,nEdges+1)) ! 
ke_edge is a module variable defined in mpas_atm_time_integration.F + + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadStart', vertexThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexThreadEnd', vertexThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadStart', vertexSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'vertexSolveThreadEnd', vertexSolveThreadEnd) + + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + +!$OMP PARALLEL DO + do thread=1,nThreads + if (.not. config_do_restart .or. (config_do_restart .and. 
config_do_DAcycling)) then + call atm_init_coupled_diagnostics(state, 1, diag, mesh, block % configs, & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread), & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread), & + vertexSolveThreadStart(thread), vertexSolveThreadEnd(thread), & + edgeSolveThreadStart(thread), edgeSolveThreadEnd(thread)) + end if + + call atm_compute_solve_diagnostics(dt, state, 1, diag, mesh, block % configs, & + cellThreadStart(thread), cellThreadEnd(thread), & + vertexThreadStart(thread), vertexThreadEnd(thread), & + edgeThreadStart(thread), edgeThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + + deallocate(ke_vertex) + deallocate(ke_edge) call mpas_rbf_interp_initialize(mesh) call mpas_init_reconstruct(mesh) @@ -272,10 +405,6 @@ subroutine atm_mpas_init_block(dminfo, stream_manager, block, mesh, dt) ) #ifdef DO_PHYSICS - !check that all the physics options are correctly defined and that at least one physics - !parameterization is called (using the logical moist_physics): - call physics_namelist_check(mesh, block % configs) - !proceed with initialization of physics parameterization if moist_physics is set to true: call mpas_pool_get_subpool(block % structs, 'sfc_input', sfc_input) @@ -295,6 +424,7 @@ subroutine atm_mpas_init_block(dminfo, stream_manager, block, mesh, dt) call mpas_pool_get_subpool(block % structs, 'tend', tend) call mpas_pool_get_subpool(block % structs, 'diag_physics', diag_physics) call mpas_pool_get_subpool(block % structs, 'atm_input', atm_input) + call physics_tables_init(dminfo, block % configs) call physics_registry_init(mesh, block % configs, sfc_input) call physics_run_init(block % configs, mesh, state, clock, stream_manager) @@ -308,8 +438,6 @@ subroutine atm_mpas_init_block(dminfo, stream_manager, block, mesh, dt) call atm_compute_damping_coefs(mesh, block % configs) - call atm_compute_pgf_coefs(mesh, 
block % configs) - call mpas_pool_get_dimension(mesh, 'nEdgesSolve', nEdgesSolve) call mpas_pool_get_array(mesh, 'meshScalingDel2', meshScalingDel2) call mpas_pool_get_array(mesh, 'meshScalingDel4', meshScalingDel4) @@ -319,8 +447,6 @@ subroutine atm_mpas_init_block(dminfo, stream_manager, block, mesh, dt) write(0,*) 'min/max of meshScalingDel4 = ', minval(meshScalingDel4(1:nEdgesSolve)), & maxval(meshScalingDel4(1:nEdgesSolve)) - call atm_adv_coef_compression(mesh) - end subroutine atm_mpas_init_block @@ -331,6 +457,7 @@ function atm_core_run(domain) result(ierr) use mpas_stream_manager use mpas_derived_types, only : MPAS_STREAM_LATEST_BEFORE, MPAS_STREAM_INPUT, MPAS_STREAM_INPUT_OUTPUT use mpas_timer + use mpas_atm_diagnostics_manager, only : mpas_atm_diag_update, mpas_atm_diag_compute, mpas_atm_diag_reset implicit none @@ -349,13 +476,19 @@ function atm_core_run(domain) result(ierr) integer :: stream_dir character(len=StrKIND) :: input_stream, read_time - type (mpas_pool_type), pointer :: state, diag, diag_physics, mesh + type (mpas_pool_type), pointer :: state, diag, mesh, diag_physics, tend, tend_physics ! For high-frequency diagnostics output character (len=StrKIND) :: tempfilename + ! For timing information + real (kind=R8KIND) :: integ_start_time, integ_stop_time + real (kind=R8KIND) :: diag_start_time, diag_stop_time + real (kind=R8KIND) :: input_start_time, input_stop_time + real (kind=R8KIND) :: output_start_time, output_stop_time + ierr = 0 - + ! 
Eventually, dt should be domain specific call mpas_pool_get_config(domain % blocklist % configs, 'config_dt', dt) call mpas_pool_get_config(domain % blocklist % configs, 'config_do_restart', config_do_restart) @@ -370,6 +503,8 @@ function atm_core_run(domain) result(ierr) call MPAS_stream_mgr_reset_alarms(domain % streamManager, streamID='diagnostics', direction=MPAS_STREAM_OUTPUT, ierr=ierr) end if + call mpas_dmpar_get_time(diag_start_time) + if (MPAS_stream_mgr_ringing_alarms(domain % streamManager, direction=MPAS_STREAM_OUTPUT, ierr=ierr)) then block_ptr => domain % blocklist do while (associated(block_ptr)) @@ -382,17 +517,38 @@ function atm_core_run(domain) result(ierr) block_ptr => block_ptr % next end do end if + call mpas_atm_diag_reset() + call mpas_atm_diag_update() + call mpas_atm_diag_compute() + + call mpas_dmpar_get_time(diag_stop_time) + + call mpas_dmpar_get_time(output_start_time) call mpas_stream_mgr_write(domain % streamManager, ierr=ierr) + call mpas_dmpar_get_time(output_stop_time) if (ierr /= MPAS_STREAM_MGR_NOERR .and. & ierr /= MPAS_STREAM_MGR_ERR_CLOBBER_FILE .and. & ierr /= MPAS_STREAM_MGR_ERR_CLOBBER_REC) then - write(0,*) ' ' - write(0,*) '********************************************************************************' - write(0,*) 'Error writing one or more output streams' + call mpas_dmpar_global_abort('********************************************************************************', & + deferredAbort=.true.) + call mpas_dmpar_global_abort('Error writing one or more output streams', & + deferredAbort=.true.) 
call mpas_dmpar_global_abort('********************************************************************************') end if + if (MPAS_stream_mgr_ringing_alarms(domain % streamManager, direction=MPAS_STREAM_OUTPUT, ierr=ierr)) then + write(0,'(a,f9.4,a)') ' Timing for diagnostic computation: ', (diag_stop_time - diag_start_time), ' s' + write(0,'(a,f9.4,a)') ' Timing for stream output: ', (output_stop_time - output_start_time), ' s' + end if + + call mpas_atm_diag_reset() + call mpas_stream_mgr_reset_alarms(domain % streamManager, direction=MPAS_STREAM_OUTPUT, ierr=ierr) + block_ptr => domain % blocklist + call mpas_pool_get_subpool(block_ptr % structs, 'mesh', mesh) + call mpas_pool_get_subpool(block_ptr % structs, 'state', state) + call mpas_pool_get_subpool(block_ptr % structs, 'diag', diag) + call mpas_pool_get_subpool(block_ptr % structs, 'diag_physics', diag_physics) ! During integration, time level 1 stores the model state at the beginning of the ! time step, and time level 2 stores the state advanced dt in time by timestep(...) @@ -413,17 +569,21 @@ function atm_core_run(domain) result(ierr) if (stream_dir == MPAS_STREAM_INPUT .or. stream_dir == MPAS_STREAM_INPUT_OUTPUT) then if (MPAS_stream_mgr_ringing_alarms(domain % streamManager, streamID=input_stream, & direction=MPAS_STREAM_INPUT, ierr=ierr)) then + call mpas_dmpar_get_time(input_start_time) call MPAS_stream_mgr_read(domain % streamManager, streamID=input_stream, whence=MPAS_STREAM_LATEST_BEFORE, & actualWhen=read_time, ierr=ierr) + call mpas_dmpar_get_time(input_stop_time) if (ierr /= MPAS_STREAM_MGR_NOERR) then - write(0,*) ' ' - write(0,*) '********************************************************************************' - write(0,*) 'Error reading input stream '//trim(input_stream) + call mpas_dmpar_global_abort('********************************************************************************', & + deferredAbort=.true.) 
+ call mpas_dmpar_global_abort('Error reading input stream '//trim(input_stream), & + deferredAbort=.true.) call mpas_dmpar_global_abort('********************************************************************************') end if write(0,*) '----------------------------------------------------------------------' write(0,*) ' Read '''//trim(input_stream)//''' input stream valid at '//trim(read_time) + write(0,'(a,f9.4,a)') ' Timing for stream input: ', (input_stop_time - input_start_time), ' s' write(0,*) '----------------------------------------------------------------------' call MPAS_stream_mgr_reset_alarms(domain % streamManager, streamID=input_stream, direction=MPAS_STREAM_INPUT, ierr=ierr) @@ -432,8 +592,11 @@ function atm_core_run(domain) result(ierr) end do call mpas_timer_start("time integration") + call mpas_dmpar_get_time(integ_start_time) call atm_do_timestep(domain, dt, itimestep) + call mpas_dmpar_get_time(integ_stop_time) call mpas_timer_stop("time integration") + write(0,'(a,f9.4,a)') ' Timing for integration step: ', (integ_stop_time - integ_start_time), ' s' ! Move time level 2 fields back into time level 1 for next time step call mpas_pool_get_subpool(domain % blocklist % structs, 'state', state) @@ -444,23 +607,31 @@ function atm_core_run(domain) result(ierr) call mpas_advance_clock(clock) currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr) + + call mpas_dmpar_get_time(diag_start_time) + ! ! Write any output streams that have alarms ringing, after computing diagnostics fields ! 
- call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr) + call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr) if (MPAS_stream_mgr_ringing_alarms(domain % streamManager, direction=MPAS_STREAM_OUTPUT, ierr=ierr)) then - block_ptr => domain % blocklist - do while (associated(block_ptr)) + block_ptr => domain % blocklist + do while (associated(block_ptr)) + call mpas_pool_get_subpool(block_ptr % structs, 'state', state) + call mpas_pool_get_subpool(block_ptr % structs, 'diag', diag) + call mpas_pool_get_subpool(block_ptr % structs, 'diag_physics', diag_physics) + call mpas_pool_get_subpool(block_ptr % structs, 'mesh', mesh) + call mpas_pool_get_subpool(block_ptr % structs, 'tend', tend) + call mpas_pool_get_subpool(block_ptr % structs, 'tend_physics', tend_physics) + call atm_compute_output_diagnostics(state, 1, diag, mesh) + + block_ptr => block_ptr % next + end do + end if - call mpas_pool_get_subpool(block_ptr % structs, 'state', state) - call mpas_pool_get_subpool(block_ptr % structs, 'diag', diag) - call mpas_pool_get_subpool(block_ptr % structs, 'diag_physics', diag_physics) - call mpas_pool_get_subpool(block_ptr % structs, 'mesh', mesh) - call atm_compute_output_diagnostics(state, 1, diag, mesh) + call mpas_atm_diag_update() + call mpas_atm_diag_compute() - block_ptr => block_ptr % next - end do - end if if (MPAS_stream_mgr_ringing_alarms(domain % streamManager, streamID='restart', direction=MPAS_STREAM_OUTPUT, ierr=ierr)) then block_ptr => domain % blocklist do while (associated(block_ptr)) @@ -474,16 +645,39 @@ function atm_core_run(domain) result(ierr) block_ptr => block_ptr % next end do end if + call mpas_dmpar_get_time(diag_stop_time) + call mpas_dmpar_get_time(output_start_time) call mpas_stream_mgr_write(domain % streamManager, ierr=ierr) + call mpas_dmpar_get_time(output_stop_time) if (ierr /= MPAS_STREAM_MGR_NOERR .and. & ierr /= MPAS_STREAM_MGR_ERR_CLOBBER_FILE .and. 
& ierr /= MPAS_STREAM_MGR_ERR_CLOBBER_REC) then - write(0,*) ' ' - write(0,*) '********************************************************************************' - write(0,*) 'Error writing one or more output streams' + call mpas_dmpar_global_abort('********************************************************************************', & + deferredAbort=.true.) + call mpas_dmpar_global_abort('Error writing one or more output streams', & + deferredAbort=.true.) call mpas_dmpar_global_abort('********************************************************************************') end if + if (MPAS_stream_mgr_ringing_alarms(domain % streamManager, direction=MPAS_STREAM_OUTPUT, ierr=ierr)) then + write(0,'(a,f9.4,a)') ' Timing for diagnostic computation: ', (diag_stop_time - diag_start_time), ' s' + write(0,'(a,f9.4,a)') ' Timing for stream output: ', (output_stop_time - output_start_time), ' s' + end if + + ! reset any diagnostics here + + if (MPAS_stream_mgr_ringing_alarms(domain % streamManager, streamID='diagnostics', direction=MPAS_STREAM_OUTPUT, ierr=ierr)) then + block_ptr => domain % blocklist + do while (associated(block_ptr)) + + call mpas_pool_get_subpool(block_ptr % structs, 'diag', diag) + call mpas_pool_get_subpool(block_ptr % structs, 'diag_physics', diag_physics) + call atm_reset_diagnostics(diag, diag_physics) + + block_ptr => block_ptr % next + end do + end if + ! Only after we've successfully written the restart file should we we ! 
write the restart_timestamp file @@ -495,13 +689,20 @@ function atm_core_run(domain) result(ierr) end if end if + call mpas_atm_diag_reset() + call mpas_stream_mgr_reset_alarms(domain % streamManager, direction=MPAS_STREAM_OUTPUT, ierr=ierr) + block_ptr => domain % blocklist + call mpas_pool_get_subpool(block_ptr % structs, 'mesh', mesh) + call mpas_pool_get_subpool(block_ptr % structs, 'state', state) + call mpas_pool_get_subpool(block_ptr % structs, 'diag', diag) + call mpas_pool_get_subpool(block_ptr % structs, 'diag_physics', diag_physics) + end do end function atm_core_run - subroutine atm_compute_output_diagnostics(state, time_lev, diag, mesh) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! Compute diagnostic fields for a domain to be written to history files @@ -513,7 +714,6 @@ subroutine atm_compute_output_diagnostics(state, time_lev, diag, mesh) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! use mpas_constants - use mpas_atm_interp_diagnostics implicit none @@ -552,8 +752,6 @@ subroutine atm_compute_output_diagnostics(state, time_lev, diag, mesh) end do end do - call interp_diagnostics(mesh, state, time_lev, diag) - end subroutine atm_compute_output_diagnostics @@ -603,6 +801,30 @@ subroutine atm_compute_restart_diagnostics(state, time_lev, diag, mesh) end subroutine atm_compute_restart_diagnostics + subroutine atm_reset_diagnostics(diag, diag_physics) + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! reset some diagnostics after output + ! + ! Input: diag - contains dynamics diagnostic fields + ! daig_physics - contains physics diagnostic fields + ! + ! Output: whatever diagnostics need resetting after output + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
+ + implicit none + + type (mpas_pool_type), intent(inout) :: diag + type (mpas_pool_type), intent(inout) :: diag_physics + + real (kind=RKIND), dimension(:), pointer :: refl10cm_1km_max + + call mpas_pool_get_array(diag_physics, 'refl10cm_1km_max', refl10cm_1km_max) + if(associated(refl10cm_1km_max)) then + refl10cm_1km_max(:) = 0. + endif + + end subroutine atm_reset_diagnostics + subroutine atm_do_timestep(domain, dt, itimestep) @@ -656,16 +878,45 @@ function atm_core_finalize(domain) result(ierr) use mpas_decomp use mpas_timekeeping + use mpas_atm_diagnostics_manager, only : mpas_atm_diag_cleanup + use mpas_atm_threading, only : mpas_atm_threading_finalize + use mpas_io_units, only : stderrUnit +#ifdef DO_PHYSICS + use mpas_atmphys_finalize +#endif + implicit none - type (domain_type), intent(inout) :: domain + type (domain_type), intent(inout) :: domain + type (block_type), pointer :: block_ptr integer :: ierr ierr = 0 + call mpas_atm_diag_cleanup() + call mpas_destroy_clock(clock, ierr) call mpas_decomp_destroy_decomp_list(domain % decompositions) + +#ifdef DO_PHYSICS + block_ptr => domain % blocklist + do while (associated(block_ptr)) + call atmphys_finalize(block_ptr%configs) + + block_ptr => block_ptr%next + end do +#endif + + ! + ! Finalize threading + ! 
+ call mpas_atm_threading_finalize(domain % blocklist) + + write(stderrUnit,'(a)') '' + write(stderrUnit,'(a)') '********************************************************' + write(stderrUnit,'(a)') ' Finished running the atmosphere core' + write(stderrUnit,'(a)') '********************************************************' end function atm_core_finalize @@ -709,6 +960,96 @@ subroutine atm_compute_mesh_scaling(mesh, configs) end subroutine atm_compute_mesh_scaling + subroutine atm_compute_signs(mesh) + + implicit none + + type (mpas_pool_type), intent(inout) :: mesh + + integer :: i, j, iCell, iVtx + integer, pointer :: nCells, nVertices, nEdges, vertexDegree + integer, dimension(:), pointer :: nEdgesOnCell + integer, dimension(:,:), pointer :: edgesOnVertex, verticesOnEdge, cellsOnEdge, edgesOnCell + integer, dimension(:,:), pointer :: verticesOnCell, cellsOnVertex, kiteForCell + real (kind=RKIND), dimension(:,:), pointer :: edgesOnVertex_sign, edgesOnCell_sign + real (kind=RKIND), dimension(:,:,:), pointer :: zb, zb3, zb_cell, zb3_cell + + + call mpas_pool_get_array(mesh, 'edgesOnVertex', edgesOnVertex) + call mpas_pool_get_array(mesh, 'verticesOnEdge', verticesOnEdge) + call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) + call mpas_pool_get_array(mesh, 'edgesOnCell', edgesOnCell) + call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) + call mpas_pool_get_array(mesh, 'verticesOnCell', verticesOnCell) + call mpas_pool_get_array(mesh, 'cellsOnVertex', cellsOnVertex) + call mpas_pool_get_array(mesh, 'edgesOnVertex_sign', edgesOnVertex_sign) + call mpas_pool_get_array(mesh, 'edgesOnCell_sign', edgesOnCell_sign) + call mpas_pool_get_array(mesh, 'kiteForCell', kiteForCell) + call mpas_pool_get_array(mesh, 'zb', zb) + call mpas_pool_get_array(mesh, 'zb3', zb3) + call mpas_pool_get_array(mesh, 'zb_cell', zb_cell) + call mpas_pool_get_array(mesh, 'zb3_cell', zb3_cell) + + call mpas_pool_get_dimension(mesh, 'nCells', nCells) + call mpas_pool_get_dimension(mesh, 
'nVertices', nVertices) + call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) + call mpas_pool_get_dimension(mesh, 'vertexDegree', vertexDegree) + + + do iVtx=1,nVertices + do i=1,vertexDegree + if (edgesOnVertex(i,iVtx) <= nEdges) then + if (iVtx == verticesOnEdge(2,edgesOnVertex(i,iVtx))) then + edgesOnVertex_sign(i,iVtx) = 1.0 + else + edgesOnVertex_sign(i,iVtx) = -1.0 + end if + else + edgesOnVertex_sign(i,iVtx) = 0.0 + end if + end do + end do + + do iCell=1,nCells + do i=1,nEdgesOnCell(iCell) + if (edgesOnCell(i,iCell) <= nEdges) then + if (iCell == cellsOnEdge(1,edgesOnCell(i,iCell))) then + edgesOnCell_sign(i,iCell) = 1.0 + zb_cell(:,i,iCell) = zb(:,1,edgesOnCell(i,iCell)) + zb3_cell(:,i,iCell) = zb3(:,1,edgesOnCell(i,iCell)) + else + edgesOnCell_sign(i,iCell) = -1.0 + zb_cell(:,i,iCell) = zb(:,2,edgesOnCell(i,iCell)) + zb3_cell(:,i,iCell) = zb3(:,2,edgesOnCell(i,iCell)) + end if + else + edgesOnCell_sign(i,iCell) = 0.0 + end if + end do + end do + + do iCell=1,nCells + do i=1,nEdgesOnCell(iCell) + iVtx = verticesOnCell(i,iCell) + if (iVtx <= nVertices) then + do j=1,vertexDegree + if (iCell == cellsOnVertex(j,iVtx)) then + kiteForCell(i,iCell) = j + exit + end if + end do + if (j > vertexDegree) then + write(stderrUnit,*) 'Unexpected error while identifying kiteForCell' + end if + else + kiteForCell(i,iCell) = 1 + end if + end do + end do + + end subroutine atm_compute_signs + + subroutine atm_compute_damping_coefs(mesh, configs) implicit none @@ -748,109 +1089,6 @@ subroutine atm_compute_damping_coefs(mesh, configs) end subroutine atm_compute_damping_coefs - subroutine atm_compute_pgf_coefs(mesh, configs) - - implicit none - - type (mpas_pool_type), intent(inout) :: mesh - type (mpas_pool_type), intent(in) :: configs - - integer :: iEdge, iCell1, iCell2, k, iCell, nz, nz1 - real (kind=RKIND) :: d1, d2, d3 - real (kind=RKIND), dimension(:,:), pointer :: cpr, cpl, zgrid, pzp, pzm - integer, dimension(:,:), pointer :: cellsOnEdge - integer, pointer :: 
nCells, nEdges, nVertLevels - logical, pointer :: config_newpx - - call mpas_pool_get_array(mesh, 'cpr', cpr) - call mpas_pool_get_array(mesh, 'cpl', cpl) - call mpas_pool_get_array(mesh, 'pzp', pzp) - call mpas_pool_get_array(mesh, 'pzm', pzm) - call mpas_pool_get_array(mesh, 'zgrid', zgrid) - call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) - - call mpas_pool_get_config(configs, 'config_newpx', config_newpx) - - call mpas_pool_get_dimension(mesh, 'nCells', nCells) - call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) - call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) - -!**** coefficient arrays for new pressure gradient calculation - - cpr(:,:) = 0.0 - cpl(:,:) = 0.0 - - if (config_newpx) then - do iEdge=1,nEdges - - iCell1 = cellsOnEdge(1,iEdge) - iCell2 = cellsOnEdge(2,iEdge) - - d1 = .25*(zgrid(1,iCell2)+zgrid(2,iCell2)-zgrid(1,iCell1)-zgrid(2,iCell1)) - d2 = d1+.5*(zgrid(3,iCell2)-zgrid(1,iCell2)) - d3 = d2+.5*(zgrid(4,iCell2)-zgrid(2,iCell2)) -! cpr(1,iEdge) = d2*d3*(d3-d2)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1)) -! cpr(2,iEdge) = d1*d3*(d1-d3)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1)) -! cpr(3,iEdge) = d1*d2*(d2-d1)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1)) - - cpr(1,iEdge) = d2/(d2-d1) - cpr(2,iEdge) = -d1/(d2-d1) - cpr(3,iEdge) = 0. - - d1 = .25*(zgrid(1,iCell1)+zgrid(2,iCell1)-zgrid(1,iCell2)-zgrid(2,iCell2)) - d2 = d1+.5*(zgrid(3,iCell1)-zgrid(1,iCell1)) - d3 = d2+.5*(zgrid(4,iCell1)-zgrid(2,iCell1)) -! cpl(1,iEdge) = d2*d3*(d3-d2)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1)) -! cpl(2,iEdge) = d1*d3*(d1-d3)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1)) -! cpl(3,iEdge) = d1*d2*(d2-d1)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1)) - - cpl(1,iEdge) = d2/(d2-d1) - cpl(2,iEdge) = -d1/(d2-d1) - cpl(3,iEdge) = 0. - - end do - -! write(6,*) 'cpr1 = ',cpr(1,1),' cpl1 = ',cpl(1,1) -! write(6,*) 'cpr2 = ',cpr(2,1),' cpl2 = ',cpl(2,1) -! write(6,*) 'cpr3 = ',cpr(3,1),' cpl3 = ',cpl(3,1) - - else - -! 
Coefficients for computing vertical pressure gradient dp/dz -! dp/dz (k,iCell) = pzp(k,iCell) * (p(k+1,iCell) - p(k,iCell)) +pzm(k,iCell) * (p(k,iCell) - p(k-1,iCell)) - - nz1 = nVertLevels - nz = nz1 + 1 - - do iCell=1, nCells - - d1 = zgrid(3,iCell)-zgrid(1,iCell) - d2 = zgrid(4,iCell)-zgrid(2,iCell) - d3 = d1+d2 - pzm(1,iCell) = 2.*d3/(d1*d2) - pzp(1,iCell) = -2.*d1/(d2*d3) - - do k=2,nz1-1 - pzp(k,iCell) = 2.*(zgrid(k+1,iCell)-zgrid(k-1,iCell))/ & - & ((zgrid(k+2,iCell)-zgrid(k ,iCell))* & - & (zgrid(k+2,iCell)-zgrid(k ,iCell) & - & +zgrid(k+1,iCell)-zgrid(k-1,iCell))) - pzm(k,iCell) = 2.*(zgrid(k+2,iCell)-zgrid(k ,iCell))/ & - & ((zgrid(k+1,iCell)-zgrid(k-1,iCell))* & - & (zgrid(k+2,iCell)-zgrid(k ,iCell) & - & +zgrid(k+1,iCell)-zgrid(k-1,iCell))) - end do - - pzp(nz1,iCell) = 0. - pzm(nz1,iCell) = 2./(zgrid(nz,iCell)-zgrid(nz1-1,iCell)) - - end do - - end if - - end subroutine atm_compute_pgf_coefs - - subroutine atm_adv_coef_compression( mesh ) implicit none @@ -866,7 +1104,7 @@ subroutine atm_adv_coef_compression( mesh ) integer :: cell1, cell2, iEdge, n, i, j, j_in, iCell integer, pointer :: nCells, nEdges - integer :: cell_list(20), ordered_cell_list(20) + integer :: cell_list(20) logical :: addcell @@ -917,24 +1155,10 @@ subroutine atm_adv_coef_compression( mesh ) end if end do - ! order the list by increasing cell number (brute force approach) - - do i=1,n - ordered_cell_list(i) = nCells + 2 - j_in = 1 - do j=1,n - if (ordered_cell_list(i) > cell_list(j) ) then - j_in = j - ordered_cell_list(i) = cell_list(j) - end if - end do -! ordered_cell_list(i) = cell_list(j_in) - cell_list(j_in) = nCells + 3 - end do nAdvCellsForEdge(iEdge) = n do iCell = 1, nAdvCellsForEdge(iEdge) - advCellsForEdge(iCell,iEdge) = ordered_cell_list(iCell) + advCellsForEdge(iCell,iEdge) = cell_list(iCell) end do ! 
we have the ordered list, now construct coefficients @@ -947,7 +1171,7 @@ subroutine atm_adv_coef_compression( mesh ) j_in = 0 do j=1, n - if( ordered_cell_list(j) == cell1 ) j_in = j + if( cell_list(j) == cell1 ) j_in = j end do adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(1,1,iEdge) adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) + deriv_two(1,1,iEdge) @@ -955,7 +1179,7 @@ subroutine atm_adv_coef_compression( mesh ) do iCell = 1, nEdgesOnCell(cell1) j_in = 0 do j=1, n - if( ordered_cell_list(j) == cellsOnCell(iCell,cell1) ) j_in = j + if( cell_list(j) == cellsOnCell(iCell,cell1) ) j_in = j end do adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(iCell+1,1,iEdge) adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) + deriv_two(iCell+1,1,iEdge) @@ -966,7 +1190,7 @@ subroutine atm_adv_coef_compression( mesh ) j_in = 0 do j=1, n - if( ordered_cell_list(j) == cell2 ) j_in = j + if( cell_list(j) == cell2 ) j_in = j end do adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(1,2,iEdge) adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) - deriv_two(1,2,iEdge) @@ -974,7 +1198,7 @@ subroutine atm_adv_coef_compression( mesh ) do iCell = 1, nEdgesOnCell(cell2) j_in = 0 do j=1, n - if( ordered_cell_list(j) == cellsOnCell(iCell,cell2) ) j_in = j + if( cell_list(j) == cellsOnCell(iCell,cell2) ) j_in = j end do adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(iCell+1,2,iEdge) adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) - deriv_two(iCell+1,2,iEdge) @@ -989,13 +1213,13 @@ subroutine atm_adv_coef_compression( mesh ) j_in = 0 do j=1, n - if( ordered_cell_list(j) == cell1 ) j_in = j + if( cell_list(j) == cell1 ) j_in = j end do adv_coefs(j_in,iEdge) = adv_coefs(j_in,iEdge) + 0.5 j_in = 0 do j=1, n - if( ordered_cell_list(j) == cell2 ) j_in = j + if( cell_list(j) == cell2 ) j_in = j end do adv_coefs(j_in,iEdge) = adv_coefs(j_in,iEdge) + 0.5 @@ -1012,4 +1236,28 @@ subroutine atm_adv_coef_compression( mesh ) end subroutine 
atm_adv_coef_compression + + subroutine atm_couple_coef_3rd_order(mesh, configs) + + implicit none + + type (mpas_pool_type), intent(inout) :: mesh + type (mpas_pool_type), intent(in) :: configs + + real (kind=RKIND), dimension(:,:), pointer :: adv_coefs_3rd + real (kind=RKIND), dimension(:,:,:), pointer :: zb3_cell + real (kind=RKIND), pointer :: config_coef_3rd_order + + call mpas_pool_get_array(mesh, 'zb3_cell', zb3_cell) + call mpas_pool_get_array(mesh, 'adv_coefs_3rd', adv_coefs_3rd) + + call mpas_pool_get_config(configs, 'config_coef_3rd_order', config_coef_3rd_order) + + adv_coefs_3rd(:,:) = config_coef_3rd_order * adv_coefs_3rd(:,:) + zb3_cell(:,:,:) = config_coef_3rd_order * zb3_cell(:,:,:) + + end subroutine atm_couple_coef_3rd_order + + end module atm_core + diff --git a/src/core_atmosphere/mpas_atm_core_interface.F b/src/core_atmosphere/mpas_atm_core_interface.F index 80c88282a3..85ca589a53 100644 --- a/src/core_atmosphere/mpas_atm_core_interface.F +++ b/src/core_atmosphere/mpas_atm_core_interface.F @@ -7,6 +7,8 @@ ! module atm_core_interface + use mpas_attlist + use mpas_abort, only : mpas_dmpar_global_abort contains @@ -42,6 +44,7 @@ subroutine atm_setup_core(core) core % get_mesh_stream => atm_get_mesh_stream core % setup_immutable_streams => atm_setup_immutable_streams core % setup_derived_dimensions => atm_setup_derived_dimensions + core % setup_decomposed_dimensions => atm_setup_decomposed_dimensions core % setup_block => atm_setup_block core % setup_namelist => atm_setup_namelists @@ -50,6 +53,14 @@ subroutine atm_setup_core(core) #include "inc/core_variables.inc" +write(0,*) '' +#ifdef SINGLE_PRECISION +write(0,'(a)') 'Using default single-precision reals' +#else +write(0,'(a)') 'Using default double-precision reals' +#endif +write(0,*) '' + end subroutine atm_setup_core @@ -92,19 +103,53 @@ end subroutine atm_setup_domain !> not allocated until after this routine has been called. ! 
!----------------------------------------------------------------------- - function atm_setup_packages(configs, packages) result(ierr) + function atm_setup_packages(configs, packages, iocontext) result(ierr) - use mpas_derived_types, only : mpas_pool_type + use mpas_dmpar + use mpas_derived_types, only : mpas_pool_type, mpas_io_context_type use mpas_pool_routines, only : mpas_pool_get_config, mpas_pool_get_package +#ifdef DO_PHYSICS + use mpas_atmphys_control + use mpas_atmphys_packages +#endif + implicit none type (mpas_pool_type), intent(inout) :: configs type (mpas_pool_type), intent(inout) :: packages + type (mpas_io_context_type), intent(inout) :: iocontext integer :: ierr + logical, pointer :: iauActive + character(len=StrKIND), pointer :: config_iau_option + ierr = 0 + nullify(config_iau_option) + call mpas_pool_get_config(configs, 'config_IAU_option', config_iau_option) + + nullify(iauActive) + call mpas_pool_get_package(packages, 'iauActive', iauActive) + + if (trim(config_iau_option) /= 'off') then + iauActive = .true. + else + iauActive = .false. + end if + +#ifdef DO_PHYSICS + !check that all the physics options are correctly defined and that at + !least one physics parameterization is called (using the logical moist_physics): + call physics_namelist_check(configs) + + ierr = atmphys_setup_packages(configs,packages,iocontext) + if(ierr /= 0) then + call mpas_dmpar_global_abort('ERROR: Package setup failed for atmphys in core_atmosphere', & + deferredAbort=.true.) + endif +#endif + end function atm_setup_packages @@ -176,7 +221,8 @@ function atm_get_mesh_stream(configs, stream) result(ierr) call mpas_pool_get_config(configs, 'config_do_restart', config_do_restart) if (.not. associated(config_do_restart)) then - call mpas_dmpar_global_abort('ERROR: config_do_restart was not found when defining mesh stream.') + call mpas_dmpar_global_abort('ERROR: config_do_restart was not found when defining mesh stream.', deferredAbort=.true.) 
+ ierr = 1 else if (config_do_restart) then write(stream,'(a)') 'restart' else diff --git a/src/core_atmosphere/mpas_atm_dimensions.F b/src/core_atmosphere/mpas_atm_dimensions.F new file mode 100644 index 0000000000..a7dd420118 --- /dev/null +++ b/src/core_atmosphere/mpas_atm_dimensions.F @@ -0,0 +1,124 @@ +module mpas_atm_dimensions + +#ifdef CONST_INNER_DIMS + +#ifndef CONST_NVERTLEVELS +#error "Defining CONST_INNER_DIMS requires CONST_NVERTLEVELS, CONST_MAXEDGES, CONST_MAXEDGES2, and CONST_NUM_SCALARS to be defined as well." +#endif +#ifndef CONST_MAXEDGES +#error "Defining CONST_INNER_DIMS requires CONST_NVERTLEVELS, CONST_MAXEDGES, CONST_MAXEDGES2, and CONST_NUM_SCALARS to be defined as well." +#endif +#ifndef CONST_MAXEDGES2 +#error "Defining CONST_INNER_DIMS requires CONST_NVERTLEVELS, CONST_MAXEDGES, CONST_MAXEDGES2, and CONST_NUM_SCALARS to be defined as well." +#endif +#ifndef CONST_NUM_SCALARS +#error "Defining CONST_INNER_DIMS requires CONST_NVERTLEVELS, CONST_MAXEDGES, CONST_MAXEDGES2, and CONST_NUM_SCALARS to be defined as well." 
+#endif + + integer, parameter :: nVertLevels = CONST_NVERTLEVELS + integer, parameter :: maxEdges = CONST_MAXEDGES + integer, parameter :: maxEdges2 = CONST_MAXEDGES2 + integer, parameter :: num_scalars = CONST_NUM_SCALARS +#else + integer :: nVertLevels + integer :: maxEdges + integer :: maxEdges2 + integer :: num_scalars +#endif + + + contains + + + subroutine mpas_atm_set_dims(nVertLevels_val, maxEdges_val, maxEdges2_val, num_scalars_val) + + use mpas_kind_types, only : StrKIND + use mpas_io_units, only : stderrUnit + use mpas_abort, only : mpas_dmpar_global_abort + + implicit none + + integer, intent(in) :: nVertLevels_val + integer, intent(in) :: maxEdges_val + integer, intent(in) :: maxEdges2_val + integer, intent(in) :: num_scalars_val + + character(len=StrKIND) :: errstring1, errstring2 + +#ifdef CONST_INNER_DIMS + + integer :: nerrors + character(len=StrKIND) :: errbuf + + nerrors = 0 + write(errbuf,*) '' + + if (nVertLevels /= nVertLevels_val) then + write(errstring1,'(a,i4)') ' At compile, CONST_NVERTLEVELS=', CONST_NVERTLEVELS + write(errstring2,'(a,i4)') ' At runtime, nVertLevels=', nVertLevels_val + + call mpas_dmpar_global_abort('********************************************************************************',deferredAbort=.true.) + call mpas_dmpar_global_abort('ERROR: Dimension nVertLevels read from input file does not match the value used', deferredAbort=.true.) + call mpas_dmpar_global_abort(' when compiling MPAS-Atmosphere:', deferredAbort=.true.) + call mpas_dmpar_global_abort(trim(errstring1), deferredAbort=.true.) + call mpas_dmpar_global_abort(trim(errstring2), deferredAbort=.true.) + call mpas_dmpar_global_abort('********************************************************************************',deferredAbort=.true.) 
+ write(errbuf,'(a,i4,a1,i4,a)') trim(errbuf)//' nVertlevels: ', CONST_NVERTLEVELS, '/', nVertLevels_val, ' ' + nerrors = nerrors + 1 + end if + + if (maxEdges /= maxEdges_val) then + write(errstring1,'(a,i4)')' At compile, CONST_MAXEDGES=', CONST_MAXEDGES + write(errstring2,'(a,i4)')' At runtime, maxEdges=', maxEdges_val + + call mpas_dmpar_global_abort('********************************************************************************',deferredAbort=.true.) + call mpas_dmpar_global_abort('ERROR: Dimension maxEdges read from input file does not match the value used', deferredAbort=.true.) + call mpas_dmpar_global_abort(' when compiling MPAS-Atmosphere:', deferredAbort=.true.) + call mpas_dmpar_global_abort(trim(errstring1), deferredAbort=.true.) + call mpas_dmpar_global_abort(trim(errstring2), deferredAbort=.true.) + call mpas_dmpar_global_abort('********************************************************************************',deferredAbort=.true.) + write(errbuf,'(a,i4,a1,i4,a)') trim(errbuf)//' maxEdges: ', CONST_MAXEDGES, '/', maxEdges_val, ' ' + nerrors = nerrors + 1 + end if + + if (maxEdges2 /= maxEdges2_val) then + write(errstring1,'(a,i4)')' At compile, CONST_MAXEDGES2=', CONST_MAXEDGES2 + write(errstring2,'(a,i4)')' At runtime, maxEdges2=', maxEdges2_val + + call mpas_dmpar_global_abort('********************************************************************************',deferredAbort=.true.) + call mpas_dmpar_global_abort('ERROR: Dimension maxEdges2 read from input file does not match the value used', deferredAbort=.true.) + call mpas_dmpar_global_abort(' when compiling MPAS-Atmosphere:', deferredAbort=.true.) + call mpas_dmpar_global_abort(trim(errstring1), deferredAbort=.true.) + call mpas_dmpar_global_abort(trim(errstring2), deferredAbort=.true.) + call mpas_dmpar_global_abort('********************************************************************************',deferredAbort=.true.) 
+ write(errbuf,'(a,i4,a1,i4,a)') trim(errbuf)//' maxEdges2: ', CONST_MAXEDGES2, '/', maxEdges2_val, ' ' + nerrors = nerrors + 1 + end if + + if (num_scalars /= num_scalars_val) then + write(errstring1,'(a,i4)')' At compile, CONST_NUM_SCALARS=', CONST_NUM_SCALARS + write(errstring2,'(a,i4)')' At runtime, num_scalars=', num_scalars_val + + call mpas_dmpar_global_abort('********************************************************************************',deferredAbort=.true.) + call mpas_dmpar_global_abort('ERROR: Number of scalars read from input file does not match the value used', deferredAbort=.true.) + call mpas_dmpar_global_abort(' when compiling MPAS-Atmosphere:', deferredAbort=.true.) + call mpas_dmpar_global_abort(trim(errstring1), deferredAbort=.true.) + call mpas_dmpar_global_abort(trim(errstring2), deferredAbort=.true.) + call mpas_dmpar_global_abort('********************************************************************************',deferredAbort=.true.) + write(errbuf,'(a,i4,a1,i4,a)') trim(errbuf)//' num_scalars: ', CONST_NUM_SCALARS, '/', num_scalars_val, ' ' + nerrors = nerrors + 1 + end if + + if (nerrors > 0) then + call mpas_dmpar_global_abort(trim(errbuf)) + end if +#else + nVertLevels = nVertLevels_val + maxEdges = maxEdges_val + maxEdges2 = maxEdges2_val + num_scalars = num_scalars_val +#endif + + end subroutine mpas_atm_set_dims + +end module mpas_atm_dimensions diff --git a/src/core_atmosphere/mpas_atm_interp_diagnostics.F b/src/core_atmosphere/mpas_atm_interp_diagnostics.F deleted file mode 100644 index 639dbc630e..0000000000 --- a/src/core_atmosphere/mpas_atm_interp_diagnostics.F +++ /dev/null @@ -1,645 +0,0 @@ -! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) -! and the University Corporation for Atmospheric Research (UCAR). -! -! Unless noted otherwise source code is licensed under the BSD license. -! Additional copyright and license information can be found in the LICENSE file -! 
distributed with this code, or at http://mpas-dev.github.com/license.html -! -!================================================================================================== - module mpas_atm_interp_diagnostics - use mpas_dmpar - use mpas_kind_types - use mpas_derived_types - use mpas_pool_routines - use mpas_constants - - implicit none - private - public:: interp_diagnostics - - contains - -!================================================================================================== - subroutine interp_diagnostics(mesh, state, time_lev, diag) -!================================================================================================== - -!input arguments: - type (mpas_pool_type), intent(in) :: mesh - type (mpas_pool_type), intent(in) :: state - integer, intent(in) :: time_lev ! which time level to use from state - -!inout arguments: - type (mpas_pool_type), intent(inout) :: diag - -!local variables: - integer :: iCell,iVert,iVertD,k,kk - integer, pointer :: nCells, nVertLevels, nVertices, vertexDegree - integer :: nVertLevelsP1 - integer, pointer :: index_qv - integer, dimension(:,:), pointer :: cellsOnVertex - - type (field2DReal), pointer:: pressure_p_field - - real (kind=RKIND), dimension(:), pointer :: areaTriangle - real (kind=RKIND), dimension(:,:), pointer :: kiteAreasOnVertex - - real (kind=RKIND), dimension(:,:), pointer :: exner, height - real (kind=RKIND), dimension(:,:), pointer :: pressure_b, pressure_p - real (kind=RKIND), dimension(:,:), pointer :: qvapor, relhum, theta_m, vorticity - real (kind=RKIND), dimension(:,:), pointer :: umeridional, uzonal, vvel - real (kind=RKIND), dimension(:,:,:), pointer :: scalars - - real (kind=RKIND), dimension(:), pointer :: temperature_200hPa - real (kind=RKIND), dimension(:), pointer :: temperature_500hPa - real (kind=RKIND), dimension(:), pointer :: temperature_700hPa - real (kind=RKIND), dimension(:), pointer :: temperature_850hPa - - real (kind=RKIND), dimension(:), pointer :: relhum_200hPa - 
real (kind=RKIND), dimension(:), pointer :: relhum_500hPa - real (kind=RKIND), dimension(:), pointer :: relhum_700hPa - real (kind=RKIND), dimension(:), pointer :: relhum_850hPa - - real (kind=RKIND), dimension(:), pointer :: uzonal_200hPa - real (kind=RKIND), dimension(:), pointer :: uzonal_500hPa - real (kind=RKIND), dimension(:), pointer :: uzonal_700hPa - real (kind=RKIND), dimension(:), pointer :: uzonal_850hPa - - real (kind=RKIND), dimension(:), pointer :: umeridional_200hPa - real (kind=RKIND), dimension(:), pointer :: umeridional_500hPa - real (kind=RKIND), dimension(:), pointer :: umeridional_700hPa - real (kind=RKIND), dimension(:), pointer :: umeridional_850hPa - - real (kind=RKIND), dimension(:), pointer :: height_200hPa - real (kind=RKIND), dimension(:), pointer :: height_500hPa - real (kind=RKIND), dimension(:), pointer :: height_700hPa - real (kind=RKIND), dimension(:), pointer :: height_850hPa - - real (kind=RKIND), dimension(:), pointer :: w_200hPa - real (kind=RKIND), dimension(:), pointer :: w_500hPa - real (kind=RKIND), dimension(:), pointer :: w_700hPa - real (kind=RKIND), dimension(:), pointer :: w_850hPa - - real (kind=RKIND), dimension(:), pointer :: vorticity_200hPa - real (kind=RKIND), dimension(:), pointer :: vorticity_500hPa - real (kind=RKIND), dimension(:), pointer :: vorticity_700hPa - real (kind=RKIND), dimension(:), pointer :: vorticity_850hPa - - real (kind=RKIND), dimension(:), pointer :: mslp - - real (kind=RKIND), dimension(:,:), allocatable :: pressure, pressureCp1, pressure2, pressure_v, temperature - -!local interpolated fields: - integer :: nIntP - real (kind=RKIND) :: w1,w2,z0,z1,z2 - real (kind=RKIND), dimension(:,:), allocatable :: field_in,press_in - real (kind=RKIND), dimension(:,:), allocatable :: field_interp,press_interp - -!-------------------------------------------------------------------------------------------------- - -! write(0,*) -! 
write(0,*) '--- enter subroutine interp_diagnostics:' - - call mpas_pool_get_dimension(mesh, 'nCells', nCells) - call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) - call mpas_pool_get_dimension(mesh, 'nVertices', nVertices) - call mpas_pool_get_dimension(mesh, 'vertexDegree', vertexDegree) - call mpas_pool_get_dimension(state, 'index_qv', index_qv) - - nVertLevelsP1 = nVertLevels + 1 - - call mpas_pool_get_array(mesh, 'cellsOnVertex', cellsOnVertex) - call mpas_pool_get_array(mesh, 'areaTriangle', areaTriangle) - call mpas_pool_get_array(mesh, 'kiteAreasOnVertex', kiteAreasOnVertex) - - call mpas_pool_get_array(mesh, 'zgrid', height) - call mpas_pool_get_array(state, 'w', vvel, time_lev) - call mpas_pool_get_array(state, 'theta_m', theta_m, time_lev) - call mpas_pool_get_array(state, 'scalars', scalars, time_lev) - - qvapor => scalars(index_qv,:,:) !MGD does this actually work? - - call mpas_pool_get_field(diag, 'pressure_p', pressure_p_field) - call mpas_dmpar_exch_halo_field(pressure_p_field) - - call mpas_pool_get_array(diag, 'exner', exner) - call mpas_pool_get_array(diag, 'pressure_base', pressure_b) - call mpas_pool_get_array(diag, 'pressure_p', pressure_p) - call mpas_pool_get_array(diag, 'vorticity', vorticity) - call mpas_pool_get_array(diag, 'uReconstructMeridional', umeridional) - call mpas_pool_get_array(diag, 'uReconstructZonal', uzonal) - call mpas_pool_get_array(diag, 'relhum', relhum) - - call mpas_pool_get_array(diag, 'temperature_200hPa', temperature_200hPa) - call mpas_pool_get_array(diag, 'temperature_500hPa', temperature_500hPa) - call mpas_pool_get_array(diag, 'temperature_700hPa', temperature_700hPa) - call mpas_pool_get_array(diag, 'temperature_850hPa', temperature_850hPa) - - call mpas_pool_get_array(diag, 'relhum_200hPa', relhum_200hPa) - call mpas_pool_get_array(diag, 'relhum_500hPa', relhum_500hPa) - call mpas_pool_get_array(diag, 'relhum_700hPa', relhum_700hPa) - call mpas_pool_get_array(diag, 'relhum_850hPa', 
relhum_850hPa) - - call mpas_pool_get_array(diag, 'uzonal_200hPa', uzonal_200hPa) - call mpas_pool_get_array(diag, 'uzonal_500hPa', uzonal_500hPa) - call mpas_pool_get_array(diag, 'uzonal_700hPa', uzonal_700hPa) - call mpas_pool_get_array(diag, 'uzonal_850hPa', uzonal_850hPa) - - call mpas_pool_get_array(diag, 'umeridional_200hPa', umeridional_200hPa) - call mpas_pool_get_array(diag, 'umeridional_500hPa', umeridional_500hPa) - call mpas_pool_get_array(diag, 'umeridional_700hPa', umeridional_700hPa) - call mpas_pool_get_array(diag, 'umeridional_850hPa', umeridional_850hPa) - - call mpas_pool_get_array(diag, 'height_200hPa', height_200hPa) - call mpas_pool_get_array(diag, 'height_500hPa', height_500hPa) - call mpas_pool_get_array(diag, 'height_700hPa', height_700hPa) - call mpas_pool_get_array(diag, 'height_850hPa', height_850hPa) - - call mpas_pool_get_array(diag, 'w_200hPa', w_200hPa) - call mpas_pool_get_array(diag, 'w_500hPa', w_500hPa) - call mpas_pool_get_array(diag, 'w_700hPa', w_700hPa) - call mpas_pool_get_array(diag, 'w_850hPa', w_850hPa) - - call mpas_pool_get_array(diag, 'vorticity_200hPa', vorticity_200hPa) - call mpas_pool_get_array(diag, 'vorticity_500hPa', vorticity_500hPa) - call mpas_pool_get_array(diag, 'vorticity_700hPa', vorticity_700hPa) - call mpas_pool_get_array(diag, 'vorticity_850hPa', vorticity_850hPa) - - call mpas_pool_get_array(diag, 'mslp', mslp) - - if(.not.allocated(pressure) ) allocate(pressure(nVertLevels,nCells) ) - if(.not.allocated(pressureCp1) ) allocate(pressureCp1(nVertLevels,nCells+1) ) - if(.not.allocated(pressure2) ) allocate(pressure2(nVertLevelsP1,nCells) ) - if(.not.allocated(pressure_v) ) allocate(pressure_v(nVertLevels,nVertices) ) - if(.not.allocated(temperature) ) allocate(temperature(nVertLevels,nCells) ) - -!calculation of total pressure at cell centers (at mass points): - do iCell = 1, nCells - do k = 1, nVertLevels - pressure(k,iCell) = (pressure_p(k,iCell) + pressure_b(k,iCell)) / 100._RKIND - 
pressureCp1(k,iCell) = pressure(k,iCell) - enddo - enddo - do iCell = nCells+1, nCells+1 - do k = 1, nVertLevels - pressureCp1(k,iCell) = (pressure_p(k,iCell) + pressure_b(k,iCell)) / 100._RKIND - enddo - enddo - -!calculation of total pressure at cell centers (at vertical velocity points): - k = nVertLevelsP1 - do iCell = 1, nCells - z0 = height(k,iCell) - z1 = 0.5*(height(k,iCell)+height(k-1,iCell)) - z2 = 0.5*(height(k-1,iCell)+height(k-2,iCell)) - w1 = (z0-z2)/(z1-z2) - w2 = 1.-w1 - !use log of pressure to avoid occurrences of negative top-of-the-model pressure. - pressure2(k,iCell) = exp(w1*log(pressure(k-1,iCell))+w2*log(pressure(k-2,iCell))) - enddo - do k = 2, nVertLevels - do iCell = 1, nCells - w1 = (height(k,iCell)-height(k-1,iCell)) / (height(k+1,iCell)-height(k-1,iCell)) - w2 = (height(k+1,iCell)-height(k,iCell)) / (height(k+1,iCell)-height(k-1,iCell)) - pressure2(k,iCell) = w1*pressure(k,iCell) + w2*pressure(k-1,iCell) - enddo - enddo - k = 1 - do iCell = 1, nCells - z0 = height(k,iCell) - z1 = 0.5*(height(k,iCell)+height(k+1,iCell)) - z2 = 0.5*(height(k+1,iCell)+height(k+2,iCell)) - w1 = (z0-z2)/(z1-z2) - w2 = 1.-w1 - pressure2(k,iCell) = w1*pressure(k,iCell)+w2*pressure(k+1,iCell) - enddo - -!calculation of total pressure at cell vertices (at mass points): - do iVert = 1, nVertices - pressure_v(:,iVert) = 0._RKIND - - do k = 1, nVertLevels - do iVertD = 1, vertexDegree - pressure_v(k,iVert) = pressure_v(k,iVert) & - + kiteAreasOnVertex(iVertD,iVert)*pressureCp1(k,cellsOnVertex(iVertD,iVert)) - enddo - pressure_v(k,iVert) = pressure_v(k,iVert) / areaTriangle(iVert) - enddo - enddo - -!calculation of temperature at cell centers: - do iCell = 1,nCells - do k = 1,nVertLevels - temperature(k,iCell) = (theta_m(k,iCell)/(1._RKIND+rvord*qvapor(k,iCell)))*exner(k,iCell) - enddo - enddo - -!interpolation to fixed pressure levels for fields located at cells centers and at mass points: - nIntP = 4 - if(.not.allocated(field_interp)) 
allocate(field_interp(nCells,nIntP) ) - if(.not.allocated(press_interp)) allocate(press_interp(nCells,nIntP) ) - do iCell = 1, nCells - press_interp(iCell,1) = 200.0_RKIND - press_interp(iCell,2) = 500.0_RKIND - press_interp(iCell,3) = 700.0_RKIND - press_interp(iCell,4) = 850.0_RKIND - enddo - - if(.not.allocated(press_in)) allocate(press_in(nCells,nVertLevels)) - do iCell = 1, nCells - do k = 1, nVertLevels - kk = nVertLevels+1-k - press_in(iCell,kk) = pressure(k,iCell) - enddo - enddo - - if(.not.allocated(field_in)) allocate(field_in(nCells,nVertLevels)) -!... temperature: - do iCell = 1, nCells - do k = 1, nVertLevels - kk = nVertLevels+1-k - field_in(iCell,kk) = temperature(k,iCell) - enddo - enddo - call interp_tofixed_pressure(nCells,nVertLevels,nIntP,press_in,field_in,press_interp,field_interp) - temperature_200hPa(1:nCells) = field_interp(1:nCells,1) - temperature_500hPa(1:nCells) = field_interp(1:nCells,2) - temperature_700hPa(1:nCells) = field_interp(1:nCells,3) - temperature_850hPa(1:nCells) = field_interp(1:nCells,4) -! write(0,*) '--- end interpolate temperature:' - -!... relative humidity: - do iCell = 1, nCells - do k = 1, nVertLevels - kk = nVertLevels+1-k - field_in(iCell,kk) = relhum(k,iCell) - enddo - enddo - call interp_tofixed_pressure(nCells,nVertLevels,nIntP,press_in,field_in,press_interp,field_interp) - relhum_200hPa(1:nCells) = field_interp(1:nCells,1) - relhum_500hPa(1:nCells) = field_interp(1:nCells,2) - relhum_700hPa(1:nCells) = field_interp(1:nCells,3) - relhum_850hPa(1:nCells) = field_interp(1:nCells,4) -! write(0,*) '--- end interpolate relative humidity:' - -!... 
u zonal wind: - do iCell = 1, nCells - do k = 1, nVertLevels - kk = nVertLevels+1-k - field_in(iCell,kk) = uzonal(k,iCell) - enddo - enddo - call interp_tofixed_pressure(nCells,nVertLevels,nIntP,press_in,field_in,press_interp,field_interp) - uzonal_200hPa(1:nCells) = field_interp(1:nCells,1) - uzonal_500hPa(1:nCells) = field_interp(1:nCells,2) - uzonal_700hPa(1:nCells) = field_interp(1:nCells,3) - uzonal_850hPa(1:nCells) = field_interp(1:nCells,4) -! write(0,*) '--- end interpolate zonal wind:' - -!... u meridional wind: - do iCell = 1, nCells - do k = 1, nVertLevels - kk = nVertLevels+1-k - field_in(iCell,kk) = umeridional(k,iCell) - enddo - enddo - call interp_tofixed_pressure(nCells,nVertLevels,nIntP,press_in,field_in,press_interp,field_interp) - umeridional_200hPa(1:nCells) = field_interp(1:nCells,1) - umeridional_500hPa(1:nCells) = field_interp(1:nCells,2) - umeridional_700hPa(1:nCells) = field_interp(1:nCells,3) - umeridional_850hPa(1:nCells) = field_interp(1:nCells,4) -! write(0,*) '--- end interpolate meridional wind:' - - if(allocated(field_in)) deallocate(field_in) - if(allocated(press_in)) deallocate(press_in) - -!interpolation to fixed pressure levels for fields located at cells centers and at vertical -!velocity points: - if(.not.allocated(press_in)) allocate(press_in(nCells,nVertLevelsP1)) - do iCell = 1, nCells - do k = 1, nVertLevelsP1 - kk = nVertLevelsP1+1-k - press_in(iCell,kk) = pressure2(k,iCell) - enddo - enddo - - if(.not.allocated(field_in)) allocate(field_in(nCells,nVertLevelsP1)) - !... height: - do iCell = 1, nCells - do k = 1, nVertLevelsP1 - kk = nVertLevelsP1+1-k - field_in(iCell,kk) = height(k,iCell) - enddo - enddo - call interp_tofixed_pressure(nCells,nVertLevelsP1,nIntP,press_in,field_in,press_interp,field_interp) - height_200hPa(1:nCells) = field_interp(1:nCells,1) - height_500hPa(1:nCells) = field_interp(1:nCells,2) - height_700hPa(1:nCells) = field_interp(1:nCells,3) - height_850hPa(1:nCells) = field_interp(1:nCells,4) -! 
write(0,*) '--- end interpolate height:' - -!... vertical velocity - do iCell = 1, nCells - do k = 1, nVertLevelsP1 - kk = nVertLevelsP1+1-k - field_in(iCell,kk) = vvel(k,iCell) - enddo - enddo - call interp_tofixed_pressure(nCells,nVertLevelsP1,nIntP,press_in,field_in,press_interp,field_interp) - w_200hPa(1:nCells) = field_interp(1:nCells,1) - w_500hPa(1:nCells) = field_interp(1:nCells,2) - w_700hPa(1:nCells) = field_interp(1:nCells,3) - w_850hPa(1:nCells) = field_interp(1:nCells,4) -! write(0,*) '--- end interpolate vertical velocity:' - - if(allocated(field_interp)) deallocate(field_interp) - if(allocated(press_interp)) deallocate(press_interp) - -!interpolation to fixed pressure levels for fields located at cell vertices and at mass points: - nIntP = 4 - if(.not.allocated(field_interp)) allocate(field_interp(nVertices,nIntP) ) - if(.not.allocated(press_interp)) allocate(press_interp(nVertices,nIntP) ) - do iVert = 1, nVertices - press_interp(iVert,1) = 200.0_RKIND - press_interp(iVert,2) = 500.0_RKIND - press_interp(iVert,3) = 700.0_RKIND - press_interp(iVert,4) = 850.0_RKIND - enddo - - if(allocated(field_in)) deallocate(field_in) - if(allocated(press_in)) deallocate(press_in) - - if(.not.allocated(press_in)) allocate(press_in(nVertices,nVertLevels)) - do iVert = 1, nVertices - do k = 1, nVertLevels - kk = nVertLevels+1-k - press_in(iVert,kk) = pressure_v(k,iVert) - enddo - enddo - - if(.not.allocated(field_in)) allocate(field_in(nVertices,nVertLevels)) -!... relative vorticity: - do iVert = 1, nVertices - do k = 1, nVertLevels - kk = nVertLevels+1-k - field_in(iVert,kk) = vorticity(k,iVert) - enddo - enddo - call interp_tofixed_pressure(nVertices,nVertLevels,nIntP,press_in,field_in,press_interp,field_interp) - vorticity_200hPa(1:nVertices) = field_interp(1:nVertices,1) - vorticity_500hPa(1:nVertices) = field_interp(1:nVertices,2) - vorticity_700hPa(1:nVertices) = field_interp(1:nVertices,3) - vorticity_850hPa(1:nVertices) = field_interp(1:nVertices,4) -! 
write(0,*) '--- end interpolate relative vorticity:' - -!... compute SLP (requires temp, height, pressure, qvapor) - call compute_slp(nCells, nVertLevels, temperature, height, pressure, qvapor, mslp) - mslp(:) = mslp(:) * 100.0 ! Convert from hPa to Pa -!... alternative way -!do iCell = 1, nCells -! mslp(iCell) = diag % surface_pressure % array(iCell) + 11.38*height(1,iCell) -! mslp(iCell) = mslp(iCell)/100. -!enddo - - if(allocated(field_interp)) deallocate(field_interp) - if(allocated(press_interp)) deallocate(press_interp) - if(allocated(pressure) ) deallocate(pressure ) - if(allocated(pressureCp1) ) deallocate(pressureCp1 ) - if(allocated(pressure2) ) deallocate(pressure2 ) - if(allocated(pressure_v) ) deallocate(pressure_v ) - if(allocated(temperature) ) deallocate(temperature ) - -!formats: -! 201 format(i5,4(1x,e15.8)) - - end subroutine interp_diagnostics - -!================================================================================================== - subroutine interp_tofixed_pressure(ncol,nlev_in,nlev_out,pres_in,field_in,pres_out,field_out) -!================================================================================================== - -!input arguments: - integer,intent(in):: ncol,nlev_in,nlev_out - - real(kind=RKIND),intent(in),dimension(ncol,nlev_in) :: pres_in,field_in - real(kind=RKIND),intent(in),dimension(ncol,nlev_out):: pres_out - -!output arguments: - real(kind=RKIND),intent(out),dimension(ncol,nlev_out):: field_out - -!local variables: -! integer:: i1,i2,icol,k,kk - integer:: icol,k,kk - integer:: kkstart,kount - integer,dimension(ncol):: kupper - - real(kind=RKIND):: dpl,dpu - -!-------------------------------------------------------------------------------------------------- - -!formats: -! 201 format(i5,8(1x,e15.8)) - -!write(0,*) -!write(0,*) '--- enter subroutine interp_tofixed_pressure:' -!write(0,*) '... ncol = ',ncol -!write(0,*) '... nlev_in = ',nlev_in -!write(0,*) '... 
nlev_out = ',nlev_out -!i1=1 ; i2=ncol -!do k = 1, nlev_in -! write(0,201) k,pres_in(i1,k),field_in(i1,k),pres_in(i2,k),field_in(i2,k) -!enddo -!write(0,*) - - do icol = 1, ncol - kupper(icol) = 1 - enddo - - do k = 1, nlev_out - - kkstart = nlev_in - do icol = 1, ncol - kkstart = min0(kkstart,kupper(icol)) - enddo - kount = 0 - - do kk = kkstart, nlev_in-1 - do icol = 1, ncol - if(pres_out(icol,k).gt.pres_in(icol,kk).and.pres_out(icol,k).le.pres_in(icol,kk+1)) then - kupper(icol) = kk - kount = kount + 1 -! write(0,201) kupper(icol),pres_out(icol,k),pres_in(icol,kk),pres_in(icol,kk+1) - endif - enddo - - if(kount.eq.ncol) then - do icol = 1, ncol - dpu = pres_out(icol,k) - pres_in(icol,kupper(icol)) - dpl = pres_in(icol,kupper(icol)+1) - pres_out(icol,k) - field_out(icol,k) = (field_in(icol,kupper(icol))*dpl & - + field_in(icol,kupper(icol)+1)*dpu)/(dpl + dpu) - end do - goto 35 - end if - enddo - - do icol = 1, ncol - if(pres_out(icol,k) .lt. pres_in(icol,1)) then - field_out(icol,k) = field_in(icol,1)*pres_out(icol,k)/pres_in(icol,1) - elseif(pres_out(icol,k) .gt. pres_in(icol,nlev_in)) then - field_out(icol,k) = field_in(icol,nlev_in) - else - dpu = pres_out(icol,k) - pres_in(icol,kupper(icol)) - dpl = pres_in(icol,kupper(icol)+1) - pres_out(icol,k) - field_out(icol,k) = (field_in(icol,kupper(icol))*dpl & - + field_in(icol,kupper(icol)+1)*dpu)/(dpl + dpu) - endif - enddo - - 35 continue -! write(0,201) kupper(i1),pres_out(i1,k),pres_in(i1,kupper(i1)),pres_in(i1,kupper(i1)+1), & -! field_out(i1,k),field_in(i1,kupper(i1)),field_in(i1,kupper(i1)+1) -! write(0,201) kupper(i2),pres_out(i2,k),pres_in(i2,kupper(i2)),pres_in(i2,kupper(i2)+1), & -! 
field_out(i2,k),field_in(i2,kupper(i2)),field_in(i2,kupper(i2)+1) - - enddo - - end subroutine interp_tofixed_pressure - - subroutine compute_slp(ncol,nlev_in,t,height,p,qv,slp) - - implicit none - - !input arguments: - integer, intent(in) :: ncol, nlev_in - - !p: in mb - !t: in K - !qv: in kg/kg - !height: in m - real(kind=RKIND), intent(in), dimension(nlev_in,ncol) :: p,t,qv - real(kind=RKIND), intent(in), dimension(nlev_in+1,ncol) :: height - - !output arguments: - real(kind=RKIND), intent(out), dimension(ncol) :: slp - - !local variables: - integer :: icol, k, kcount - integer :: klo, khi - - real(kind=RKIND) :: gamma, rr, grav - parameter (rr=287.0, grav=9.80616, gamma=0.0065) - - real(kind=RKIND) :: tc, pconst - parameter (tc=273.16+17.5, pconst=100.) - - logical mm5_test - parameter (mm5_test=.true.) - - integer, dimension(:), allocatable :: level - real(kind=RKIND), dimension(:), allocatable :: t_surf, t_msl - real(kind=RKIND) :: plo , phi , tlo, thi , zlo , zhi - real(kind=RKIND) :: p_at_pconst , t_at_pconst , z_at_pconst, z_half_lowest - - logical :: l1, l2, l3, found - - ! Find least zeta level that is PCONST Pa above the surface. We later use this - ! level to extrapolate a surface pressure and temperature, which is supposed - ! to reduce the effect of the diurnal heating cycle in the pressure field. - - if (.not.allocated(level)) allocate(level(ncol)) - if (.not.allocated(t_surf)) allocate(t_surf(ncol)) - if (.not.allocated(t_msl)) allocate(t_msl(ncol)) - - do icol = 1 , ncol - level(icol) = -1 - - k = 1 - found = .false. - do while ( (.not. found) .and. (k.le.nlev_in)) - if ( p(k,icol) .lt. p(1,icol)-pconst ) then - level(icol) = k - found = .true. - end if - k = k+1 - end do - - if ( level(icol) .eq. -1 ) then - write(0,*) 'Troubles finding level ', pconst,' above ground.' - write(0,*) 'Problems first occur at (',icol,')' - write(0,*) 'Surface pressure = ',p(1,icol),' hPa.' 
- write(0,*) '*** MSLP field will not be computed' - slp(:) = 0.0 - return - end if - - end do - - ! Get temperature PCONST hPa above surface. Use this to extrapolate - ! the temperature at the surface and down to sea level. - - do icol = 1 , ncol - - klo = max ( level(icol) - 1 , 1 ) - khi = min ( klo + 1 , nlev_in - 1 ) - - if ( klo .eq. khi ) then - write(0,*) 'Trapping levels are weird.' - write(0,*) 'klo = ',klo,', khi = ',khi, ': and they should not be equal.' - call mpas_dmpar_global_abort('ERROR: Error_trapping_levels') - end if - - plo = p(klo,icol) - phi = p(khi,icol) - tlo = t(klo,icol) * (1. + 0.608 * qv(klo,icol)) - thi = t(khi,icol) * (1. + 0.608 * qv(khi,icol)) - zlo = 0.5*(height(klo,icol)+height(klo+1,icol)) - zhi = 0.5*(height(khi,icol)+height(khi+1,icol)) - - p_at_pconst = p(1,icol) - pconst - t_at_pconst = thi-(thi-tlo)*log(p_at_pconst/phi)*log(plo/phi) - z_at_pconst = zhi-(zhi-zlo)*log(p_at_pconst/phi)*log(plo/phi) - - t_surf(icol) = t_at_pconst*(p(1,icol)/p_at_pconst)**(gamma*rr/grav) - t_msl(icol) = t_at_pconst+gamma*z_at_pconst - ! if (icol.eq.500) then - ! write(0,*) plo,phi,tlo,thi,zlo,zhi,p_at_pconst,t_at_pconst,z_at_pconst - ! write(0,*) t_surf(icol),t_msl(icol),level(icol),klo,khi - ! write(0,*) height(klo,icol),height(khi,icol),height(khi+1,icol) - ! endif - - end do - - ! If we follow a traditional computation, there is a correction to the sea level - ! temperature if both the surface and sea level temnperatures are *too* hot. - - if ( mm5_test ) then - kcount = 0 - do icol = 1 , ncol - l1 = t_msl(icol) .lt. tc - l2 = t_surf(icol) .le. tc - l3 = .not. l1 - if ( l2 .and. l3 ) then - t_msl(icol) = tc - else - t_msl(icol) = tc - 0.005*(t_surf(icol)-tc)**2 - kcount = kcount+1 - end if - end do - ! 
write(0,*) 'These number of points had t_msl adjusted ', kcount - end if - - do icol = 1 , ncol - z_half_lowest=0.5*(height(1,icol)+height(2,icol)) - slp(icol) = p(1,icol) * exp((2.*grav*z_half_lowest)/ & - (rr*(t_msl(icol)+t_surf(icol)))) - end do - - if (allocated(level)) deallocate(level) - if (allocated(t_surf)) deallocate(t_surf) - if (allocated(t_msl)) deallocate(t_msl) - - end subroutine compute_slp - -!================================================================================================== - end module mpas_atm_interp_diagnostics -!================================================================================================== diff --git a/src/core_atmosphere/mpas_atm_threading.F b/src/core_atmosphere/mpas_atm_threading.F new file mode 100644 index 0000000000..26251d1bb6 --- /dev/null +++ b/src/core_atmosphere/mpas_atm_threading.F @@ -0,0 +1,222 @@ +! Copyright (c) 2015, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +module mpas_atm_threading + + + contains + + + !----------------------------------------------------------------------- + ! routine mpas_atm_threading_init + ! + !> \brief Pre-computes thread loop bounds for cell, edge, and vertex elements + !> \author Michael Duda + !> \date 6 July 2015 + !> \details + !> This routine is responsible for computing thread loop bounds for cell, + !> edge, and vertex elements in each block of the input blocklist argument. + !> Starting and ending loop bounds are computed for these three element + !> types for all elements (e.g., nCells) as well as owned elements (e.g., + !> nCellsSolve). 
+ !> + !> When MPAS is compiled without OpenMP support, this routine computes loop + !> bounds as though there is just a single thread; otherwise, it is assumed + !> that all threads (given by OMP_get_num_threads()) will be used to + !> decompose each of the element ranges. + !> + !> At present, a return value of 0 is always returned in the optional + !> output argument, ierr. + ! + !----------------------------------------------------------------------- + subroutine mpas_atm_threading_init(blocklist, ierr) + + use mpas_derived_types, only : block_type + use mpas_pool_routines, only : mpas_pool_get_dimension, mpas_pool_add_dimension +#ifdef MPAS_OPENMP + use omp_lib +#endif + + implicit none + + type (block_type), pointer :: blocklist + integer, intent(out), optional :: ierr + + type (block_type), pointer :: block + integer :: threadid + integer, pointer :: nCells, nCellsSolve, nEdges, nEdgesSolve, nVertices, nVerticesSolve + + integer :: nThreads + integer, dimension(:), pointer :: cellThreadStart, cellThreadEnd + integer, dimension(:), pointer :: cellSolveThreadStart, cellSolveThreadEnd + integer, dimension(:), pointer :: edgeThreadStart, edgeThreadEnd + integer, dimension(:), pointer :: edgeSolveThreadStart, edgeSolveThreadEnd + integer, dimension(:), pointer :: vertexThreadStart, vertexThreadEnd + integer, dimension(:), pointer :: vertexSolveThreadStart, vertexSolveThreadEnd + + + if (present(ierr)) ierr = 0 + + block => blocklist + do while (associated(block)) +#ifdef MPAS_OPENMP +!$OMP PARALLEL +!$OMP MASTER + nThreads = OMP_get_num_threads() +!$OMP END MASTER +!$OMP END PARALLEL +#else + nThreads = 1 +#endif + + allocate(cellThreadStart(nThreads)) + allocate(cellThreadEnd(nThreads)) + allocate(cellSolveThreadStart(nThreads)) + allocate(cellSolveThreadEnd(nThreads)) + allocate(edgeThreadStart(nThreads)) + allocate(edgeThreadEnd(nThreads)) + allocate(edgeSolveThreadStart(nThreads)) + allocate(edgeSolveThreadEnd(nThreads)) + 
allocate(vertexThreadStart(nThreads)) + allocate(vertexThreadEnd(nThreads)) + allocate(vertexSolveThreadStart(nThreads)) + allocate(vertexSolveThreadEnd(nThreads)) + + call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) + call mpas_pool_get_dimension(block % dimensions, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(block % dimensions, 'nEdges', nEdges) + call mpas_pool_get_dimension(block % dimensions, 'nEdgesSolve', nEdgesSolve) + call mpas_pool_get_dimension(block % dimensions, 'nVertices', nVertices) + call mpas_pool_get_dimension(block % dimensions, 'nVerticesSolve', nVerticesSolve) + +#ifdef MPAS_OPENMP +!$OMP PARALLEL PRIVATE(threadid) + threadid = OMP_get_thread_num() + + cellThreadStart(threadid+1) = (threadid * nCells / nThreads) + 1 + cellThreadEnd(threadid+1) = ((threadid+1) * nCells / nThreads) + cellSolveThreadStart(threadid+1) = (threadid * nCellsSolve / nThreads) + 1 + cellSolveThreadEnd(threadid+1) = ((threadid+1) * nCellsSolve / nThreads) + edgeThreadStart(threadid+1) = (threadid * nEdges / nThreads) + 1 + edgeThreadEnd(threadid+1) = ((threadid+1) * nEdges / nThreads) + edgeSolveThreadStart(threadid+1) = (threadid * nEdgesSolve / nThreads) + 1 + edgeSolveThreadEnd(threadid+1) = ((threadid+1) * nEdgesSolve / nThreads) + vertexThreadStart(threadid+1) = (threadid * nVertices / nThreads) + 1 + vertexThreadEnd(threadid+1) = ((threadid+1) * nVertices / nThreads) + vertexSolveThreadStart(threadid+1) = (threadid * nVerticesSolve / nThreads) + 1 + vertexSolveThreadEnd(threadid+1) = ((threadid+1) * nVerticesSolve / nThreads) +!$OMP END PARALLEL +#else + cellThreadStart(1) = 1 + cellThreadEnd(1) = nCells + cellSolveThreadStart(1) = 1 + cellSolveThreadEnd(1) = nCellsSolve + edgeThreadStart(1) = 1 + edgeThreadEnd(1) = nEdges + edgeSolveThreadStart(1) = 1 + edgeSolveThreadEnd(1) = nEdgesSolve + vertexThreadStart(1) = 1 + vertexThreadEnd(1) = nVertices + vertexSolveThreadStart(1) = 1 + vertexSolveThreadEnd(1) = nVerticesSolve +#endif + 
+ call mpas_pool_add_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_add_dimension(block % dimensions, 'cellThreadStart', cellThreadStart) + call mpas_pool_add_dimension(block % dimensions, 'cellThreadEnd', cellThreadEnd) + call mpas_pool_add_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_add_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + + call mpas_pool_add_dimension(block % dimensions, 'edgeThreadStart', edgeThreadStart) + call mpas_pool_add_dimension(block % dimensions, 'edgeThreadEnd', edgeThreadEnd) + call mpas_pool_add_dimension(block % dimensions, 'edgeSolveThreadStart', edgeSolveThreadStart) + call mpas_pool_add_dimension(block % dimensions, 'edgeSolveThreadEnd', edgeSolveThreadEnd) + + call mpas_pool_add_dimension(block % dimensions, 'vertexThreadStart', vertexThreadStart) + call mpas_pool_add_dimension(block % dimensions, 'vertexThreadEnd', vertexThreadEnd) + call mpas_pool_add_dimension(block % dimensions, 'vertexSolveThreadStart', vertexSolveThreadStart) + call mpas_pool_add_dimension(block % dimensions, 'vertexSolveThreadEnd', vertexSolveThreadEnd) + + ! + ! Because pools make an internal copy of dimensions, we can now + ! delete our copies of the thread bounds arrays + ! + deallocate(cellThreadStart) + deallocate(cellThreadEnd) + deallocate(cellSolveThreadStart) + deallocate(cellSolveThreadEnd) + deallocate(edgeThreadStart) + deallocate(edgeThreadEnd) + deallocate(edgeSolveThreadStart) + deallocate(edgeSolveThreadEnd) + deallocate(vertexThreadStart) + deallocate(vertexThreadEnd) + deallocate(vertexSolveThreadStart) + deallocate(vertexSolveThreadEnd) + + block => block % next + end do + + end subroutine mpas_atm_threading_init + + + !----------------------------------------------------------------------- + ! routine mpas_atm_threading_finalize + ! 
+ !> \brief Deallocates memory associated with threading in MPAS + !> \author Michael Duda + !> \date 6 July 2015 + !> \details + !> This routine deallocates any memory that was allocated in the call to + !> mpas_atm_threading_init() for each block in the input block list. + !> + !> At present, a return value of 0 is always returned in the optional + !> output argument, ierr. + ! + !----------------------------------------------------------------------- + subroutine mpas_atm_threading_finalize(blocklist, ierr) + + use mpas_derived_types, only : block_type + use mpas_pool_routines, only : mpas_pool_remove_dimension + + implicit none + + type (block_type), pointer :: blocklist + integer, intent(out), optional :: ierr + + type (block_type), pointer :: block + + + if (present(ierr)) ierr = 0 + + block => blocklist + do while (associated(block)) + + call mpas_pool_remove_dimension(block % dimensions, 'nThreads') + + call mpas_pool_remove_dimension(block % dimensions, 'cellThreadStart') + call mpas_pool_remove_dimension(block % dimensions, 'cellThreadEnd') + call mpas_pool_remove_dimension(block % dimensions, 'cellSolveThreadStart') + call mpas_pool_remove_dimension(block % dimensions, 'cellSolveThreadEnd') + + call mpas_pool_remove_dimension(block % dimensions, 'edgeThreadStart') + call mpas_pool_remove_dimension(block % dimensions, 'edgeThreadEnd') + call mpas_pool_remove_dimension(block % dimensions, 'edgeSolveThreadStart') + call mpas_pool_remove_dimension(block % dimensions, 'edgeSolveThreadEnd') + + call mpas_pool_remove_dimension(block % dimensions, 'vertexThreadStart') + call mpas_pool_remove_dimension(block % dimensions, 'vertexThreadEnd') + call mpas_pool_remove_dimension(block % dimensions, 'vertexSolveThreadStart') + call mpas_pool_remove_dimension(block % dimensions, 'vertexSolveThreadEnd') + + block => block % next + end do + + end subroutine mpas_atm_threading_finalize + +end module mpas_atm_threading diff --git a/src/core_atmosphere/physics/Makefile 
b/src/core_atmosphere/physics/Makefile index e5e3111818..8aa3ca3046 100644 --- a/src/core_atmosphere/physics/Makefile +++ b/src/core_atmosphere/physics/Makefile @@ -10,6 +10,7 @@ dummy: OBJS_init = \ mpas_atmphys_constants.o \ mpas_atmphys_date_time.o \ + mpas_atmphys_functions.o \ mpas_atmphys_utilities.o \ mpas_atmphys_o3climatology.o @@ -26,10 +27,14 @@ OBJS = \ mpas_atmphys_driver_radiation_lw.o \ mpas_atmphys_driver_radiation_sw.o \ mpas_atmphys_driver_sfclayer.o \ + mpas_atmphys_finalize.o \ mpas_atmphys_init.o \ + mpas_atmphys_init_microphysics.o \ mpas_atmphys_landuse.o \ mpas_atmphys_lsm_noahinit.o \ mpas_atmphys_manager.o \ + mpas_atmphys_driver_oml.o \ + mpas_atmphys_packages.o \ mpas_atmphys_rrtmg_lwinit.o \ mpas_atmphys_rrtmg_swinit.o \ mpas_atmphys_todynamics.o \ @@ -63,7 +68,8 @@ mpas_atmphys_camrad_init.o: \ ./physics_wrf/module_ra_cam_support.o mpas_atmphys_control.o: \ - mpas_atmphys_utilities.o + mpas_atmphys_utilities.o \ + mpas_atmphys_vars.o mpas_atmphys_driver.o: \ mpas_atmphys_driver_cloudiness.o \ @@ -74,6 +80,7 @@ mpas_atmphys_driver.o: \ mpas_atmphys_driver_radiation_lw.o \ mpas_atmphys_driver_radiation_sw.o \ mpas_atmphys_driver_sfclayer.o \ + mpas_atmphys_driver_oml.o \ mpas_atmphys_constants.o \ mpas_atmphys_interface.o \ mpas_atmphys_update.o \ @@ -81,14 +88,20 @@ mpas_atmphys_driver_cloudiness.o: \ mpas_atmphys_constants.o \ - mpas_atmphys_vars.o + mpas_atmphys_vars.o \ + ./physics_wrf/module_mp_thompson_cldfra3.o mpas_atmphys_driver_convection.o: \ mpas_atmphys_constants.o \ mpas_atmphys_utilities.o \ mpas_atmphys_vars.o \ + ./physics_wrf/module_cu_gf.mpas.o \ ./physics_wrf/module_cu_kfeta.o \ - ./physics_wrf/module_cu_tiedtke.o + ./physics_wrf/module_cu_tiedtke.o \ + ./physics_wrf/module_cu_ntiedtke.o + +mpas_atmphys_finalize.o: \ + ./physics_wrf/module_mp_thompson.o mpas_atmphys_driver_gwdo.o: \ mpas_atmphys_vars.o \ @@ -103,14 +119,22 @@
mpas_atmphys_driver_lsm.o: \ mpas_atmphys_driver_microphysics.o: \ mpas_atmphys_constants.o \ + mpas_atmphys_init_microphysics.o \ mpas_atmphys_interface.o \ mpas_atmphys_vars.o \ ./physics_wrf/module_mp_kessler.o \ + ./physics_wrf/module_mp_thompson.o \ ./physics_wrf/module_mp_wsm6.o +mpas_atmphys_driver_oml.o: \ + mpas_atmphys_constants.o \ + mpas_atmphys_vars.o \ + ./physics_wrf/module_sf_oml.o + mpas_atmphys_driver_pbl.o: \ mpas_atmphys_constants.o \ mpas_atmphys_vars.o \ + ./physics_wrf/module_bl_mynn.o \ ./physics_wrf/module_bl_ysu.o mpas_atmphys_driver_radiation_lw.o: \ @@ -135,6 +159,7 @@ mpas_atmphys_driver_radiation_sw.o: \ mpas_atmphys_driver_sfclayer.o: \ mpas_atmphys_constants.o \ mpas_atmphys_vars.o \ + ./physics_wrf/module_sf_mynn.o \ ./physics_wrf/module_sf_sfclay.o mpas_atmphys_init.o: \ @@ -147,6 +172,9 @@ mpas_atmphys_init.o: \ mpas_atmphys_landuse.o \ mpas_atmphys_o3climatology.o +mpas_atmphys_init_microphysics.o: \ + ./physics_wrf/module_mp_thompson.o + mpas_atmphys_interface.o: \ mpas_atmphys_constants.o \ mpas_atmphys_vars.o @@ -184,7 +212,8 @@ mpas_atmphys_rrtmg_swinit.o: \ ./physics_wrf/module_ra_rrtmg_sw.o mpas_atmphys_todynamics.o: \ - mpas_atmphys_constants.o + mpas_atmphys_constants.o \ + mpas_atmphys_vars.o mpas_atmphys_update_surface.o: \ mpas_atmphys_date_time.o \ @@ -207,7 +236,7 @@ clean: $(RM) $@ $*.mod ifeq "$(GEN_F90)" "true" $(CPP) $(CPPFLAGS) $(COREDEF) $(HYDROSTATIC) $(CPPINCLUDES) $< > $*.f90 - $(FC) $(FFLAGS) -c $*.f90 $(FCINCLUDES) -I../../framework -I../../operators -I./physics_wrf -I../../external/esmf_time_f90 + $(FC) $(FFLAGS) -c $*.f90 $(FCINCLUDES) -I.. -I../../framework -I../../operators -I./physics_wrf -I../../external/esmf_time_f90 else - $(FC) $(CPPFLAGS) $(COREDEF) $(HYDROSATIC) $(FFLAGS) -c $*.F $(CPPINCLUDES) $(FCINCLUDES) -I../../framework -I../../operators -I./physics_wrf -I../../external/esmf_time_f90 + $(FC) $(CPPFLAGS) $(COREDEF) $(HYDROSTATIC) $(FFLAGS) -c $*.F $(CPPINCLUDES) $(FCINCLUDES) -I..
-I../../framework -I../../operators -I./physics_wrf -I../../external/esmf_time_f90 endif diff --git a/src/core_atmosphere/physics/mpas_atmphys_camrad_init.F b/src/core_atmosphere/physics/mpas_atmphys_camrad_init.F index 9a1658d5a1..3b58a1c9d1 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_camrad_init.F +++ b/src/core_atmosphere/physics/mpas_atmphys_camrad_init.F @@ -11,7 +11,7 @@ #define DM_BCAST_MACRO(A) call mpas_dmpar_bcast_reals(dminfo,size(A),A) #endif -!================================================================================================== +!================================================================================================================= module mpas_atmphys_camrad_init use mpas_dmpar use mpas_kind_types @@ -29,35 +29,34 @@ module mpas_atmphys_camrad_init public:: camradinit -!>\brief Initialization of CAM radiation codes using MPAS MPI decomposition. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> subroutine camradinit calls the main subroutines needed to initialize the long- and short-wave -!> CAM radiation codes, and read input data from auxillary files. -!> -!> subroutines called in mpas_atmphys_camrad_init: -!> ----------------------------------------------- -!> radini :initialization of radiation constants. -!> esinti :initialization of saturation vapor pressures. -!> oznini :initialization of climatological monthly-mean ozone profiles. -!> aerosol_init:initialization of aerosol optical properties. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * added initialization of variable mxaerl which is the number of layers below 900 hPa in -!> which background aerosols are present. mxaerl is computed using the pressure-base array. -!> -> added diag in the argument list of subroutines camradinit and aerosol_init. -!> -> in subroutine aerosol_init, added initialization of variable mxaerl. -!> Laura D. 
Fowler (birch.ucar.edu) / 2013-07-01. -!> * moved the arrays pin and ozmixm from the mesh structure to the atm_input structure in -!> subroutine oznini. -!> Laura D. Fowler (birch.ucar.edu) / 2013-07-08. -!> * Replaced the variable g (that originally pointed to gravity) with gravity, for simplicity. -!> Laura D. Fowler (laura@ucar.edu) / 2014-03-21. -!> * Modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +!Initialization of CAM radiation codes using MPAS MPI decomposition. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutine camradinit calls the main subroutines needed to initialize the long- and short-wave +! CAM radiation codes, and read input data from auxillary files. +! +! subroutines called in mpas_atmphys_camrad_init: +! ----------------------------------------------- +! radini :initialization of radiation constants. +! esinti :initialization of saturation vapor pressures. +! oznini :initialization of climatological monthly-mean ozone profiles. +! aerosol_init:initialization of aerosol optical properties. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * added initialization of variable mxaerl which is the number of layers below 900 hPa in +! which background aerosols are present. mxaerl is computed using the pressure-base array. +! -> added diag in the argument list of subroutines camradinit and aerosol_init. +! -> in subroutine aerosol_init, added initialization of variable mxaerl. +! Laura D. Fowler (birch.ucar.edu) / 2013-07-01. +! * moved the arrays pin and ozmixm from the mesh structure to the atm_input structure in +! subroutine oznini. +! Laura D. Fowler (birch.ucar.edu) / 2013-07-08. +! * Replaced the variable g (that originally pointed to gravity) with gravity, for simplicity. +! Laura D. Fowler (laura@ucar.edu) / 2014-03-21. +! * Modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. 
!local parameters: @@ -67,9 +66,9 @@ module mpas_atmphys_camrad_init contains -!================================================================================================== +!================================================================================================================= subroutine camradinit(dminfo,mesh,atm_input,diag,state,time_lev) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info),intent(in):: dminfo @@ -152,9 +151,9 @@ subroutine camradinit(dminfo,mesh,atm_input,diag,state,time_lev) end subroutine camradinit -!================================================================================================== +!================================================================================================================= subroutine radini(dminfo,gravx,cpairx,epsilox,stebolx,pstdx) -!-------------------------------------------------------------------------------------------------- +!================================================================================================================= ! ! Purpose: ! 
Initialize various constants for radiation scheme; note that @@ -248,9 +247,9 @@ subroutine radini(dminfo,gravx,cpairx,epsilox,stebolx,pstdx) end subroutine radini -!================================================================================================== +!================================================================================================================= subroutine radaeini(dminfo,pstdx,mwdryx,mwco2x) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info),intent(in):: dminfo @@ -304,7 +303,7 @@ subroutine radaeini(dminfo,pstdx,mwdryx,mwco2x) integer:: i_te,i_rh -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- !... constants to set: p0 = pstdx @@ -394,9 +393,9 @@ subroutine radaeini(dminfo,pstdx,mwdryx,mwco2x) end subroutine radaeini -!================================================================================================== +!================================================================================================================= subroutine aerosol_init(dminfo,mesh,diag,state,time_lev) -!================================================================================================== +!================================================================================================================= !This subroutine assumes a uniform aerosol distribution in both time and space. It should be !modified if aerosol data are available from WRF-CHEM or other sources. 
@@ -439,7 +438,7 @@ subroutine aerosol_init(dminfo,mesh,diag,state,time_lev) 0.866419970989227 , 0.901350021362305 , 0.930540025234222 , & 0.954590022563934 , 0.974179983139038 , 0.990000009536743 , 1/ -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- !initialization: call mpas_pool_get_dimension(mesh,'nCells',nCells) @@ -556,9 +555,9 @@ subroutine aerosol_init(dminfo,mesh,diag,state,time_lev) end subroutine aerosol_init -!================================================================================================== +!================================================================================================================= subroutine aer_optics_initialize(dminfo) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info):: dminfo @@ -597,7 +596,7 @@ subroutine aer_optics_initialize(dminfo) logical:: opened character(len=StrKIND):: errmess -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- !write(0,*) '--- enter subroutine aer_optics_initialize:' @@ -743,9 +742,9 @@ subroutine aer_optics_initialize(dminfo) end subroutine aer_optics_initialize -!================================================================================================== +!================================================================================================================= subroutine oznini(mesh,atm_input) -!================================================================================================== 
+!================================================================================================================= !This subroutine assumes a uniform distribution of ozone concentration. It should be replaced !with monthly climatology varying ozone distribution. @@ -776,7 +775,7 @@ subroutine oznini(mesh,atm_input) !real(Kind=RKIND),dimension(lonsiz,levsiz,latsiz,num_months):: ozmixin real(Kind=RKIND),dimension(:,:,:,:),allocatable:: ozmixin -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_dimension(mesh,'nCells',nCells) call mpas_pool_get_dimension(mesh,'nMonths',num_months) @@ -880,6 +879,6 @@ subroutine oznini(mesh,atm_input) end subroutine oznini -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_camrad_init -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_constants.F b/src/core_atmosphere/physics/mpas_atmphys_constants.F index 943b16f27b..12433a699b 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_constants.F +++ b/src/core_atmosphere/physics/mpas_atmphys_constants.F @@ -5,41 +5,41 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! 
-!================================================================================================== +!================================================================================================================= module mpas_atmphys_constants use mpas_kind_types - use mpas_constants, only : pii, cp, gravity, R_d => rgas + use mpas_constants, only : pii, cp, gravity, R_d => rgas, rvord implicit none public save -!>\brief defines the constants needed for the physics parameterizations. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * Replaced the variable g (that originally pointed to gravity) with gravity, for simplicity. -!> Laura D. Fowler (laura@ucar.edu) / 2014-03-21. -!> * Removed the constraint of only using RKIND from mpas_kind_types. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-18. -!> * added empty subroutine atmphys_constants_init that does not do anything, but needed for -!> compiling MPAS with some compilers. -!> Laura D. Fowler (laura@ucar.edu) / 2015-01-12. +!defines the constants needed for the physics parameterizations. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * Replaced the variable g (that originally pointed to gravity) with gravity, for simplicity. +! Laura D. Fowler (laura@ucar.edu) / 2014-03-21. +! * Removed the constraint of only using RKIND from mpas_kind_types. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +! * added empty subroutine atmphys_constants_init that does not do anything, but needed for +! compiling MPAS with some compilers. +! Laura D. Fowler (laura@ucar.edu) / 2015-01-12. +! * added the variable rvord needed in the MYNN planetary boundary layer scheme from WRF 3.8. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-14. 
-!================================================================================================== +!================================================================================================================= real(kind=RKIND),parameter:: c0 = 0.00000 real(kind=RKIND),parameter:: c1 = 1.00000 - real(kind=RKIND),parameter:: P0 = 100000. !reference pressure [Pa] - real(kind=RKIND),parameter:: t00 = 273.15 !reference temperarure [K] - real(kind=RKIND),parameter:: R_v = 461.6 !gas constant for water vapor [J/kg/K] + real(kind=RKIND),parameter:: P0 = 100000. !reference pressure [Pa] + real(kind=RKIND),parameter:: t00 = 273.15 !reference temperarure [K] + real(kind=RKIND),parameter:: R_v = 461.6 !gas constant for water vapor [J/kg/K] real(kind=RKIND),parameter:: ep_1 = R_v/R_d-1. real(kind=RKIND),parameter:: ep_2 = R_d/R_v real(kind=RKIND),parameter:: cpv = 4.*R_v @@ -57,9 +57,9 @@ module mpas_atmphys_constants real(kind=RKIND),parameter:: svp3 = 29.65 real(kind=RKIND),parameter:: svpt0 = 273.15 - real(kind=RKIND),parameter:: xlv = 2.50e6 !latent heat of vaporization [J/kg] - real(kind=RKIND),parameter:: xlf = 3.50e5 !latent heat of fusion [J/kg] - real(kind=RKIND),parameter:: xls = xlv + xlf !latent heat of sublimation [J/kg] + real(kind=RKIND),parameter:: xlv = 2.50e6 !latent heat of vaporization [J/kg] + real(kind=RKIND),parameter:: xlf = 3.50e5 !latent heat of fusion [J/kg] + real(kind=RKIND),parameter:: xls = xlv + xlf !latent heat of sublimation [J/kg] real(kind=RKIND),parameter:: xlv0 = 3.15e6 real(kind=RKIND),parameter:: xlv1 = 2370. @@ -76,20 +76,20 @@ module mpas_atmphys_constants real(kind=RKIND),parameter:: psat = 610.78 !constants specific to long- and short-wave radiation codes: -!real(kind=RKIND),parameter:: solcon_0 = 1365. !solar constant [W/m2] - real(kind=RKIND),parameter:: solcon_0 = 1370. !solar constant [W/m2] - real(kind=RKIND),parameter:: degrad = 3.1415926/180. 
!conversion from degree to radiant [-] +!real(kind=RKIND),parameter:: solcon_0 = 1365. !solar constant [W/m2] + real(kind=RKIND),parameter:: solcon_0 = 1370. !solar constant [W/m2] + real(kind=RKIND),parameter:: degrad = 3.1415926/180. !conversion from degree to radiant [-] real(kind=RKIND),parameter:: dpd = 360./365. contains -!================================================================================================== +!================================================================================================================= subroutine atmphys_constants_init() -!================================================================================================== +!================================================================================================================= !dummy subroutine that does not do anything. end subroutine atmphys_constants_init -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_constants -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_control.F b/src/core_atmosphere/physics/mpas_atmphys_control.F index 555cd7d53d..4ad78ce79a 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_control.F +++ b/src/core_atmosphere/physics/mpas_atmphys_control.F @@ -5,61 +5,83 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! 
-!================================================================================================== +!================================================================================================================= module mpas_atmphys_control + use mpas_dmpar use mpas_kind_types use mpas_derived_types use mpas_pool_routines use mpas_atmphys_utilities + use mpas_atmphys_vars, only: l_mp_tables implicit none private public:: physics_namelist_check, & - physics_registry_init + physics_registry_init, & + physics_tables_init logical,public:: moist_physics -!>\brief MPAS control and initialization routines. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> mpas_atmphys_control contains subroutines called during the initialization. -!> -!> subroutines called in mpas_atmphys_control: -!> ------------------------------------------- -!> physics_namelist_check: checks that physics namelist parameters are defined correctly. -!> physics_registry_init : initializes thicknesses of soil layers for NOAH land-surface scheme. -!> physics_idealized_init: initializes physics variables needed to run idealized cases. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * removed the namelist option config_eddy_scheme and associated sourcecode. -!> * removed the namelist option config_conv_shallow_scheme and associated sourcecode. -!> * removed controls to the updated Kain-Fritsch convection scheme. -!> Laura D. Fowler (laura@ucar.edu) / 2013-05-29. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * removed subroutine physics_idealized_init, also available in mpas_init_atm_cases.F in -!> core_init_atmosphere. -!> Laura D. Fowler (laura@ucar.edu) / 2014-08-11. 
-!> * renamed config_conv_deep_scheme to config_convection_scheme. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +!MPAS control and initialization routines. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines called in mpas_atmphys_control: +! ------------------------------------------- +! * physics_namelist_check: checks that physics namelist parameters are defined correctly. +! * physics_registry_init : initializes thicknesses of soil layers for NOAH land-surface scheme. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * removed the namelist option config_eddy_scheme and associated sourcecode. +! * removed the namelist option config_conv_shallow_scheme and associated sourcecode. +! * removed controls to the updated Kain-Fritsch convection scheme. +! Laura D. Fowler (laura@ucar.edu) / 2013-05-29. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * removed subroutine physics_idealized_init, also available in mpas_init_atm_cases.F in core_init_atmosphere. +! Laura D. Fowler (laura@ucar.edu) / 2014-08-11. +! * renamed config_conv_deep_scheme to config_convection_scheme. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +! * renamed "wsm6" to "mp_wsm6" and "kessler" to "mp_kessler". +! Laura D. Fowler (laura@ucar.edu) / 2016-03-09. +! * renamed "kain_fritsch" to "cu_kain_fritsch" and "tiedtke" to "cu_tiedtke". +! Laura D. Fowler (laura@ucar.edu) / 2016-03-22. +! * renamed "ysu" to "bl_ysu", "ysu_gwdo" to "bl_gwdo_ysu", and "monin_obukhov" to "sf_monin_obukhov". +! Laura D. Fowler (laura@ucar.edu) / 2016-03-25. +! * added the option mp_thompson. +! Laura D. Fowler (laura@ucar.edu) / 2016-03-25. +! * added the option cu_grell_freitas. +! Laura D. Fowler (laura@ucar.edu) / 2016-03-31. +! 
* added the options sf_mynn and bl_mynn for the MYNN parameterization from WRF version 3.6.1. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-11. +! * added the option cu_ntiedtke for the "new" Tiedtke parameterization of convection from WRF version 3.8.1. +! Laura D. Fowler (laura@ucar.edu) / 2016-09-19. +! * added the physics suite "convection_scale_aware" (see below for the physics options used in the suite). +! Laura D. Fowler (laura@ucar.edu) / 2016-10-28. +! * added the subroutine physics_tables_init which checks if the files containing the look-up tables for the +! Thompson cloud microphysics are available or not. +! Laura D. Fowler (laura@ucar.edu) / 2016-11-01. +! * modified checking the config_gwdo_scheme option to allow bl_ysu_gwdo to be run when the MYNN pbl and surface +! layer scheme options are chosen. +! Laura D. Fowler (laura@ucar.edu) / 2016-12-22. +! * modified logic in subroutine physics_tables_init so that the Thompson microphysics tables are read in each +! MPI task. +! Laura D. Fowler (laura@ucar.edu) / 2016-12-30.
contains -!================================================================================================== - subroutine physics_namelist_check(mesh,configs) -!================================================================================================== +!================================================================================================================= + subroutine physics_namelist_check(configs) +!================================================================================================================= !input arguments: - type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(in):: configs !local pointers: @@ -74,7 +96,9 @@ subroutine physics_namelist_check(mesh,configs) config_radt_sw_scheme, & config_sfclayer_scheme -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine physics_namelist_check:' call mpas_pool_get_config(configs,'config_physics_suite' ,config_physics_suite ) call mpas_pool_get_config(configs,'config_microp_scheme' ,config_microp_scheme ) @@ -87,32 +111,47 @@ subroutine physics_namelist_check(mesh,configs) call mpas_pool_get_config(configs,'config_radt_sw_scheme' ,config_radt_sw_scheme ) call mpas_pool_get_config(configs,'config_sfclayer_scheme' ,config_sfclayer_scheme ) + write(0,*) + write(0,*) '----- Setting up physics suite '''//trim(config_physics_suite)//''' -----' + ! - ! Setup schemes according to the selected physics suite + !setup schemes according to the selected physics suite: ! 
if (trim(config_physics_suite) == 'mesoscale_reference') then - if (trim(config_microp_scheme) == 'suite') config_microp_scheme = 'wsm6' - if (trim(config_convection_scheme) == 'suite') config_convection_scheme = 'tiedtke' - if (trim(config_pbl_scheme) == 'suite') config_pbl_scheme = 'ysu' - if (trim(config_gwdo_scheme) == 'suite') config_gwdo_scheme = 'off' - if (trim(config_radt_lw_scheme) == 'suite') config_radt_lw_scheme = 'rrtmg_lw' - if (trim(config_radt_sw_scheme) == 'suite') config_radt_sw_scheme = 'rrtmg_sw' - if (trim(config_radt_cld_scheme) == 'suite') config_radt_cld_scheme = 'cld_fraction' - if (trim(config_sfclayer_scheme) == 'suite') config_sfclayer_scheme = 'monin_obukhov' - if (trim(config_lsm_scheme) == 'suite') config_lsm_scheme = 'noah' + if (trim(config_microp_scheme) == 'suite') config_microp_scheme = 'mp_wsm6' + if (trim(config_convection_scheme) == 'suite') config_convection_scheme = 'cu_ntiedtke' + if (trim(config_pbl_scheme) == 'suite') config_pbl_scheme = 'bl_ysu' + if (trim(config_gwdo_scheme) == 'suite') config_gwdo_scheme = 'bl_ysu_gwdo' + if (trim(config_radt_lw_scheme) == 'suite') config_radt_lw_scheme = 'rrtmg_lw' + if (trim(config_radt_sw_scheme) == 'suite') config_radt_sw_scheme = 'rrtmg_sw' + if (trim(config_radt_cld_scheme) == 'suite') config_radt_cld_scheme = 'cld_fraction' + if (trim(config_sfclayer_scheme) == 'suite') config_sfclayer_scheme = 'sf_monin_obukhov' + if (trim(config_lsm_scheme) == 'suite') config_lsm_scheme = 'noah' + + else if (trim(config_physics_suite) == 'convection_permitting') then + + if (trim(config_microp_scheme) == 'suite') config_microp_scheme = 'mp_thompson' + if (trim(config_convection_scheme) == 'suite') config_convection_scheme = 'cu_grell_freitas' + if (trim(config_pbl_scheme) == 'suite') config_pbl_scheme = 'bl_mynn' + if (trim(config_gwdo_scheme) == 'suite') config_gwdo_scheme = 'bl_ysu_gwdo' + if (trim(config_radt_lw_scheme) == 'suite') config_radt_lw_scheme = 'rrtmg_lw' + if 
(trim(config_radt_sw_scheme) == 'suite') config_radt_sw_scheme = 'rrtmg_sw' + if (trim(config_radt_cld_scheme) == 'suite') config_radt_cld_scheme = 'cld_fraction' + if (trim(config_sfclayer_scheme) == 'suite') config_sfclayer_scheme = 'sf_mynn' + if (trim(config_lsm_scheme) == 'suite') config_lsm_scheme = 'noah' else if (trim(config_physics_suite) == 'none') then - if (trim(config_microp_scheme) == 'suite') config_microp_scheme = 'off' + if (trim(config_microp_scheme) == 'suite') config_microp_scheme = 'off' if (trim(config_convection_scheme) == 'suite') config_convection_scheme = 'off' - if (trim(config_pbl_scheme) == 'suite') config_pbl_scheme = 'off' - if (trim(config_gwdo_scheme) == 'suite') config_gwdo_scheme = 'off' - if (trim(config_radt_lw_scheme) == 'suite') config_radt_lw_scheme = 'off' - if (trim(config_radt_sw_scheme) == 'suite') config_radt_sw_scheme = 'off' - if (trim(config_radt_cld_scheme) == 'suite') config_radt_cld_scheme = 'off' - if (trim(config_sfclayer_scheme) == 'suite') config_sfclayer_scheme = 'off' - if (trim(config_lsm_scheme) == 'suite') config_lsm_scheme = 'off' + if (trim(config_pbl_scheme) == 'suite') config_pbl_scheme = 'off' + if (trim(config_gwdo_scheme) == 'suite') config_gwdo_scheme = 'off' + if (trim(config_radt_lw_scheme) == 'suite') config_radt_lw_scheme = 'off' + if (trim(config_radt_sw_scheme) == 'suite') config_radt_sw_scheme = 'off' + if (trim(config_radt_cld_scheme) == 'suite') config_radt_cld_scheme = 'off' + if (trim(config_sfclayer_scheme) == 'suite') config_sfclayer_scheme = 'off' + if (trim(config_lsm_scheme) == 'suite') config_lsm_scheme = 'off' else @@ -121,13 +160,13 @@ subroutine physics_namelist_check(mesh,configs) call physics_error_fatal(mpas_err_message) end if - !cloud microphysics scheme: - if(.not. (config_microp_scheme .eq. 'off' .or. & - config_microp_scheme .eq. 'kessler' .or. & - config_microp_scheme .eq. 'wsm6' )) then - + if(.not. (config_microp_scheme .eq. 'off' .or. & + config_microp_scheme .eq. 
'mp_kessler' .or. & + config_microp_scheme .eq. 'mp_thompson' .or. & + config_microp_scheme .eq. 'mp_wsm6')) then + write(mpas_err_message,'(A,A10)') 'illegal value for config_microp_scheme:', & trim(config_microp_scheme) call physics_error_fatal(mpas_err_message) @@ -135,9 +174,11 @@ subroutine physics_namelist_check(mesh,configs) endif !convection scheme: - if(.not. (config_convection_scheme .eq. 'off' .or. & - config_convection_scheme .eq. 'kain_fritsch' .or. & - config_convection_scheme .eq. 'tiedtke' )) then + if(.not. (config_convection_scheme .eq. 'off' .or. & + config_convection_scheme .eq. 'cu_grell_freitas' .or. & + config_convection_scheme .eq. 'cu_kain_fritsch' .or. & + config_convection_scheme .eq. 'cu_tiedtke' .or. & + config_convection_scheme .eq. 'cu_ntiedtke')) then write(mpas_err_message,'(A,A10)') 'illegal value for config_convection_scheme: ', & trim(config_convection_scheme) @@ -146,8 +187,9 @@ subroutine physics_namelist_check(mesh,configs) endif !pbl scheme: - if(.not. (config_pbl_scheme .eq. 'off' .or. & - config_pbl_scheme .eq. 'ysu')) then + if(.not. (config_pbl_scheme .eq. 'off' .or. & + config_pbl_scheme .eq. 'bl_mynn' .or. & + config_pbl_scheme .eq. 'bl_ysu')) then write(mpas_err_message,'(A,A10)') 'illegal value for pbl_scheme: ', & trim(config_pbl_scheme) @@ -157,18 +199,12 @@ subroutine physics_namelist_check(mesh,configs) !gravity wave drag over orography scheme: if(.not. (config_gwdo_scheme .eq. 'off' .or. & - config_gwdo_scheme .eq. 'ysu_gwdo')) then + config_gwdo_scheme .eq. 'bl_ysu_gwdo')) then write(mpas_err_message,'(A,A10)') 'illegal value for gwdo_scheme: ', & trim(config_gwdo_scheme) call physics_error_fatal(mpas_err_message) - elseif(config_gwdo_scheme .eq. 'ysu_gwdo' .and. config_pbl_scheme .ne. 
'ysu') then - - write(mpas_err_message,'(A,A10)') 'turn YSU PBL scheme on with config_gwdo = ysu_gwdo:', & - trim(config_gwdo_scheme) - call physics_error_fatal(mpas_err_message) - endif !lw radiation scheme: @@ -196,7 +232,8 @@ subroutine physics_namelist_check(mesh,configs) !cloud fraction for radiation schemes: if(.not. (config_radt_cld_scheme .eq. 'off' .or. & config_radt_cld_scheme .eq. 'cld_incidence' .or. & - config_radt_cld_scheme .eq. 'cld_fraction')) then + config_radt_cld_scheme .eq. 'cld_fraction' .or. & + config_radt_cld_scheme .eq. 'cld_fraction_thompson')) then write(mpas_err_message,'(A,A10)') 'illegal value for calculation of cloud fraction: ', & trim(config_radt_cld_scheme) @@ -218,13 +255,19 @@ subroutine physics_namelist_check(mesh,configs) endif !surface-layer scheme: - if(.not. (config_sfclayer_scheme .eq. 'off' .or. & - config_sfclayer_scheme .eq. 'monin_obukhov')) then + if(.not. (config_sfclayer_scheme .eq. 'off' .or. & + config_sfclayer_scheme .eq. 'sf_mynn' .or. & + config_sfclayer_scheme .eq. 
'sf_monin_obukhov')) then write(mpas_err_message,'(A,A10)') 'illegal value for surface layer scheme: ', & trim(config_sfclayer_scheme) call physics_error_fatal(mpas_err_message) - + else + if(config_pbl_scheme == 'bl_mynn') then + config_sfclayer_scheme = 'sf_mynn' + elseif(config_pbl_scheme == 'bl_ysu') then + config_sfclayer_scheme = 'sf_monin_obukhov' + endif endif !land-surface scheme: note that config_sfclayer_scheme must be defined for the land-surface @@ -257,25 +300,26 @@ subroutine physics_namelist_check(mesh,configs) write(0,*) ' ' write(0,*) ' config_microp_scheme = ', trim(config_microp_scheme) write(0,*) ' config_convection_scheme = ', trim(config_convection_scheme) - write(0,*) ' config_lsm_scheme = ', trim(config_lsm_scheme) write(0,*) ' config_pbl_scheme = ', trim(config_pbl_scheme) write(0,*) ' config_gwdo_scheme = ', trim(config_gwdo_scheme) write(0,*) ' config_radt_cld_scheme = ', trim(config_radt_cld_scheme) write(0,*) ' config_radt_lw_scheme = ', trim(config_radt_lw_scheme) write(0,*) ' config_radt_sw_scheme = ', trim(config_radt_sw_scheme) write(0,*) ' config_sfclayer_scheme = ', trim(config_sfclayer_scheme) - -! 
write(0,*) '--- end subroutine physics_namelist_check:' + write(0,*) ' config_lsm_scheme = ', trim(config_lsm_scheme) + write(0,*) end subroutine physics_namelist_check -!================================================================================================== +!================================================================================================================= subroutine physics_registry_init(mesh,configs,sfc_input) -!================================================================================================== +!================================================================================================================= -!input and inout arguments: +!input arguments: type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(in):: configs + +!inout arguments: type(mpas_pool_type),intent(inout):: sfc_input !local pointers: @@ -289,7 +333,7 @@ subroutine physics_registry_init(mesh,configs,sfc_input) !local variables: integer:: iCell -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_config(configs,'config_do_restart',config_do_restart) call mpas_pool_get_config(configs,'config_lsm_scheme',config_lsm_scheme) @@ -308,12 +352,10 @@ subroutine physics_registry_init(mesh,configs,sfc_input) case("noah") !initialize the thickness of the soil layers for the Noah scheme: do iCell = 1, nCells - if(landmask(iCell) == 1) then - dzs(1,iCell) = 0.10_RKIND - dzs(2,iCell) = 0.30_RKIND - dzs(3,iCell) = 0.60_RKIND - dzs(4,iCell) = 1.00_RKIND - endif + dzs(1,iCell) = 0.10_RKIND + dzs(2,iCell) = 0.30_RKIND + dzs(3,iCell) = 0.60_RKIND + dzs(4,iCell) = 1.00_RKIND enddo case default @@ -322,10 +364,61 @@ subroutine physics_registry_init(mesh,configs,sfc_input) endif +!write(0,*) '--- enter subroutine physics_namelist_check.' 
+!write(0,*) + end subroutine physics_registry_init -!================================================================================================== - end module mpas_atmphys_control -!================================================================================================== +!================================================================================================================= + subroutine physics_tables_init(dminfo,configs) +!================================================================================================================= + +!input arguments: + type(dm_info),intent(in):: dminfo + type(mpas_pool_type),intent(in):: configs + +!local variables: + character(len=StrKIND),pointer:: config_microp_scheme + logical:: l_qr_acr_qg,l_qr_acr_qs,l_qi_aut_qs,l_freezeH2O + +!----------------------------------------------------------------------------------------------------------------- + + l_mp_tables = .true. + + if(dminfo % my_proc_id == IO_NODE) then + call mpas_pool_get_config(configs,'config_microp_scheme',config_microp_scheme) + if(config_microp_scheme /= "mp_thompson") return + + l_qr_acr_qg = .false. + l_qr_acr_qs = .false. + l_qi_aut_qs = .false. + l_freezeH2O = .false. + + inquire(file='MP_THOMPSON_QRacrQG_DATA.DBL' ,exist=l_qr_acr_qg) + inquire(file='MP_THOMPSON_QRacrQS_DATA.DBL' ,exist=l_qr_acr_qs) + inquire(file='MP_THOMPSON_QIautQS_DATA.DBL' ,exist=l_qi_aut_qs) + inquire(file='MP_THOMPSON_freezeH2O_DATA.DBL',exist=l_freezeH2O) + +! write(0,*) +! write(0,*) '--- enter subroutine physics_tables_init:' +! write(0,*) 'l_qr_acr_qg = ',l_qr_acr_qg +! write(0,*) 'l_qr_acr_qs = ',l_qr_acr_qs +! write(0,*) 'l_qi_aut_qs = ',l_qi_aut_qs +! write(0,*) 'l_freezeH2O = ',l_freezeH2O + + if(.not. (l_qr_acr_qg .and. l_qr_acr_qs .and. l_qi_aut_qs .and. l_freezeH2O)) then + write(mpas_err_message,'(A)') & + '--- tables to run the Thompson cloud microphysics scheme do not exist: run build_tables first.' 
+ call physics_error_fatal(mpas_err_message) + endif +! write(0,*) 'l_mp_tables = ',l_mp_tables + + endif + + end subroutine physics_tables_init + +!================================================================================================================= + end module mpas_atmphys_control +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_date_time.F b/src/core_atmosphere/physics/mpas_atmphys_date_time.F index 926b72c04e..243ac7584a 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_date_time.F +++ b/src/core_atmosphere/physics/mpas_atmphys_date_time.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_date_time use mpas_kind_types @@ -18,32 +18,32 @@ module mpas_atmphys_date_time character(len=StrKIND),public:: current_date -!>\brief MPAS utility module for time management. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> subroutines called in mpas_atmphys_date_time: -!> --------------------------------------------- -!> get_julgmt : calculates current julian day. -!> split_date_char : used to extract actual date from input string. -!> monthly_interp_to_date: interpolates monthly-mean data to current julian day. -!> monthly_min_max : looks for min and max values from monthly-mean data set (greenfrac,...) 
-!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * in subroutine monthly_interp_to_date, change the length of variables day15 and mon from -!> StrKIND to 2 to input correctly the reference date to subroutine get_julgmt_date. -!> * in subroutines get_julgmt_date and split_date_char, changed the declaration of date_str -!> from StrKIND to *. -!> Laura D. Fowler (laura@ucar.edu) / 2013-10-18. +! MPAS utility module for time management. +! Laura D. Fowler (send comments to laura@ucar.edu). +! 2013-05-01. +! +! subroutines called in mpas_atmphys_date_time: +! --------------------------------------------- +! get_julgmt : calculates current julian day. +! split_date_char : used to extract actual date from input string. +! monthly_interp_to_date: interpolates monthly-mean data to current julian day. +! monthly_min_max : looks for min and max values from monthly-mean data set (greenfrac,...) +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * in subroutine monthly_interp_to_date, change the length of variables day15 and mon from +! StrKIND to 2 to input correctly the reference date to subroutine get_julgmt_date. +! * in subroutines get_julgmt_date and split_date_char, changed the declaration of date_str +! from StrKIND to *. +! Laura D. Fowler (laura@ucar.edu) / 2013-10-18. 
contains -!================================================================================================== + +!================================================================================================================= subroutine get_julgmt(date_str,julyr,julday,gmt) -!================================================================================================== +!================================================================================================================= !input arguments: character(len=*),intent(in):: date_str @@ -60,7 +60,7 @@ subroutine get_julgmt(date_str,julyr,julday,gmt) integer,dimension(12):: mmd data mmd /31,28,31,30,31,30,31,31,30,31,30,31/ -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call split_date_char(date_str,ny,nm,nd,nh,ni,ns,nt) @@ -77,9 +77,9 @@ subroutine get_julgmt(date_str,julyr,julday,gmt) end subroutine get_julgmt -!================================================================================================== +!================================================================================================================= subroutine split_date_char(date,century_year,month,day,hour,minute,second,ten_thousandth) -!================================================================================================== +!================================================================================================================= !input arguments: character(len=*),intent(in):: date @@ -87,7 +87,7 @@ subroutine split_date_char(date,century_year,month,day,hour,minute,second,ten_th !output arguments: integer,intent(out):: century_year,month,day,hour,minute,second,ten_thousandth -!-------------------------------------------------------------------------------------------------- 
+!----------------------------------------------------------------------------------------------------------------- read(date,fmt='( I4)') century_year read(date,fmt='( 5X,I2)') month @@ -99,9 +99,9 @@ subroutine split_date_char(date,century_year,month,day,hour,minute,second,ten_th end subroutine split_date_char -!================================================================================================== +!================================================================================================================= subroutine monthly_interp_to_date(npoints,date_str,field_in,field_out) -!================================================================================================== +!================================================================================================================= !input arguments: character(len=StrKIND),intent(in):: date_str @@ -121,8 +121,7 @@ subroutine monthly_interp_to_date(npoints,date_str,field_in,field_out) real(kind=RKIND):: gmt -!-------------------------------------------------------------------------------------------------- - +!----------------------------------------------------------------------------------------------------------------- !write(0,*) !write(0,*) '--- enter subroutine monthly_interp_to_date:' !write(0,*) '--- current_date = ',trim(date_str) @@ -171,13 +170,13 @@ subroutine monthly_interp_to_date(npoints,date_str,field_in,field_out) endif enddo find_month -! 
201 format(i6,3(1x,e15.8)) +!201 format(i6,3(1x,e15.8)) end subroutine monthly_interp_to_date -!================================================================================================== +!================================================================================================================= subroutine monthly_min_max(npoints,field_in,field_min,field_max) -!================================================================================================== +!================================================================================================================= !input arguments: integer,intent(in):: npoints @@ -190,7 +189,7 @@ subroutine monthly_min_max(npoints,field_in,field_min,field_max) integer:: n,nn real(kind=RKIND):: minner,maxxer -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- do n = 1, npoints minner = field_in(1,n) @@ -211,6 +210,6 @@ subroutine monthly_min_max(npoints,field_in,field_min,field_max) end subroutine monthly_min_max -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_date_time -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_driver.F b/src/core_atmosphere/physics/mpas_atmphys_driver.F index 9c6e5b9f69..5548aad2bc 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_driver.F +++ b/src/core_atmosphere/physics/mpas_atmphys_driver.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! 
distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_driver use mpas_kind_types use mpas_derived_types @@ -19,80 +19,93 @@ module mpas_atmphys_driver use mpas_atmphys_driver_radiation_lw use mpas_atmphys_driver_radiation_sw use mpas_atmphys_driver_sfclayer + use mpas_atmphys_driver_oml use mpas_atmphys_constants use mpas_atmphys_interface use mpas_atmphys_update use mpas_atmphys_vars, only: l_camlw,l_conv,l_radtlw,l_radtsw + use mpas_timer implicit none private public:: physics_driver -!>\brief MPAS top physics driver. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> subroutine physics_driver is the top physics driver from which separate drivers for all physics -!> parameterizations, except cloud microphysics parameterizations are called. -!> -!> subroutines called in mpas_atmphys_driver: -!> ------------------------------------------ -!> allocate_forall_physics : allocate local arrays defining atmospheric soundings (pressure,..) -!> allocate_cloudiness : allocate all local arrays used in driver_cloudiness. -!> allocate_convection : allocate all local arrays used in driver_convection. -!> allocate_gwdo : allocate all local arrays used in driver_gwdo. -!> allocate_lsm : allocate all local arrays used in driver_lsm. -!> allocate_pbl : allocate all local arrays used in driver_pbl. -!> allocate_radiation_lw : allocate all local arrays used in driver_radiation_lw. -!> allocate_radiation_sw : allocate all local arrays used in driver_radiation_sw. -!> allocate_sfclayer : allocate all local arrays used in driver_sfclayer. -!> -!> deallocate_forall_physics : deallocate local arrays defining atmospheric soundings. 
-!> deallocate_cloudiness : dedeallocate all local arrays used in driver_cloudiness. -!> deallocate_convection : deallocate all local arrays used in driver_convection. -!> deallocate_gwdo : deallocate all local arrays used in driver_gwdo. -!> deallocate_lsm : deallocate all local arrays used in driver_lsm. -!> deallocate_pbl : deallocate all local arrays used in driver_pbl. -!> deallocate_radiation_lw : deallocate all local arrays used in driver_radiation_lw. -!> deallocate_radiation_sw : deallocate all local arrays used in driver_radiation_sw. -!> deallocate_sfclayer : deallocate all local arrays used in driver_sfclayer. -!> -!> MPAS_to_physics : -!> driver_cloudiness : driver for parameterization of fractional cloudiness. -!> driver_convection : driver for parameterization of convection. -!> driver_gwdo : driver for parameterization of gravity wave drag over orography. -!> driver_lsm : driver for land-surface scheme. -!> driver_pbl : driver for planetary boundary layer scheme. -!> driver_radiation_sw : driver for short wave radiation schemes. -!> driver_radiation_lw : driver for long wave radiation schemes. -!> driver_sfclayer : driver for surface layer scheme. -!> update_convection_step1 : updates lifetime of deep convective clouds in Kain-Fritsch scheme. -!> update_convection_step2 : updates accumulated precipitation output from convection schemes. -!> update_radiation_diagnostics: updates accumualted radiation diagnostics from radiation schemes. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * removed call to calculate atmospheric soundings for the hydrostatic dynamical core. -!> Laura D. Fowler (2013-05-06). -!> * removed the namelist option config_eddy_scheme and associated sourcecode. -!> * removed the namelist option config_conv_shallow_scheme and associated sourcecode. -!> Laura D. Fowler (birch.ucar.edu) / 2013-05-29. 
-!> * added block%atm_input in calls to subroutines driver_radiation_lw amd driver_radiation_lw. -!> Laura D. Fowler (laura@ucar.edu) / 2013-07-03. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * renamed config_conv_deep_scheme to config_convection_scheme. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +!MPAS top physics driver. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutine physics_driver is the top physics driver from which separate drivers for all physics +! parameterizations, except cloud microphysics parameterizations are called. +! +! subroutines called in mpas_atmphys_driver: +! ------------------------------------------ +! allocate_forall_physics : allocate local arrays defining atmospheric soundings (pressure,..) +! allocate_cloudiness : allocate all local arrays used in driver_cloudiness. +! allocate_convection : allocate all local arrays used in driver_convection. +! allocate_gwdo : allocate all local arrays used in driver_gwdo. +! allocate_lsm : allocate all local arrays used in driver_lsm. +! allocate_pbl : allocate all local arrays used in driver_pbl. +! allocate_radiation_lw : allocate all local arrays used in driver_radiation_lw. +! allocate_radiation_sw : allocate all local arrays used in driver_radiation_sw. +! allocate_sfclayer : allocate all local arrays used in driver_sfclayer. +! +! deallocate_forall_physics : deallocate local arrays defining atmospheric soundings. +! deallocate_cloudiness : deallocate all local arrays used in driver_cloudiness. +! deallocate_convection : deallocate all local arrays used in driver_convection. +! deallocate_gwdo : deallocate all local arrays used in driver_gwdo. +! deallocate_lsm : deallocate all local arrays used in driver_lsm. +! deallocate_pbl : deallocate all local arrays used in driver_pbl. +! deallocate_radiation_lw : deallocate all local arrays used in driver_radiation_lw. +!
deallocate_radiation_sw : deallocate all local arrays used in driver_radiation_sw. +! deallocate_sfclayer : deallocate all local arrays used in driver_sfclayer. +! +! MPAS_to_physics : +! driver_cloudiness : driver for parameterization of fractional cloudiness. +! driver_convection : driver for parameterization of convection. +! driver_gwdo : driver for parameterization of gravity wave drag over orography. +! driver_lsm : driver for land-surface scheme. +! driver_pbl : driver for planetary boundary layer scheme. +! driver_radiation_sw : driver for short wave radiation schemes. +! driver_radiation_lw : driver for long wave radiation schemes. +! driver_sfclayer : driver for surface layer scheme. +! update_convection_step1 : updates lifetime of deep convective clouds in Kain-Fritsch scheme. +! update_convection_step2 : updates accumulated precipitation output from convection schemes. +! update_radiation_diagnostics: updates accumulated radiation diagnostics from radiation schemes. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * removed call to calculate atmospheric soundings for the hydrostatic dynamical core. +! Laura D. Fowler (2013-05-06). +! * removed the namelist option config_eddy_scheme and associated sourcecode. +! * removed the namelist option config_conv_shallow_scheme and associated sourcecode. +! Laura D. Fowler (birch.ucar.edu) / 2013-05-29. +! * added block%atm_input in calls to subroutines driver_radiation_lw and driver_radiation_lw. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-03. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * renamed config_conv_deep_scheme to config_convection_scheme. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +! * in the call to driver_convection, added block%configs needed for the implementation of the +! Grell-Freitas convection scheme. +! Laura D. Fowler (laura@ucar.edu) / 2016-03-30. +!
* modified the call to the subroutines driver_sfclayer and driver_pbl for the implementation +! of the MYNN surface layer scheme and PBL schemes. itimestep and block%configs are added to +! the argument list. +! Laura D. Fowler (laura@ucar.edu) / 2015-01-06. +! * now only call subroutine update_convection_step2 when config_convection_scheme is not off. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-13. +! * modified call to driver_cloudiness to accommodate the calculation of the cloud fraction with the Thompson +! cloud microphysics scheme. +! Laura D. Fowler (laura@ucar.edu) / 2016-06-04. contains -!================================================================================================== +!================================================================================================================= subroutine physics_driver(domain,itimestep,xtime_s) -!================================================================================================== +!================================================================================================================= !input arguments: integer,intent(in):: itimestep @@ -111,7 +124,7 @@ subroutine physics_driver(domain,itimestep,xtime_s) atm_input, & sfc_input - real(kind=RKIND),pointer:: config_bucket_radt + logical,pointer:: config_frac_seaice character(len=StrKIND),pointer:: config_bucket_update, & config_convection_scheme, & @@ -122,12 +135,23 @@ subroutine physics_driver(domain,itimestep,xtime_s) config_radt_sw_scheme, & config_sfclayer_scheme + logical, pointer:: config_oml1d + real(kind=RKIND),pointer:: config_bucket_radt + !local variables: type(block_type),pointer:: block integer:: time_lev + integer:: thread -!================================================================================================== + integer,pointer:: nThreads + integer,dimension(:),pointer:: cellSolveThreadStart, cellSolveThreadEnd +
+!================================================================================================================= +!write(0,*) +!write(0,*) '--- enter subroutine mpas_atmphys_driver:' + + call mpas_timer_start('physics driver') call mpas_pool_get_config(domain%configs,'config_convection_scheme',config_convection_scheme) call mpas_pool_get_config(domain%configs,'config_gwdo_scheme' ,config_gwdo_scheme ) @@ -138,6 +162,8 @@ subroutine physics_driver(domain,itimestep,xtime_s) call mpas_pool_get_config(domain%configs,'config_sfclayer_scheme' ,config_sfclayer_scheme ) call mpas_pool_get_config(domain%configs,'config_bucket_radt' ,config_bucket_radt ) call mpas_pool_get_config(domain%configs,'config_bucket_update' ,config_bucket_update ) + call mpas_pool_get_config(domain%configs,'config_frac_seaice' ,config_frac_seaice ) + call mpas_pool_get_config(domain%configs,'config_oml1d' ,config_oml1d ) if(config_convection_scheme .ne. 'off' .or. & config_lsm_scheme .ne. 'off' .or. & @@ -157,38 +183,70 @@ subroutine physics_driver(domain,itimestep,xtime_s) call mpas_pool_get_subpool(block%structs,'sfc_input' ,sfc_input ) call mpas_pool_get_subpool(block%structs,'tend_physics',tend_physics) + call mpas_pool_get_dimension(block % dimensions, 'nThreads', nThreads) + + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadStart', cellSolveThreadStart) + call mpas_pool_get_dimension(block % dimensions, 'cellSolveThreadEnd', cellSolveThreadEnd) + !allocate arrays shared by all physics parameterizations: call allocate_forall_physics !physics prep step: time_lev = 1 - call MPAS_to_physics(mesh,state,time_lev,diag,diag_physics) + +!$OMP PARALLEL DO + do thread=1,nThreads + call MPAS_to_physics(mesh,state,time_lev,diag,diag_physics, & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO !call to cloud scheme: if(l_radtlw .or. 
l_radtsw) then call allocate_cloudiness - call driver_cloudiness(diag_physics) +!$OMP PARALLEL DO + do thread=1,nThreads + call driver_cloudiness(block%configs,mesh,diag_physics,sfc_input, & + cellSolveThreadStart(thread),cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO endif !call to short wave radiation scheme: if(l_radtsw) then time_lev = 1 call allocate_radiation_sw(xtime_s) - call driver_radiation_sw(itimestep,block%configs,mesh,state,time_lev,diag_physics, & - atm_input,sfc_input,tend_physics,xtime_s) +!$OMP PARALLEL DO + do thread=1,nThreads + call driver_radiation_sw(itimestep,block%configs,mesh,state,time_lev,diag_physics, & + atm_input,sfc_input,tend_physics,xtime_s, & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO endif !call to long wave radiation scheme: if(l_radtlw) then time_lev = 1 call allocate_radiation_lw(xtime_s) - call driver_radiation_lw(xtime_s,block%configs,mesh,state,time_lev,diag_physics, & - atm_input,sfc_input,tend_physics) +!$OMP PARALLEL DO + do thread=1,nThreads + call driver_radiation_lw(xtime_s,block%configs,mesh,state,time_lev,diag_physics, & + atm_input,sfc_input,tend_physics, & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO endif !call to accumulate long- and short-wave diagnostics if needed: - if(config_bucket_update /= 'none' .and. config_bucket_radt .gt. 0._RKIND) & - call update_radiation_diagnostics(block%configs,mesh,diag_physics) + if(config_bucket_update /= 'none' .and. config_bucket_radt .gt. 0._RKIND) then +!$OMP PARALLEL DO + do thread=1,nThreads + call update_radiation_diagnostics(block%configs,mesh,diag_physics, & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + endif !deallocate all radiation arrays: if(config_radt_sw_scheme.ne.'off' .or. 
config_radt_lw_scheme.ne.'off') & @@ -198,41 +256,81 @@ subroutine physics_driver(domain,itimestep,xtime_s) !call to surface-layer scheme: if(config_sfclayer_scheme .ne. 'off') then - call allocate_sfclayer - call driver_sfclayer(mesh,diag_physics,sfc_input) - call deallocate_sfclayer + call allocate_sfclayer(config_frac_seaice) +!$OMP PARALLEL DO + do thread=1,nThreads + call driver_sfclayer(itimestep,block%configs,mesh,diag_physics,sfc_input, & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + call deallocate_sfclayer(config_frac_seaice) endif + !call to 1d ocean mixed-layer model + if(config_oml1d) call driver_oml1d(block%configs, mesh, diag, diag_physics, sfc_input) + !call to land-surface scheme: if(config_lsm_scheme .ne. 'off') then - call allocate_lsm - call driver_lsm(itimestep,block%configs,mesh,diag_physics,sfc_input) - call deallocate_lsm + call allocate_lsm(config_frac_seaice) +!$OMP PARALLEL DO + do thread=1,nThreads + call driver_lsm(itimestep,block%configs,mesh,diag_physics,sfc_input, & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + call deallocate_lsm(config_frac_seaice) endif !call to pbl schemes: if(config_pbl_scheme .ne. 'off' .and. config_sfclayer_scheme .ne. 'off') then call allocate_pbl - call driver_pbl(sfc_input,diag_physics,tend_physics) +!$OMP PARALLEL DO + do thread=1,nThreads + call driver_pbl(itimestep,block%configs,mesh,sfc_input,diag_physics,tend_physics, & + cellSolveThreadStart(thread),cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO call deallocate_pbl endif !call to gravity wave drag over orography scheme: if(config_gwdo_scheme .ne. 
'off') then call allocate_gwdo - call driver_gwdo(itimestep,mesh,sfc_input,diag_physics,tend_physics) +!$OMP PARALLEL DO + do thread=1,nThreads + call driver_gwdo(itimestep,block%configs,mesh,sfc_input,diag_physics,tend_physics, & + cellSolveThreadStart(thread),cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO call deallocate_gwdo endif !call to convection scheme: - call update_convection_step1(mesh,diag_physics,tend_physics) +!$OMP PARALLEL DO + do thread=1,nThreads + call update_convection_step1(diag_physics,tend_physics, & + cellSolveThreadStart(thread),cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO if(l_conv) then call allocate_convection - call driver_convection(itimestep,mesh,sfc_input,diag_physics,tend_physics) +!$OMP PARALLEL DO + do thread=1,nThreads + call driver_convection(itimestep,block%configs,mesh,sfc_input,diag_physics,tend_physics, & + cellSolveThreadStart(thread), cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO call deallocate_convection endif !update diagnostics: - call update_convection_step2(block%configs,mesh,diag_physics) + if(config_convection_scheme .ne. 'off') then +!$OMP PARALLEL DO + do thread=1,nThreads + call update_convection_step2(block%configs,diag_physics, & + cellSolveThreadStart(thread),cellSolveThreadEnd(thread)) + end do +!$OMP END PARALLEL DO + end if !deallocate arrays shared by all physics parameterizations: call deallocate_forall_physics @@ -241,12 +339,14 @@ subroutine physics_driver(domain,itimestep,xtime_s) end do endif -! write(0,*) -! write(0,*) '--- end physics_driver:' -! 
write(0,*) + + call mpas_timer_stop('physics driver') + +!write(0,*) '--- enter subroutine mpas_atmphys_driver:' +!write(0,*) end subroutine physics_driver -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_driver -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_driver_cloudiness.F b/src/core_atmosphere/physics/mpas_atmphys_driver_cloudiness.F index b8bda73192..077ddf4162 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_driver_cloudiness.F +++ b/src/core_atmosphere/physics/mpas_atmphys_driver_cloudiness.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_driver_cloudiness use mpas_kind_types use mpas_derived_types @@ -13,6 +13,7 @@ module mpas_atmphys_driver_cloudiness use mpas_atmphys_constants, only: ep_2 use mpas_atmphys_vars + use module_mp_thompson_cldfra3 implicit none private @@ -20,82 +21,135 @@ module mpas_atmphys_driver_cloudiness deallocate_cloudiness, & driver_cloudiness - integer,private:: i,j,k - - -!>\brief MPAS driver for parameterization of the diagnostic cloud fraction. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. 
-!> -!>\details -!> -!> subroutines in mpas_atmphys_driver_cloudiness: -!> ---------------------------------------------- -!> allocate_cloudiness : allocate local arrays for parameterization of diagnostic cloudiness. -!> deallocate_cloudiness: deallocate local arrays for parameterization of diagnostic cloudiness. -!> cloudiness_from_MPAS : initialize local arrays. -!> cloudiness_to_MPAS : copy local arrays to MPAS arrays. -!> driver_cloudiness : main driver (called from subroutine physics_driver). -!> calc_cldincidence : calculates the cloud fraction as 0 or 1, depending on cloud condensates. -!> calc_cldfraction : calculates the cloud fraction as a function of the relative humidity. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +!MPAS driver for parameterization of the diagnostic cloud fraction. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_driver_cloudiness: +! ---------------------------------------------- +! allocate_cloudiness : allocate local arrays for parameterization of diagnostic cloudiness. +! deallocate_cloudiness: deallocate local arrays for parameterization of diagnostic cloudiness. +! cloudiness_from_MPAS : initialize local arrays. +! cloudiness_to_MPAS : copy local arrays to MPAS arrays. +! driver_cloudiness : main driver (called from subroutine physics_driver). +! calc_cldincidence : calculates the cloud fraction as 0 or 1, depending on cloud condensates. +! calc_cldfraction : calculates the cloud fraction as a function of the relative humidity. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! 
* throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * added subroutine cal_cldfra3 which contains the calculation of the radiative cloud fraction in conjunction +! with the Thompson cloud microphysics. +! Laura D. Fowler (laura@ucar.edu) / 2016-06-04. +! * initialize the local "radiation" water vapor, cloud water, cloud ice, and snow mixing ratios. When using the +! option "cld_fraction_thompson", the "radiative" cloud water and ice paths are updated in conjunction with +! cloud formation, but changes to the cloud water and cloud ice mixing ratios only affect the long wave and +! short wave radiation codes. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-05. contains -!================================================================================================== +!================================================================================================================= subroutine allocate_cloudiness -!================================================================================================== +!================================================================================================================= - if(.not.allocated(cldfrac_p) ) allocate(cldfrac_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(dx_p) ) allocate(dx_p(ims:ime,jms:jme) ) + if(.not.allocated(xland_p) ) allocate(xland_p(ims:ime,jms:jme) ) + if(.not.allocated(cldfrac_p)) allocate(cldfrac_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(qvrad_p) ) allocate(qvrad_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qcrad_p) ) allocate(qcrad_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qirad_p) ) allocate(qirad_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qsrad_p) ) allocate(qsrad_p(ims:ime,kms:kme,jms:jme) ) end subroutine allocate_cloudiness 
-!================================================================================================== +!================================================================================================================= subroutine deallocate_cloudiness -!================================================================================================== +!================================================================================================================= - if(allocated(cldfrac_p) ) deallocate(cldfrac_p ) + if(allocated(dx_p) ) deallocate(dx_p ) + if(allocated(xland_p) ) deallocate(xland_p ) + if(allocated(cldfrac_p)) deallocate(cldfrac_p) + if(allocated(qvrad_p) ) deallocate(qvrad_p ) + if(allocated(qcrad_p) ) deallocate(qcrad_p ) + if(allocated(qirad_p) ) deallocate(qirad_p ) + if(allocated(qsrad_p) ) deallocate(qsrad_p ) end subroutine deallocate_cloudiness -!================================================================================================== - subroutine cloudiness_from_MPAS(diag_physics) -!================================================================================================== +!================================================================================================================= + subroutine cloudiness_from_MPAS(configs,mesh,diag_physics,sfc_input,its,ite) +!================================================================================================================= + +!input arguments: + integer,intent(in):: its,ite + +!input and inout arguments: + type(mpas_pool_type),intent(in) :: configs + type(mpas_pool_type),intent(in) :: mesh + type(mpas_pool_type),intent(in) :: sfc_input !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics -!-------------------------------------------------------------------------------------------------- +!local variables and pointers: + integer:: i,j,k + + real(kind=RKIND),pointer:: len_disp + real(kind=RKIND),dimension(:),pointer:: areaCell,meshDensity + 
real(kind=RKIND),dimension(:),pointer:: xland + +!----------------------------------------------------------------------------------------------------------------- + + call mpas_pool_get_config(configs,'config_len_disp',len_disp) + + call mpas_pool_get_array(mesh,'areaCell' ,areaCell ) + call mpas_pool_get_array(mesh,'meshDensity',meshDensity) + + call mpas_pool_get_array(sfc_input,'xland',xland) do j = jts,jte - do k = kts,kte - do i = its,ite - cldfrac_p(i,k,j) = 0._RKIND - enddo - enddo + do i = its,ite + dx_p(i,j) = len_disp / meshDensity(i)**0.25 + xland_p(i,j) = xland(i) + enddo + + !--- initialize the radiative cloud fraction and water vapor, cloud water, cloud ice, + ! and snow mixing ratios: + do k = kts,kte + do i = its,ite + qvrad_p(i,k,j) = qv_p(i,k,j) + qcrad_p(i,k,j) = qc_p(i,k,j) + qirad_p(i,k,j) = qi_p(i,k,j) + qsrad_p(i,k,j) = qs_p(i,k,j) + cldfrac_p(i,k,j) = 0._RKIND + enddo + enddo enddo end subroutine cloudiness_from_MPAS -!================================================================================================== - subroutine cloudiness_to_MPAS(diag_physics) -!================================================================================================== +!================================================================================================================= + subroutine cloudiness_to_MPAS(diag_physics,its,ite) +!================================================================================================================= + +!input arguments: + integer,intent(in):: its,ite !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics -!local pointers: +!local variables and pointers: + integer:: i,j,k + real(kind=RKIND),dimension(:,:),pointer:: cldfrac -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call 
mpas_pool_get_array(diag_physics,'cldfrac',cldfrac) @@ -109,61 +163,76 @@ subroutine cloudiness_to_MPAS(diag_physics) end subroutine cloudiness_to_MPAS -!================================================================================================== - subroutine driver_cloudiness(diag_physics) -!================================================================================================== +!================================================================================================================= + subroutine driver_cloudiness(configs,mesh,diag_physics,sfc_input,its,ite) +!================================================================================================================= + +!input arguments: + type(mpas_pool_type),intent(in) :: configs + type(mpas_pool_type),intent(in) :: mesh + type(mpas_pool_type),intent(in) :: sfc_input + + integer,intent(in):: its,ite !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics -!-------------------------------------------------------------------------------------------------- -! write(0,*) -! write(0,*) '--- enter subroutine driver_cloudiness:' +!local variables: + integer:: i,j,k + +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine driver_cloudiness:' !copy MPAS arrays to local arrays: - call cloudiness_from_MPAS(diag_physics) + call cloudiness_from_MPAS(configs,mesh,diag_physics,sfc_input,its,ite) cld_fraction_select: select case (trim(radt_cld_scheme)) case("cld_incidence") - - !calculate the incidence of clouds: -! write(0,*) '--- enter subroutine calc_cldincidence:' - call calc_cldincidence(cldfrac_p,qc_p,qi_p,f_qc,f_qi) -! write(0,*) '--- exit subroutine calc_cldincidence' + call calc_cldincidence(cldfrac_p,qcrad_p,qirad_p,f_qc,f_qi,its,ite) case("cld_fraction") - - !calculate the cloud fraction based on the relative humidity: -! 
write(0,*) '--- enter subroutine calc_cldfraction:' - call calc_cldfraction(cldfrac_p,t_p,pres_p,qv_p,qc_p,qi_p,qs_p) -! write(0,*) '--- exit subroutine calc_cldfraction' + call calc_cldfraction(cldfrac_p,t_p,pres_p,qvrad_p,qcrad_p,qirad_p,qs_p,its,ite) + + case("cld_fraction_thompson") + call cal_cldfra3( & + cldfra = cldfrac_p , qv = qvrad_p , qc = qcrad_p , qi = qirad_p , & + qs = qsrad_p , p = pres_hyd_p , t = t_p , rho = rho_p , & + xland = xland_p , gridkm = dx_p , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + ) case default end select cld_fraction_select !copy local arrays to MPAS grid: - call cloudiness_to_MPAS(diag_physics) + call cloudiness_to_MPAS(diag_physics,its,ite) -! write(0,*) '--- exit subroutine driver_cloudiness' +!write(0,*) '--- end subroutine driver_cloudiness.' end subroutine driver_cloudiness -!================================================================================================== - subroutine calc_cldincidence(cldfrac,qc,qi,f_qc,f_qi) -!================================================================================================== +!================================================================================================================= + subroutine calc_cldincidence(cldfrac,qc,qi,f_qc,f_qi,its,ite) +!================================================================================================================= !input arguments: logical,intent(in):: f_qc,f_qi + integer,intent(in):: its,ite real(kind=RKIND),intent(in),dimension(ims:ime,kms:kme,jms:jme):: qc,qi !output arguments: real(kind=RKIND),intent(out),dimension(ims:ime,kms:kme,jms:jme):: cldfrac !local variables: + integer:: i,j,k + real(kind=RKIND),parameter:: thresh = 1.e-06 -!--------------------------------------------------------------------------------------------------
+!----------------------------------------------------------------------------------------------------------------- do j = jts,jte do k = kts,kte @@ -193,11 +262,12 @@ subroutine calc_cldincidence(cldfrac,qc,qi,f_qc,f_qi) end subroutine calc_cldincidence -!================================================================================================== - subroutine calc_cldfraction(cldfrac,t_p,pres_p,qv,qc,qi,qs) -!================================================================================================== +!================================================================================================================= + subroutine calc_cldfraction(cldfrac,t_p,pres_p,qv,qc,qi,qs,its,ite) +!================================================================================================================= !input arguments: + integer,intent(in):: its,ite real(kind=RKIND),intent(in),dimension(ims:ime,kms:kme,jms:jme):: qv,qc,qi,qs real(kind=RKIND),intent(in),dimension(ims:ime,kms:kme,jms:jme):: t_p,pres_p @@ -205,6 +275,8 @@ subroutine calc_cldfraction(cldfrac,t_p,pres_p,qv,qc,qi,qs) real(kind=RKIND),intent(out),dimension(ims:ime,kms:kme,jms:jme):: cldfrac !local variables: + integer:: i,j,k + real(kind=RKIND),parameter:: alpha0 = 100. real(kind=RKIND),parameter:: gamma = 0.49 real(kind=RKIND),parameter:: qcldmin = 1.e-12 @@ -221,7 +293,7 @@ subroutine calc_cldfraction(cldfrac,t_p,pres_p,qv,qc,qi,qs) real(kind=RKIND):: esi,esw,qvsi,qvsw real(kind=RKIND):: arg,denom,qcld,qvs,rhum,subsat,weight -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- do j = jts,jte do k = kts,kte @@ -273,16 +345,12 @@ subroutine calc_cldfraction(cldfrac,t_p,pres_p,qv,qc,qi,qs) endif -! if(qcld .ge. 
qcldmin) write(0,101) i,k,qcld,rhum,cldfrac(i,k,j) - enddo enddo enddo - 101 format(i8,1x,i3,3(1x,e15.8)) - end subroutine calc_cldfraction -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_driver_cloudiness -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_driver_convection.F b/src/core_atmosphere/physics/mpas_atmphys_driver_convection.F index 6779aba46e..fbc7926879 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_driver_convection.F +++ b/src/core_atmosphere/physics/mpas_atmphys_driver_convection.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_driver_convection use mpas_kind_types use mpas_derived_types @@ -16,8 +16,10 @@ module mpas_atmphys_driver_convection use mpas_atmphys_vars !wrf physics: + use module_cu_gf use module_cu_kfeta use module_cu_tiedtke + use module_cu_ntiedtke implicit none private @@ -28,59 +30,77 @@ module mpas_atmphys_driver_convection update_convection_step1, & update_convection_step2 - integer, private:: i,k,j - - -!>\brief MPAS driver for parameterization of convection. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. 
-!> -!>\details -!> -!> subroutines in mpas_atmphys_driver_convection: -!> ---------------------------------------------- -!> allocate_convection : allocate local arrays for parameterization of convection. -!> deallocate_convection : deallocate local arrays for parameterization of convection. -!> init_convection : initialization of individual convection scheme. -!> driver_convection : main driver (called from subroutine physics_driver). -!> convection_from_MPAS : initialize local arrays. -!> convection_to_MPAS : copy local arrays to MPAS arrays. -!> update_convection_step1 : updates lifetime of deep convective clouds in Kain-Fritsch scheme. -!> update_convection_step2 : updates accumulated precipitation output from convection schemes. -!> -!> WRF physics called from driver_convection: -!> ------------------------------------------ -!> * module_cu_kfeta : Kain-Fritsch convection scheme. -!> * module_cu_tiedtke : Tiedtke convection scheme. - -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * removed the pre-processor option "do_hydrostatic_pressure" before call to the subroutines -!> kf_eta_cps and tiedtke. -!> * removed call to the updated Kain-Fritsch convection scheme. -!> Laura D. Fowler (laura@ucar.edu) / 2013-05-29. -!> * added the mean distance between cell centers in the call to the Kain-Fritsch convection -!> parameterization of convection. -!> Laura D. Fowler (laura@ucar.edu) / 2013-08-22. -!> * in call to subroutine kf_eta_cps, replaced the variable g (that originally pointed to -!> gravity) with gravity, for simplicity. -!> Laura D. Fowler (laura@ucar.edu) / 2014-03-21. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * Modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * in ./physics_wrf, updated the Tiedtke convection scheme to that in WRF version 3.6.1. 
The -!> call to subroutine cu_tiedtke has been updated accordingly to include the sensible heat -!> flux. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-11. + +!MPAS driver for parameterization of convection. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_driver_convection: +! ---------------------------------------------- +! allocate_convection : allocate local arrays for parameterization of convection. +! deallocate_convection : deallocate local arrays for parameterization of convection. +! init_convection : initialization of individual convection scheme. +! driver_convection : main driver (called from subroutine physics_driver). +! convection_from_MPAS : initialize local arrays. +! convection_to_MPAS : copy local arrays to MPAS arrays. +! update_convection_step1 : updates lifetime of deep convective clouds in Kain-Fritsch scheme. +! update_convection_step2 : updates accumulated precipitation output from convection schemes. +! +! WRF physics called from driver_convection: +! ------------------------------------------ +! * module_cu_kfeta : Kain-Fritsch convection scheme. +! * module_cu_tiedtke : Tiedtke convection scheme. + +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * removed the pre-processor option "do_hydrostatic_pressure" before call to the subroutines kf_eta_cps +! and tiedtke. +! * removed call to the updated Kain-Fritsch convection scheme. +! Laura D. Fowler (laura@ucar.edu) / 2013-05-29. +! * added the mean distance between cell centers in the call to the Kain-Fritsch parameterization of convection. +! Laura D. Fowler (laura@ucar.edu) / 2013-08-22. +! * in call to subroutine kf_eta_cps, replaced the variable g (that originally pointed to gravity) with gravity, +! for simplicity. +! Laura D. Fowler (laura@ucar.edu) / 2014-03-21. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. 
Fowler (laura@ucar.edu) / 2014-04-22. +! * Modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * in ./physics_wrf, updated the Tiedtke convection scheme to that in WRF version 3.6.1. The call to subroutine +! cu_tiedtke has been updated accordingly to include the sensible heat flux. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-11. +! * renamed "kain_fritsch" with "cu_kain_fritsch" and "tiedtke" with "cu_tiedtke". +! Laura D. Fowler (laura@ucar.edu) / 2016-03-22. +! * added the implementation of the Grell-Freitas convection scheme (option cu_grell_freitas). +! Laura D. Fowler (laura@ucar.edu) / 2016-03-30. +! code cleaner, now that we are using physics packages. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-01. +! * in the call to subroutine cu_grell_freitas, added the array pratec for the calculation of rainc. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-14. +! * in the call to subroutine cu_grell_freitas, removed the argument ktop_deep. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-20. +! * updated the call to the Tiedtke parameterization of convection to WRF version 3.8.1. This option is +! triggered using the option cu_tiedtke. +! Laura D. Fowler (laura@ucar.edu) / 2016-08-18. +! * added the call to the "new" Tiedtke parameterization of convection from WRF version 3.8.1. This option is +! triggered using the option cu_ntiedtke. +! Laura D. Fowler (laura@ucar.edu) / 2016-09-20. +! * for the kain_fritsch parameterization of convection, change the definition of dx_p to match that used in the +! Grell-Freitas and "new Tiedtke" parameterization. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-18. 
contains -!================================================================================================== +!================================================================================================================= subroutine allocate_convection -!================================================================================================== +!================================================================================================================= + +!local variables: + integer:: i,k,j + +!----------------------------------------------------------------------------------------------------------------- if(.not.allocated(cu_act_flag)) allocate(cu_act_flag(ims:ime,jms:jme) ) if(.not.allocated(rthcuten_p) ) allocate(rthcuten_p(ims:ime,kms:kme,jms:jme)) @@ -110,7 +130,39 @@ subroutine allocate_convection convection_select: select case(convection_scheme) - case ("kain_fritsch") + case ("cu_grell_freitas") + if(.not.allocated(ht_p) ) allocate(ht_p(ims:ime,jms:jme) ) + if(.not.allocated(cubot_p) ) allocate(cubot_p(ims:ime,jms:jme) ) + if(.not.allocated(cutop_p) ) allocate(cutop_p(ims:ime,jms:jme) ) + if(.not.allocated(kpbl_p) ) allocate(kpbl_p(ims:ime,jms:jme) ) + if(.not.allocated(k22_shallow_p) ) allocate(k22_shallow_p(ims:ime,jms:jme) ) + if(.not.allocated(kbcon_shallow_p)) allocate(kbcon_shallow_p(ims:ime,jms:jme) ) + if(.not.allocated(ktop_shallow_p) ) allocate(ktop_shallow_p(ims:ime,jms:jme) ) + if(.not.allocated(kbot_shallow_p) ) allocate(kbot_shallow_p(ims:ime,jms:jme) ) + if(.not.allocated(ktop_deep_p) ) allocate(ktop_deep_p(ims:ime,jms:jme) ) + + if(.not.allocated(dx_p) ) allocate(dx_p(ims:ime,jms:jme) ) + if(.not.allocated(area_p) ) allocate(area_p(ims:ime,jms:jme) ) + if(.not.allocated(gsw_p) ) allocate(gsw_p(ims:ime,jms:jme) ) + if(.not.allocated(hfx_p) ) allocate(hfx_p(ims:ime,jms:jme) ) + if(.not.allocated(qfx_p) ) allocate(qfx_p(ims:ime,jms:jme) ) + if(.not.allocated(xland_p) ) allocate(xland_p(ims:ime,jms:jme) ) + + 
if(.not.allocated(rthblten_p) ) allocate(rthblten_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(rthdynten_p) ) allocate(rthdynten_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(rthraten_p) ) allocate(rthraten_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(rqvblten_p) ) allocate(rqvblten_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(rqvdynten_p) ) allocate(rqvdynten_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(rucuten_p) ) allocate(rucuten_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(rvcuten_p) ) allocate(rvcuten_p(ims:ime,kms:kme,jms:jme) ) + + if(.not.allocated(xmb_total_p) ) allocate(xmb_total_p(ims:ime,jms:jme) ) + if(.not.allocated(xmb_shallow_p) ) allocate(xmb_shallow_p(ims:ime,jms:jme) ) + + if(.not.allocated(qccu_p) ) allocate(qccu_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qicu_p) ) allocate(qicu_p(ims:ime,kms:kme,jms:jme) ) + + case ("cu_kain_fritsch") if(.not.allocated(dx_p) ) allocate(dx_p(ims:ime,jms:jme) ) if(.not.allocated(area_p) ) allocate(area_p(ims:ime,jms:jme) ) if(.not.allocated(nca_p) ) allocate(nca_p(ims:ime,jms:jme) ) @@ -136,12 +188,10 @@ subroutine allocate_convection enddo enddo - case ("tiedtke") + case ("cu_tiedtke","cu_ntiedtke") if(.not.allocated(hfx_p) ) allocate(hfx_p(ims:ime,jms:jme) ) if(.not.allocated(qfx_p) ) allocate(qfx_p(ims:ime,jms:jme) ) if(.not.allocated(xland_p) ) allocate(xland_p(ims:ime,jms:jme) ) - if(.not.allocated(rqvdynten_p) ) allocate(rqvdynten_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(rqvdynblten_p)) allocate(rqvdynblten_p(ims:ime,kms:kme,jms:jme)) if(.not.allocated(rucuten_p) ) allocate(rucuten_p(ims:ime,kms:kme,jms:jme) ) if(.not.allocated(rvcuten_p) ) allocate(rvcuten_p(ims:ime,kms:kme,jms:jme) ) @@ -156,23 +206,54 @@ subroutine allocate_convection do i = its,ite do k = kts,kte do j = jts,jte - rqvdynten_p(i,k,j) = 0._RKIND - rqvdynblten_p(i,k,j) = 0._RKIND rucuten_p(i,k,j) = 0._RKIND rvcuten_p(i,k,j) = 0._RKIND enddo enddo enddo + cu_tiedtke_select: select case(convection_scheme) + 
+ case ("cu_tiedtke") + if(.not.allocated(rqvdynten_p) ) allocate(rqvdynten_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(rqvdynblten_p)) allocate(rqvdynblten_p(ims:ime,kms:kme,jms:jme)) + + do i = its,ite + do k = kts,kte + do j = jts,jte + rqvdynten_p(i,k,j) = 0._RKIND + rqvdynblten_p(i,k,j) = 0._RKIND + enddo + enddo + enddo + + case("cu_ntiedtke") + if(.not.allocated(dx_p) ) allocate(dx_p(ims:ime,jms:jme) ) + if(.not.allocated(rqvften_p)) allocate(rqvften_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(rthften_p)) allocate(rthften_p(ims:ime,kms:kme,jms:jme)) + + do i = its,ite + do k = kts,kte + do j = jts,jte + rqvften_p(i,k,j) = 0._RKIND + rthften_p(i,k,j) = 0._RKIND + enddo + enddo + enddo + + case default + + end select cu_tiedtke_select + case default end select convection_select end subroutine allocate_convection -!================================================================================================== +!================================================================================================================= subroutine deallocate_convection -!================================================================================================== +!================================================================================================================= if(allocated(cu_act_flag)) deallocate(cu_act_flag) if(allocated(rthcuten_p) ) deallocate(rthcuten_p ) @@ -184,7 +265,38 @@ subroutine deallocate_convection convection_select: select case(convection_scheme) - case ("kain_fritsch") + case ("cu_grell_freitas") + if(allocated(ht_p) ) deallocate(ht_p ) + if(allocated(cubot_p) ) deallocate(cubot_p ) + if(allocated(cutop_p) ) deallocate(cutop_p ) + if(allocated(kpbl_p) ) deallocate(kpbl_p ) + if(allocated(k22_shallow_p) ) deallocate(k22_shallow_p ) + if(allocated(kbcon_shallow_p)) deallocate(kbcon_shallow_p) + if(allocated(ktop_shallow_p) ) deallocate(ktop_shallow_p ) + if(allocated(kbot_shallow_p) ) deallocate(kbot_shallow_p ) + 
if(allocated(ktop_deep_p) ) deallocate(ktop_deep_p ) + + if(allocated(dx_p) ) deallocate(dx_p ) + if(allocated(area_p) ) deallocate(area_p ) + if(allocated(gsw_p) ) deallocate(gsw_p ) + if(allocated(hfx_p) ) deallocate(hfx_p ) + if(allocated(qfx_p) ) deallocate(qfx_p ) + if(allocated(xland_p) ) deallocate(xland_p ) + if(allocated(rthblten_p) ) deallocate(rthblten_p ) + if(allocated(rthdynten_p) ) deallocate(rthdynten_p ) + if(allocated(rthraten_p) ) deallocate(rthraten_p ) + if(allocated(rqvblten_p) ) deallocate(rqvblten_p ) + if(allocated(rqvdynten_p) ) deallocate(rqvdynten_p ) + if(allocated(rucuten_p) ) deallocate(rucuten_p ) + if(allocated(rvcuten_p) ) deallocate(rvcuten_p ) + + if(allocated(xmb_total_p) ) deallocate(xmb_total_p ) + if(allocated(xmb_shallow_p) ) deallocate(xmb_shallow_p ) + + if(allocated(qccu_p) ) deallocate(qccu_p ) + if(allocated(qicu_p) ) deallocate(qicu_p ) + + case ("cu_kain_fritsch") if(allocated(dx_p) ) deallocate(dx_p ) if(allocated(area_p) ) deallocate(area_p ) if(allocated(nca_p) ) deallocate(nca_p ) @@ -194,24 +306,37 @@ subroutine deallocate_convection if(allocated(rqrcuten_p) ) deallocate(rqrcuten_p ) if(allocated(rqscuten_p) ) deallocate(rqscuten_p ) - case ("tiedtke") + case ("cu_tiedtke","cu_ntiedtke") if(allocated(hfx_p) ) deallocate(hfx_p ) if(allocated(qfx_p) ) deallocate(qfx_p ) if(allocated(xland_p) ) deallocate(xland_p ) - if(allocated(rqvdynten_p) ) deallocate(rqvdynten_p ) - if(allocated(rqvdynblten_p)) deallocate(rqvdynblten_p) if(allocated(rucuten_p) ) deallocate(rucuten_p ) if(allocated(rvcuten_p) ) deallocate(rvcuten_p ) + cu_tiedtke_select: select case(convection_scheme) + + case ("cu_tiedtke") + if(allocated(rqvdynten_p) ) deallocate(rqvdynten_p ) + if(allocated(rqvdynblten_p)) deallocate(rqvdynblten_p) + + case ("cu_ntiedtke") + if(allocated(dx_p) ) deallocate(dx_p) + if(allocated(rqvften_p)) deallocate(rqvften_p) + if(allocated(rthften_p)) deallocate(rthften_p) + + case default + + end select cu_tiedtke_select + 
case default end select convection_select end subroutine deallocate_convection -!================================================================================================== +!================================================================================================================= subroutine init_convection(mesh,configs,diag_physics) -!================================================================================================== +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: mesh @@ -228,58 +353,57 @@ subroutine init_convection(mesh,configs,diag_physics) !local variables: integer:: iCell -!-------------------------------------------------------------------------------------------------- -! write(0,*) -! write(0,*) '--- enter subroutine init_convection:' +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_dimension(mesh,'nCells',nCells) call mpas_pool_get_config(configs,'config_do_restart',config_do_restart) - call mpas_pool_get_array(diag_physics,'nca',nca) convection_select: select case(convection_scheme) - case ("kain_fritsch") -! write(0,*) ' enter kain-fritsch initialization:' + case ("cu_kain_fritsch") + call mpas_pool_get_array(diag_physics,'nca',nca) if(.not. config_do_restart) then do iCell = 1, nCells nca(iCell) = -100._RKIND enddo endif call kf_lutab(svp1,svp2,svp3,svpt0) -! write(0,*) ' end kain-kritsch initialization' - - case ("tiedtke") -! write(0,*) ' enter tiedtke initialization:' -! write(mpas_err_message,'(A,A10)') & -! 'Tiedtke is being tested. Do not use right now. Thanks ' -! call physics_error_fatal(mpas_err_message) case default end select convection_select -! 
write(0,*) '--- end subroutine init_convection' - end subroutine init_convection -!================================================================================================== - subroutine driver_convection(itimestep,mesh,sfc_input,diag_physics,tend_physics) -!================================================================================================== +!================================================================================================================= + subroutine driver_convection(itimestep,configs,mesh,sfc_input,diag_physics,tend_physics,its,ite) +!================================================================================================================= -!input and output arguments: -!--------------------------- - integer,intent(in):: itimestep +!input arguments: + type(mpas_pool_type),intent(in):: configs type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(in):: sfc_input + + integer,intent(in):: its,ite + integer,intent(in):: itimestep + +!inout arguments: type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: tend_physics !local variables and arrays: -!--------------------------- logical:: log_convection - integer:: icount + + integer:: i,j + integer:: icount,initflag + real(kind=RKIND):: dx +!local pointers: + logical,pointer:: config_do_restart + integer,pointer:: gfconv_closure_deep,gfconv_closure_shallow + real(kind=RKIND),pointer:: len_disp + !variables specific to Kain_Fritsch parameterization: logical:: warm_rain,adapt_step_flag integer:: ktau @@ -287,17 +411,18 @@ subroutine driver_convection(itimestep,mesh,sfc_input,diag_physics,tend_physics) real(kind=RKIND):: cudt real(kind=RKIND):: cudtacttime -!temp: - real(kind=RKIND):: max_rthcuten - real(kind=RKIND):: min_rthcuten +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine driver_convection:' 
-!================================================================================================== -! write(0,*) -! write(0,*) '--- enter convection_driver: dt_cu=',dt_cu + call mpas_pool_get_config(configs,'config_gfconv_closure_deep',gfconv_closure_deep) + call mpas_pool_get_config(configs,'config_gfconv_closure_shallow',gfconv_closure_shallow) + call mpas_pool_get_config(configs,'config_len_disp',len_disp) + call mpas_pool_get_config(configs,'config_do_restart',config_do_restart) !initialize instantaneous precipitation, and copy convective tendencies from the dynamics to !the physics grid: - call convection_from_MPAS(dt_dyn,mesh,sfc_input,diag_physics,tend_physics) + call convection_from_MPAS(dt_dyn,configs,mesh,sfc_input,diag_physics,tend_physics,its,ite) !... convert the convection time-step to minutes: cudt = dt_cu/60. @@ -312,9 +437,45 @@ subroutine driver_convection(itimestep,mesh,sfc_input,diag_physics,tend_physics) enddo enddo +!... initialization of initflag needed in the calls cu_tiedtke and cu_ntiedtke: + initflag = 1 + if(config_do_restart .or. 
itimestep > 1) initflag = 0 + convection_select: select case(convection_scheme) - case ("kain_fritsch") + case ("cu_grell_freitas") + call cu_grell_freitas( & + itimestep = itimestep , dt = dt_dyn , & + dxCell = dx_p , areaCell = area_p , & + u = u_p , v = v_p , & + w = w_p , t = t_p , & + q = qv_p , rho = rho_p , & + p = pres_hyd_p , pi = pi_p , & + p8w = pres2_hyd_p , dz8w = dz_p , & + ht = ht_p , xland = xland_p , & + gsw = gsw_p , xlv = xlv , & + cp = cp , g = gravity , & + r_v = R_v , hfx = hfx_p , & + qfx = qfx_p , rthblten = rthblten_p , & + rqvblten = rqvblten_p , rthften = rthdynten_p , & + rqvften = rqvdynten_p , rthraten = rthraten_p , & + kpbl = kpbl_p , raincv = raincv_p , & + pratec = pratec_p , htop = cutop_p , & + hbot = cubot_p , k22_shallow = k22_shallow_p , & + kbcon_shallow = kbcon_shallow_p , ktop_shallow = ktop_shallow_p , & + xmb_total = xmb_total_p , xmb_shallow = xmb_shallow_p , & + gdc = qccu_p , gdc2 = qicu_p , & + rthcuten = rthcuten_p , rqvcuten = rqvcuten_p , & + rqccuten = rqccuten_p , rqicuten = rqicuten_p , & + ichoice_deep = gfconv_closure_deep , & + ichoice_shallow = gfconv_closure_shallow , & + ishallow_g3 = ishallow , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kds , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + ) + + case ("cu_kain_fritsch") if(itimestep == 1) then ktau = itimestep else @@ -352,31 +513,54 @@ subroutine driver_convection(itimestep,mesh,sfc_input,diag_physics,tend_physics) its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & ) - case("tiedtke") -! 
write(0,*) '--- enter subroutine cu_tiedtke:' - call cu_tiedtke ( & - pcps = pres_hyd_p , p8w = pres2_hyd_p , & - znu = znu_hyd_p , t3d = t_p , & - dt = dt_dyn , itimestep = itimestep , & - stepcu = n_cu , raincv = raincv_p , & - pratec = pratec_p , hfx = hfx_p , & - qfx = qfx_p , u3d = u_p , & - v3d = v_p , w = w_p , & - qv3d = qv_p , qc3d = qc_p , & - qi3d = qi_p , pi3d = pi_p , & - rho3d = rho_p , qvften = rqvdynten_p , & - qvpblten = rqvdynblten_p , dz8w = dz_p , & - xland = xland_p , cu_act_flag = cu_act_flag , & - f_qv = f_qv , f_qc = f_qc , & - f_qr = f_qr , f_qi = f_qi , & - f_qs = f_qs , rthcuten = rthcuten_p , & - rqvcuten = rqvcuten_p , rqccuten = rqccuten_p , & - rqicuten = rqicuten_p , rucuten = rucuten_p , & - rvcuten = rvcuten_p , & + case("cu_tiedtke") + call cu_tiedtke( & + pcps = pres_hyd_p , p8w = pres2_hyd_p , & + znu = znu_hyd_p , t3d = t_p , & + dt = dt_dyn , itimestep = initflag , & + stepcu = n_cu , raincv = raincv_p , & + pratec = pratec_p , qfx = qfx_p , & + u3d = u_p , v3d = v_p , & + w = w_p , qv3d = qv_p , & + qc3d = qc_p , qi3d = qi_p , & + pi3d = pi_p , rho3d = rho_p , & + qvften = rqvdynten_p , qvpblten = rqvdynblten_p , & + dz8w = dz_p , xland = xland_p , & + cu_act_flag = cu_act_flag , f_qv = f_qv , & + f_qc = f_qc , f_qr = f_qr , & + f_qi = f_qi , f_qs = f_qs , & + rthcuten = rthcuten_p , rqvcuten = rqvcuten_p , & + rqccuten = rqccuten_p , rqicuten = rqicuten_p , & + rucuten = rucuten_p , rvcuten = rvcuten_p , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kds , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + ) + + case("cu_ntiedtke") + call cu_ntiedtke( & + pcps = pres_hyd_p , p8w = pres2_hyd_p , & + t3d = t_p , dz8w = dz_p , & + dt = dt_dyn , itimestep = initflag , & + stepcu = n_cu , raincv = raincv_p , & + pratec = pratec_p , qfx = qfx_p , & + hfx = hfx_p , xland = xland_p , & + dx = dx_p , u3d = u_p 
, & + v3d = v_p , w = w_p , & + qv3d = qv_p , qc3d = qc_p , & + qi3d = qi_p , pi3d = pi_p , & + rho3d = rho_p , qvften = rqvften_p , & + thften = rthften_p , cu_act_flag = cu_act_flag , & + f_qv = f_qv , f_qc = f_qc , & + f_qr = f_qr , f_qi = f_qi , & + f_qs = f_qs , rthcuten = rthcuten_p , & + rqvcuten = rqvcuten_p , rqccuten = rqccuten_p , & + rqicuten = rqicuten_p , rucuten = rucuten_p , & + rvcuten = rvcuten_p , & ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & ims = ims , ime = ime , jms = jms , jme = jme , kms = kds , kme = kme , & its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & - ) + ) case default @@ -384,58 +568,54 @@ subroutine driver_convection(itimestep,mesh,sfc_input,diag_physics,tend_physics) !copy instantaneous and accumulated precipitation, convective tendencies, and "other" arrays !specific to convection parameterization back to the dynamics grid: - call convection_to_MPAS(diag_physics,tend_physics) + call convection_to_MPAS(diag_physics,tend_physics,its,ite) -! write(0,*) '--- end subroutine convection_driver' +!write(0,*) '--- end subroutine driver_convection.' 
+!write(0,*) end subroutine driver_convection -!================================================================================================== - subroutine convection_from_MPAS(dt_dyn,mesh,sfc_input,diag_physics,tend_physics) -!================================================================================================== +!================================================================================================================= + subroutine convection_from_MPAS(dt_dyn,configs,mesh,sfc_input,diag_physics,tend_physics,its,ite) +!================================================================================================================= + !input arguments: + type(mpas_pool_type),intent(in):: configs type(mpas_pool_type),intent(in):: mesh - type(mpas_pool_type),intent(in) :: sfc_input + type(mpas_pool_type),intent(in):: sfc_input type(mpas_pool_type),intent(in):: diag_physics type(mpas_pool_type),intent(in):: tend_physics + + integer,intent(in):: its,ite real(kind=RKIND),intent(in):: dt_dyn !local pointers: - real(kind=RKIND),dimension(:),pointer :: areaCell,dcEdge_m + integer,dimension(:),pointer:: kpbl,k22_shallow,kbcon_shallow,ktop_shallow,ktop_deep + real(kind=RKIND),dimension(:),pointer :: areaCell,meshDensity real(kind=RKIND),dimension(:),pointer :: nca,cubot,cutop,cuprec,raincv - real(kind=RKIND),dimension(:),pointer :: hfx,qfx,xland + real(kind=RKIND),dimension(:),pointer :: gsw,hfx,qfx,xland + real(kind=RKIND),dimension(:),pointer :: xmb_total,xmb_shallow + real(kind=RKIND),dimension(:,:),pointer:: zgrid + real(kind=RKIND),dimension(:,:),pointer:: qc_cu,qi_cu real(kind=RKIND),dimension(:,:),pointer:: w0avg real(kind=RKIND),dimension(:,:),pointer:: rthcuten,rqvcuten,rqccuten,rqicuten,rqrcuten,rqscuten - real(kind=RKIND),dimension(:,:),pointer:: rqvblten,rqvdynten,rucuten,rvcuten - -!-------------------------------------------------------------------------------------------------- -! write(0,*) -! 
write(0,*) '--- enter subroutine convection_from_MPAS:' - - call mpas_pool_get_array(mesh,'areaCell',areaCell) - - call mpas_pool_get_array(sfc_input,'xland',xland) - - call mpas_pool_get_array(diag_physics,'dcEdge_m' ,dcEdge_m ) - call mpas_pool_get_array(diag_physics,'nca' ,nca ) - call mpas_pool_get_array(diag_physics,'cubot' ,cubot ) - call mpas_pool_get_array(diag_physics,'cutop' ,cutop ) - call mpas_pool_get_array(diag_physics,'cuprec' ,cuprec ) - call mpas_pool_get_array(diag_physics,'raincv' ,raincv ) - call mpas_pool_get_array(diag_physics,'w0avg' ,w0avg ) - call mpas_pool_get_array(diag_physics,'hfx' ,hfx ) - call mpas_pool_get_array(diag_physics,'qfx' ,qfx ) - - call mpas_pool_get_array(tend_physics,'rthcuten' ,rthcuten ) - call mpas_pool_get_array(tend_physics,'rqvcuten' ,rqvcuten ) - call mpas_pool_get_array(tend_physics,'rqccuten' ,rqccuten ) - call mpas_pool_get_array(tend_physics,'rqicuten' ,rqicuten ) - call mpas_pool_get_array(tend_physics,'rqrcuten' ,rqrcuten ) - call mpas_pool_get_array(tend_physics,'rqscuten' ,rqscuten ) - call mpas_pool_get_array(tend_physics,'rucuten' ,rucuten ) - call mpas_pool_get_array(tend_physics,'rvcuten' ,rvcuten ) - call mpas_pool_get_array(tend_physics,'rqvblten' ,rqvblten ) - call mpas_pool_get_array(tend_physics,'rqvdynten',rqvdynten) + real(kind=RKIND),dimension(:,:),pointer:: rthblten,rthdynten,rthratenlw,rthratensw + real(kind=RKIND),dimension(:,:),pointer:: rqvblten,rqvdynten,rucuten,rvcuten + +!local variables: + integer:: i,j,k + integer:: iEdge + real(kind=RKIND),pointer:: len_disp + +!----------------------------------------------------------------------------------------------------------------- + + call mpas_pool_get_array(diag_physics,'cuprec',cuprec) + call mpas_pool_get_array(diag_physics,'raincv',raincv) + + call mpas_pool_get_array(tend_physics,'rthcuten',rthcuten) + call mpas_pool_get_array(tend_physics,'rqvcuten',rqvcuten) + call mpas_pool_get_array(tend_physics,'rqccuten',rqccuten) + call 
mpas_pool_get_array(tend_physics,'rqicuten',rqicuten) do j = jts,jte do i = its,ite @@ -452,13 +632,94 @@ subroutine convection_from_MPAS(dt_dyn,mesh,sfc_input,diag_physics,tend_physics) convection_select: select case(convection_scheme) - case ("kain_fritsch") + case ("cu_grell_freitas") + call mpas_pool_get_config(configs,'config_len_disp',len_disp) + + call mpas_pool_get_array(mesh,'areaCell' ,areaCell ) + call mpas_pool_get_array(mesh,'meshDensity',meshDensity) + call mpas_pool_get_array(mesh,'zgrid' ,zgrid ) + + call mpas_pool_get_array(sfc_input,'xland',xland) + + call mpas_pool_get_array(diag_physics,'gsw' ,gsw ) + call mpas_pool_get_array(diag_physics,'hfx' ,hfx ) + call mpas_pool_get_array(diag_physics,'qfx' ,qfx ) + call mpas_pool_get_array(diag_physics,'kpbl' ,kpbl ) + call mpas_pool_get_array(diag_physics,'cubot' ,cubot ) + call mpas_pool_get_array(diag_physics,'cutop' ,cutop ) + call mpas_pool_get_array(diag_physics,'xmb_total' ,xmb_total ) + call mpas_pool_get_array(diag_physics,'xmb_shallow' ,xmb_shallow ) + call mpas_pool_get_array(diag_physics,'k22_shallow' ,k22_shallow ) + call mpas_pool_get_array(diag_physics,'kbcon_shallow',kbcon_shallow) + call mpas_pool_get_array(diag_physics,'ktop_shallow' ,ktop_shallow ) + call mpas_pool_get_array(diag_physics,'ktop_deep' ,ktop_deep ) + call mpas_pool_get_array(diag_physics,'qc_cu' ,qc_cu ) + call mpas_pool_get_array(diag_physics,'qi_cu' ,qi_cu ) + + call mpas_pool_get_array(tend_physics,'rqvblten' ,rqvblten ) + call mpas_pool_get_array(tend_physics,'rqvdynten' ,rqvdynten ) + call mpas_pool_get_array(tend_physics,'rthblten' ,rthblten ) + call mpas_pool_get_array(tend_physics,'rthdynten' ,rthdynten ) + call mpas_pool_get_array(tend_physics,'rthratenlw',rthratenlw) + call mpas_pool_get_array(tend_physics,'rthratensw',rthratensw) + call mpas_pool_get_array(tend_physics,'rucuten' ,rucuten ) + call mpas_pool_get_array(tend_physics,'rvcuten' ,rvcuten ) + + do j = jts,jte + do i = its,ite + dx_p(i,j) = len_disp / 
meshDensity(i)**0.25 + area_p(i,j) = areaCell(i) + ht_p(i,j) = zgrid(1,i) + xland_p(i,j) = xland(i) + gsw_p(i,j) = gsw(i) + hfx_p(i,j) = hfx(i) + qfx_p(i,j) = qfx(i) + kpbl_p(i,j) = kpbl(i) + cubot_p(i,j) = cubot(i) + cutop_p(i,j) = cutop(i) + + xmb_total_p(i,j) = xmb_total(i) + xmb_shallow_p(i,j) = xmb_shallow(i) + + k22_shallow_p(i,j) = k22_shallow(i) + kbcon_shallow_p(i,j) = kbcon_shallow(i) + ktop_shallow_p(i,j) = ktop_shallow(i) + ktop_deep_p(i,j) = ktop_deep(i) + + do k = kts,kte + qccu_p(i,k,j) = qc_cu(k,i) + qicu_p(i,k,j) = qi_cu(k,i) + + rthblten_p(i,k,j) = rthblten(k,i) + rthdynten_p(i,k,j) = rthdynten(k,i) + rthraten_p(i,k,j) = rthratenlw(k,i) + rthratensw(k,i) + rqvblten_p(i,k,j) = rqvblten(k,i) + rqvdynten_p(i,k,j) = rqvdynten(k,i) + rucuten_p(i,k,j) = rucuten(k,i) + rvcuten_p(i,k,j) = rvcuten(k,i) + enddo + enddo + enddo + + case ("cu_kain_fritsch") + call mpas_pool_get_config(configs,'config_len_disp',len_disp) + + call mpas_pool_get_array(mesh,'areaCell',areaCell) + call mpas_pool_get_array(mesh,'meshDensity',meshDensity) + + call mpas_pool_get_array(diag_physics,'cubot' ,cubot ) + call mpas_pool_get_array(diag_physics,'cutop' ,cutop ) + call mpas_pool_get_array(diag_physics,'nca' ,nca ) + call mpas_pool_get_array(diag_physics,'w0avg' ,w0avg ) + + call mpas_pool_get_array(tend_physics,'rqrcuten',rqrcuten) + call mpas_pool_get_array(tend_physics,'rqscuten',rqscuten) do j = jts,jte do i = its,ite !area of grid-cell: + dx_p(i,j) = len_disp / meshDensity(i)**0.25 area_p(i,j) = areaCell(i) - dx_p(i,j) = dcEdge_m(i) cubot_p(i,j) = cubot(i) cutop_p(i,j) = cutop(i) @@ -478,7 +739,16 @@ subroutine convection_from_MPAS(dt_dyn,mesh,sfc_input,diag_physics,tend_physics) enddo enddo - case ("tiedtke") + case ("cu_tiedtke","cu_ntiedtke") + call mpas_pool_get_array(sfc_input,'xland',xland) + call mpas_pool_get_array(diag_physics,'hfx',hfx) + call mpas_pool_get_array(diag_physics,'qfx',qfx) + + call mpas_pool_get_array(tend_physics,'rqvblten' ,rqvblten ) + call 
mpas_pool_get_array(tend_physics,'rqvdynten',rqvdynten) + call mpas_pool_get_array(tend_physics,'rucuten' ,rucuten ) + call mpas_pool_get_array(tend_physics,'rvcuten' ,rvcuten ) + do j = jts,jte do i = its,ite xland_p(i,j) = xland(i) @@ -488,49 +758,91 @@ subroutine convection_from_MPAS(dt_dyn,mesh,sfc_input,diag_physics,tend_physics) do k = kts,kte do i = its,ite - rqvdynblten_p(i,k,j) = rqvblten(k,i) - rqvdynten_p(i,k,j) = rqvdynten(k,i) - rucuten_p(i,k,j) = rucuten(k,i) - rvcuten_p(i,k,j) = rvcuten(k,i) + rucuten_p(i,k,j) = rucuten(k,i) + rvcuten_p(i,k,j) = rvcuten(k,i) enddo enddo enddo + + cu_tiedtke_select: select case(convection_scheme) + + case ("cu_tiedtke") + do j = jts,jte + do k = kts,kte + do i = its,ite + rqvdynblten_p(i,k,j) = rqvblten(k,i) + rqvdynten_p(i,k,j) = rqvdynten(k,i) + enddo + enddo + enddo + case("cu_ntiedtke") + call mpas_pool_get_config(configs,'config_len_disp',len_disp) + call mpas_pool_get_array(mesh,'meshDensity',meshDensity) + + call mpas_pool_get_array(tend_physics,'rqvdynten' ,rqvdynten ) + call mpas_pool_get_array(tend_physics,'rqvblten' ,rqvblten ) + call mpas_pool_get_array(tend_physics,'rthdynten' ,rthdynten ) + call mpas_pool_get_array(tend_physics,'rthblten' ,rthblten ) + call mpas_pool_get_array(tend_physics,'rthratenlw',rthratenlw) + call mpas_pool_get_array(tend_physics,'rthratensw',rthratensw) + + do j = jts,jte + do i = its,ite + dx_p(i,j) = len_disp / meshDensity(i)**0.25 + enddo + + do k = kts,kte + do i = its,ite + rqvften_p(i,k,j) = rqvdynten(k,i) + rqvblten(k,i) + rthften_p(i,k,j) = (rthdynten(k,i) + rthblten(k,i) & + + rthratenlw(k,i) + rthratensw(k,i)) * pi_p(i,k,j) + enddo + enddo + enddo + + case default + + end select cu_tiedtke_select + case default end select convection_select - + end subroutine convection_from_MPAS -!================================================================================================== - subroutine convection_to_MPAS(diag_physics,tend_physics) 
-!================================================================================================== +!================================================================================================================= + subroutine convection_to_MPAS(diag_physics,tend_physics,its,ite) +!================================================================================================================= + +!input arguments: + integer,intent(in):: its,ite + !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: tend_physics +!local variables: + integer:: i,k,j + !local pointers: + integer,dimension(:),pointer:: k22_shallow,kbcon_shallow,ktop_shallow,ktop_deep real(kind=RKIND),dimension(:),pointer :: nca,cubot,cutop,cuprec,raincv + real(kind=RKIND),dimension(:),pointer :: xmb_total,xmb_shallow + real(kind=RKIND),dimension(:,:),pointer:: qc_cu,qi_cu real(kind=RKIND),dimension(:,:),pointer:: w0avg real(kind=RKIND),dimension(:,:),pointer:: rthcuten,rqvcuten,rqccuten,rqicuten,rqrcuten,rqscuten real(kind=RKIND),dimension(:,:),pointer:: rucuten,rvcuten -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- + + call mpas_pool_get_array(diag_physics,'cuprec',cuprec) + call mpas_pool_get_array(diag_physics,'raincv',raincv) - call mpas_pool_get_array(diag_physics,'nca' ,nca ) - call mpas_pool_get_array(diag_physics,'cubot' ,cubot ) - call mpas_pool_get_array(diag_physics,'cutop' ,cutop ) - call mpas_pool_get_array(diag_physics,'cuprec' ,cuprec ) - call mpas_pool_get_array(diag_physics,'raincv' ,raincv ) - call mpas_pool_get_array(diag_physics,'w0avg' ,w0avg ) call mpas_pool_get_array(tend_physics,'rthcuten',rthcuten) call mpas_pool_get_array(tend_physics,'rqvcuten',rqvcuten) call mpas_pool_get_array(tend_physics,'rqccuten',rqccuten) call 
mpas_pool_get_array(tend_physics,'rqicuten',rqicuten) - call mpas_pool_get_array(tend_physics,'rqrcuten',rqrcuten) - call mpas_pool_get_array(tend_physics,'rqscuten',rqscuten) - call mpas_pool_get_array(tend_physics,'rucuten' ,rucuten ) - call mpas_pool_get_array(tend_physics,'rvcuten' ,rvcuten ) do j = jts,jte do i = its,ite @@ -547,7 +859,50 @@ subroutine convection_to_MPAS(diag_physics,tend_physics) convection_select: select case(convection_scheme) - case ("kain_fritsch") + case ("cu_grell_freitas") + call mpas_pool_get_array(diag_physics,'cubot' ,cubot ) + call mpas_pool_get_array(diag_physics,'cutop' ,cutop ) + call mpas_pool_get_array(diag_physics,'xmb_total' ,xmb_total ) + call mpas_pool_get_array(diag_physics,'xmb_shallow' ,xmb_shallow ) + call mpas_pool_get_array(diag_physics,'k22_shallow' ,k22_shallow ) + call mpas_pool_get_array(diag_physics,'kbcon_shallow',kbcon_shallow) + call mpas_pool_get_array(diag_physics,'ktop_shallow' ,ktop_shallow ) + call mpas_pool_get_array(diag_physics,'ktop_deep' ,ktop_deep ) + call mpas_pool_get_array(diag_physics,'qc_cu' ,qc_cu ) + call mpas_pool_get_array(diag_physics,'qi_cu' ,qi_cu ) + + call mpas_pool_get_array(tend_physics,'rucuten',rucuten) + call mpas_pool_get_array(tend_physics,'rvcuten',rvcuten) + + do j = jts,jte + do i = its,ite + cubot(i) = cubot_p(i,j) + cutop(i) = cutop_p(i,j) + xmb_total(i) = xmb_total_p(i,j) + xmb_shallow(i) = xmb_shallow_p(i,j) + k22_shallow(i) = k22_shallow_p(i,j) + kbcon_shallow(i) = kbcon_shallow_p(i,j) + ktop_shallow(i) = ktop_shallow_p(i,j) + ktop_deep(i) = ktop_deep_p(i,j) + + do k = kts,kte + qc_cu(k,i) = qccu_p(i,k,j) + qi_cu(k,i) = qicu_p(i,k,j) + rucuten(k,i) = rucuten_p(i,k,j) + rvcuten(k,i) = rvcuten_p(i,k,j) + enddo + enddo + enddo + + case ("cu_kain_fritsch") + call mpas_pool_get_array(diag_physics,'cubot',cubot) + call mpas_pool_get_array(diag_physics,'cutop',cutop) + call mpas_pool_get_array(diag_physics,'nca' ,nca ) + call mpas_pool_get_array(diag_physics,'w0avg',w0avg) + + 
call mpas_pool_get_array(tend_physics,'rqrcuten',rqrcuten) + call mpas_pool_get_array(tend_physics,'rqscuten',rqscuten) + do j = jts,jte do i = its,ite cubot(i) = cubot_p(i,j) @@ -561,7 +916,10 @@ subroutine convection_to_MPAS(diag_physics,tend_physics) enddo enddo - case ("tiedtke") + case ("cu_tiedtke","cu_ntiedtke") + call mpas_pool_get_array(tend_physics,'rucuten',rucuten) + call mpas_pool_get_array(tend_physics,'rvcuten',rvcuten) + do j = jts,jte do k = kts,kte do i = its,ite @@ -577,67 +935,62 @@ subroutine convection_to_MPAS(diag_physics,tend_physics) end subroutine convection_to_MPAS -!================================================================================================== - subroutine update_convection_step1(mesh,diag_physics,tend_physics) -!================================================================================================== +!================================================================================================================= + subroutine update_convection_step1(diag_physics,tend_physics,its,ite) +!================================================================================================================= + !input arguments: - type(mpas_pool_type),intent(in):: mesh + integer,intent(in):: its,ite !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: tend_physics !local pointers: - integer,pointer:: nCellsSolve,nVertLevels - real(kind=RKIND),dimension(:),pointer :: nca,cubot,cutop,cuprec,raincv real(kind=RKIND),dimension(:,:),pointer:: rthcuten,rqvcuten,rqccuten,rqicuten,rqrcuten,rqscuten !local variables and arrays: - integer:: iCell,k - -!-------------------------------------------------------------------------------------------------- + integer:: i,k - call mpas_pool_get_dimension(mesh,'nCellsSolve',nCellsSolve) - call mpas_pool_get_dimension(mesh,'nVertLevels',nVertLevels) - - call mpas_pool_get_array(diag_physics,'nca' ,nca ) - call 
mpas_pool_get_array(diag_physics,'cubot' ,cubot ) - call mpas_pool_get_array(diag_physics,'cutop' ,cutop ) - call mpas_pool_get_array(diag_physics,'cuprec',cuprec) - call mpas_pool_get_array(diag_physics,'raincv',raincv) - - call mpas_pool_get_array(tend_physics,'rthcuten',rthcuten) - call mpas_pool_get_array(tend_physics,'rqvcuten',rqvcuten) - call mpas_pool_get_array(tend_physics,'rqccuten',rqccuten) - call mpas_pool_get_array(tend_physics,'rqicuten',rqicuten) - call mpas_pool_get_array(tend_physics,'rqrcuten',rqrcuten) - call mpas_pool_get_array(tend_physics,'rqscuten',rqscuten) +!----------------------------------------------------------------------------------------------------------------- convection_select: select case(convection_scheme) - case ("kain_fritsch") - - do iCell = 1, nCellsSolve + case ("cu_kain_fritsch") + call mpas_pool_get_array(diag_physics,'nca' ,nca ) + call mpas_pool_get_array(diag_physics,'cubot' ,cubot ) + call mpas_pool_get_array(diag_physics,'cutop' ,cutop ) + call mpas_pool_get_array(diag_physics,'cuprec',cuprec) + call mpas_pool_get_array(diag_physics,'raincv',raincv) + + call mpas_pool_get_array(tend_physics,'rthcuten',rthcuten) + call mpas_pool_get_array(tend_physics,'rqvcuten',rqvcuten) + call mpas_pool_get_array(tend_physics,'rqccuten',rqccuten) + call mpas_pool_get_array(tend_physics,'rqicuten',rqicuten) + call mpas_pool_get_array(tend_physics,'rqrcuten',rqrcuten) + call mpas_pool_get_array(tend_physics,'rqscuten',rqscuten) + + do i = its, ite !decreases the characteristic time period that convection remains active. When nca_p !becomes less than the convective timestep, convective tendencies and precipitation !are reset to zero (note that this is also done in subroutine kf_eta_cps). - if(nca(iCell) .gt. 0.) then - nca(iCell) = nca(iCell) - dt_dyn + if(nca(i) .gt. 0.) then + nca(i) = nca(i) - dt_dyn - if(nca(iCell) .lt. 
0.5*dt_dyn) then - do k = 1, nVertLevels - rthcuten(k,iCell) = 0._RKIND - rqvcuten(k,iCell) = 0._RKIND - rqccuten(k,iCell) = 0._RKIND - rqrcuten(k,iCell) = 0._RKIND - rqicuten(k,iCell) = 0._RKIND - rqscuten(k,iCell) = 0._RKIND + if(nca(i) .lt. 0.5*dt_dyn) then + do k = kts,kte + rthcuten(k,i) = 0._RKIND + rqvcuten(k,i) = 0._RKIND + rqccuten(k,i) = 0._RKIND + rqrcuten(k,i) = 0._RKIND + rqicuten(k,i) = 0._RKIND + rqscuten(k,i) = 0._RKIND enddo - raincv(iCell) = 0._RKIND - cuprec(iCell) = 0._RKIND - cubot(iCell) = kte+1 - cutop(iCell) = kts + raincv(i) = 0._RKIND + cuprec(i) = 0._RKIND + cubot(i) = kte+1 + cutop(i) = kts endif endif enddo @@ -648,51 +1001,47 @@ subroutine update_convection_step1(mesh,diag_physics,tend_physics) end subroutine update_convection_step1 -!================================================================================================== - subroutine update_convection_step2(configs,mesh,diag_physics) -!================================================================================================== +!================================================================================================================= + subroutine update_convection_step2(configs,diag_physics,its,ite) +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: configs - type(mpas_pool_type),intent(in):: mesh + integer,intent(in):: its,ite !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics !local pointers: - integer,pointer:: nCellsSolve integer,dimension(:),pointer:: i_rainc real(kind=RKIND),pointer:: bucket_rainc real(kind=RKIND),dimension(:),pointer:: cuprec,rainc !local variables and arrays: - integer:: iCell + integer:: i -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- 
call mpas_pool_get_config(configs,'config_bucket_rainc',bucket_rainc) - call mpas_pool_get_dimension(mesh,'nCellsSolve',nCellsSolve) - call mpas_pool_get_array(diag_physics,'i_rainc',i_rainc) call mpas_pool_get_array(diag_physics,'cuprec' ,cuprec ) call mpas_pool_get_array(diag_physics,'rainc' ,rainc ) !update the accumulated precipitation at the end of each dynamic time-step: - do iCell = 1, nCellsSolve - rainc(iCell) = rainc(iCell) + cuprec(iCell) * dt_dyn + do i = its, ite + rainc(i) = rainc(i) + cuprec(i) * dt_dyn if(l_acrain .and. bucket_rainc.gt.0._RKIND .and. & - rainc(iCell).gt.bucket_rainc) then - i_rainc(iCell) = i_rainc(iCell) + 1 - rainc(iCell) = rainc(iCell) - bucket_rainc + rainc(i).gt.bucket_rainc) then + i_rainc(i) = i_rainc(i) + 1 + rainc(i) = rainc(i) - bucket_rainc endif - enddo end subroutine update_convection_step2 -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_driver_convection -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_driver_gwdo.F b/src/core_atmosphere/physics/mpas_atmphys_driver_gwdo.F index 69229c57aa..1e5f2dbf9f 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_driver_gwdo.F +++ b/src/core_atmosphere/physics/mpas_atmphys_driver_gwdo.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! 
-!================================================================================================== +!================================================================================================================= module mpas_atmphys_driver_gwdo use mpas_kind_types use mpas_derived_types @@ -23,48 +23,48 @@ module mpas_atmphys_driver_gwdo deallocate_gwdo, & driver_gwdo - integer,private:: i,j,k - - -!>\brief MPAS driver for parameterization of gravity wave drag over orography. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_driver_gwdo: -!> ---------------------------------------- -!> allocate_gwdo : allocate local arrays for parameterization of gravity wave drag. -!> deallocate_gwdo: deallocate local arrays for parameterization of gravity wave drag. -!> driver_gwdo : main driver (called from subroutine physics_driver). -!> gwdo_from_MPAS : initialize local arrays. -!> gwdo_to_MPAS : copy local arrays to MPAS arrays. -!> -!> WRF physics called from driver_gwdo: -!> --------------------------- -------- -!> * module_bl_gwdo : parameterization of gravity wave drag over orography. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * removed the pre-processor option "do_hydrostatic_pressure" before call to subroutine gwdo. -!> Laura D. Fowler (birch.ucar.edu) / 2013-05-29. -!> * changed the definition of dx_p to the mean distance between cell centers. -!> Laura D. Fowler (laura@ucar.edu) / 2013-08-23. -!> * in call to subroutine gwdo, replaced the variable g (that originally pointed to gravity) -!> with gravity, for simplicity. -!> Laura D. Fowler (laura@ucar.edu) / 2014-03-21. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. 
+ +!MPAS driver for parameterization of gravity wave drag over orography. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_driver_gwdo: +! ---------------------------------------- +! allocate_gwdo : allocate local arrays for parameterization of gravity wave drag. +! deallocate_gwdo: deallocate local arrays for parameterization of gravity wave drag. +! driver_gwdo : main driver (called from subroutine physics_driver). +! gwdo_from_MPAS : initialize local arrays. +! gwdo_to_MPAS : copy local arrays to MPAS arrays. +! +! WRF physics called from driver_gwdo: +! --------------------------- -------- +! * module_bl_gwdo : parameterization of gravity wave drag over orography. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * removed the pre-processor option "do_hydrostatic_pressure" before call to subroutine gwdo. +! Laura D. Fowler (birch.ucar.edu) / 2013-05-29. +! * changed the definition of dx_p to the mean distance between cell centers. +! Laura D. Fowler (laura@ucar.edu) / 2013-08-23. +! * in call to subroutine gwdo, replaced the variable g (that originally pointed to gravity) +! with gravity, for simplicity. +! Laura D. Fowler (laura@ucar.edu) / 2014-03-21. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * renamed "ysu_gwdo" to "bl_gwdo_ysu". +! Laura D. Fowler (laura@ucar.edu) / 2016-03-25. +! * change the definition of dx_p to match that used in other physics parameterizations. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-18. 
contains -!================================================================================================== +!================================================================================================================= subroutine allocate_gwdo -!================================================================================================== +!================================================================================================================= if(.not.allocated(dx_p) ) allocate(dx_p(ims:ime,jms:jme) ) if(.not.allocated(var2d_p) ) allocate(var2d_p(ims:ime,jms:jme) ) @@ -88,9 +88,9 @@ subroutine allocate_gwdo end subroutine allocate_gwdo -!================================================================================================== +!================================================================================================================= subroutine deallocate_gwdo -!================================================================================================== +!================================================================================================================= if(allocated(dx_p) ) deallocate(dx_p ) if(allocated(var2d_p) ) deallocate(var2d_p ) @@ -114,23 +114,34 @@ subroutine deallocate_gwdo end subroutine deallocate_gwdo -!================================================================================================== - subroutine gwdo_from_MPAS(mesh,sfc_input,diag_physics,tend_physics) -!================================================================================================== +!================================================================================================================= + subroutine gwdo_from_MPAS(configs,mesh,sfc_input,diag_physics,tend_physics,its,ite) +!================================================================================================================= !input arguments: + type(mpas_pool_type),intent(in):: configs type(mpas_pool_type),intent(in):: mesh 
type(mpas_pool_type),intent(in):: sfc_input type(mpas_pool_type),intent(in):: diag_physics type(mpas_pool_type),intent(in):: tend_physics + integer,intent(in):: its,ite + +!local variables: + integer:: i,k,j + !local pointers: integer,dimension(:),pointer:: kpbl + real(kind=RKIND),pointer:: len_disp + real(kind=RKIND),dimension(:),pointer :: meshDensity real(kind=RKIND),dimension(:),pointer :: oa1,oa2,oa3,oa4,ol1,ol2,ol3,ol4,con,var2d - real(kind=RKIND),dimension(:),pointer :: dcEdge_m,dusfcg,dvsfcg + real(kind=RKIND),dimension(:),pointer :: dusfcg,dvsfcg real(kind=RKIND),dimension(:,:),pointer:: dtaux3d,dtauy3d,rublten,rvblten -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- + + call mpas_pool_get_config(configs,'config_len_disp',len_disp) + call mpas_pool_get_array(mesh,'meshDensity',meshDensity) call mpas_pool_get_array(sfc_input,'oa1' ,oa1 ) call mpas_pool_get_array(sfc_input,'oa2' ,oa2 ) @@ -143,7 +154,6 @@ subroutine gwdo_from_MPAS(mesh,sfc_input,diag_physics,tend_physics) call mpas_pool_get_array(sfc_input,'con' ,con ) call mpas_pool_get_array(sfc_input,'var2d',var2d) - call mpas_pool_get_array(diag_physics,'dcEdge_m',dcEdge_m) call mpas_pool_get_array(diag_physics,'kpbl' ,kpbl ) call mpas_pool_get_array(diag_physics,'dusfcg' ,dusfcg ) call mpas_pool_get_array(diag_physics,'dvsfcg' ,dvsfcg ) @@ -169,7 +179,7 @@ subroutine gwdo_from_MPAS(mesh,sfc_input,diag_physics,tend_physics) do j = jts,jte do i = its,ite - dx_p(i,j) = dcEdge_m(i) + dx_p(i,j) = len_disp / meshDensity(i)**0.25 kpbl_p(i,j) = kpbl(i) dusfcg_p(i,j) = dusfcg(i) dvsfcg_p(i,j) = dvsfcg(i) @@ -189,19 +199,25 @@ subroutine gwdo_from_MPAS(mesh,sfc_input,diag_physics,tend_physics) end subroutine gwdo_from_MPAS -!================================================================================================== - subroutine 
gwdo_to_MPAS(diag_physics,tend_physics) -!================================================================================================== +!================================================================================================================= + subroutine gwdo_to_MPAS(diag_physics,tend_physics,its,ite) +!================================================================================================================= + +!input arguments: + integer,intent(in):: its,ite !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: tend_physics +!local variables: + integer:: i,k,j + !local pointers: real(kind=RKIND),dimension(:),pointer :: dusfcg,dvsfcg real(kind=RKIND),dimension(:,:),pointer:: dtaux3d,dtauy3d,rubldiff,rvbldiff,rublten,rvblten -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_array(diag_physics,'dusfcg' ,dusfcg ) call mpas_pool_get_array(diag_physics,'dvsfcg' ,dvsfcg ) @@ -234,13 +250,16 @@ subroutine gwdo_to_MPAS(diag_physics,tend_physics) end subroutine gwdo_to_MPAS -!================================================================================================== - subroutine driver_gwdo(itimestep,mesh,sfc_input,diag_physics,tend_physics) -!================================================================================================== +!================================================================================================================= + subroutine driver_gwdo(itimestep,configs,mesh,sfc_input,diag_physics,tend_physics,its,ite) +!================================================================================================================= !input arguments: + type(mpas_pool_type),intent(in):: configs type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(in):: sfc_input + + 
integer,intent(in):: its,ite integer,intent(in):: itimestep !inout arguments: @@ -251,16 +270,16 @@ subroutine driver_gwdo(itimestep,mesh,sfc_input,diag_physics,tend_physics) integer:: i,iCell,iEdge real(kind=RKIND),dimension(:),allocatable:: dx_max -!-------------------------------------------------------------------------------------------------- -! write(0,*) -! write(0,*) '--- enter subroutine driver_gwdo: dt_pbl=',dt_pbl +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine driver_gwdo:' !copy MPAS arrays to local arrays: - call gwdo_from_MPAS(mesh,sfc_input,diag_physics,tend_physics) + call gwdo_from_MPAS(configs,mesh,sfc_input,diag_physics,tend_physics,its,ite) gwdo_select: select case (trim(gwdo_scheme)) - case("ysu_gwdo") + case("bl_ysu_gwdo") call gwdo ( & p3d = pres_hydd_p , p3di = pres2_hydd_p , pi3d = pi_p , & u3d = u_p , v3d = v_p , t3d = t_p , & @@ -284,11 +303,13 @@ subroutine driver_gwdo(itimestep,mesh,sfc_input,diag_physics,tend_physics) end select gwdo_select !copy local arrays to MPAS grid: - call gwdo_to_MPAS(diag_physics,tend_physics) -! write(0,*) '--- end subroutine driver_gwdo' + call gwdo_to_MPAS(diag_physics,tend_physics,its,ite) + +!write(0,*) '--- end subroutine driver_gwdo.' 
+!write(0,*) end subroutine driver_gwdo -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_driver_gwdo -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_driver_lsm.F b/src/core_atmosphere/physics/mpas_atmphys_driver_lsm.F index 0520686487..fd018dfaeb 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_driver_lsm.F +++ b/src/core_atmosphere/physics/mpas_atmphys_driver_lsm.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_driver_lsm use mpas_kind_types use mpas_derived_types @@ -18,6 +18,7 @@ module mpas_atmphys_driver_lsm !wrf physics use module_sf_noahdrv + use module_sf_sfcdiags implicit none private @@ -25,6 +26,7 @@ module mpas_atmphys_driver_lsm allocate_lsm, & deallocate_lsm, & driver_lsm + logical,parameter:: rdmaxalb = .false. !use snow albedo from geogrid;false use table values logical,parameter:: myj = .false. !true if using Mellor-Yamada PBL scheme. @@ -34,197 +36,242 @@ module mpas_atmphys_driver_lsm !urban physics: MPAS does not plan to run the urban physics option. integer,parameter:: sf_urban_physics = 0 !activate urban canopy model (=0: no urban canopy) - integer,private:: i,j,k,n - -!>\brief MPAS driver for parameterization of land surface processes. -!>\author Laura D. 
Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_driver_lsm: -!> --------------------------------------- -!> allocate_lsm : allocate local arrays for land surface parameterization. -!> deallocate_lsm: deallocate local arrays for land surface parameterization. -!> driver_lsm : main driver (called from subroutine physics_driver). -!> lsm_from_MPAS : initialize local arrays. -!> lsm_to_MPAS : copy local arrays to MPAS arrays. -!> -!> WRF physics called from driver_lsm: -!> ------------------------ ---------- -!> * module_sf_noahdrv : NOAH 4-layers land surface scheme. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * removed the pre-processor option "do_hydrostatic_pressure" before call to subroutine lsm. -!> Laura D. Fowler (birch.ucar.edu) / 2013-05-29. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * moved the definition of isurban to landuse_init_forMPAS in mpas_atmphys_landuse.F. -!> isurban is now defined as a function of the input landuse data file. -!> Dominikus Heinzeller (IMK) / 2014-07-24. -!> * removed the global variable qz0 and initialized the local variable qz0_p to 0. qz0 is only -!> used in the MYJ PBL parameterization which is not available in MPAS. -!> Laura D. Fowler (laura@ucar.edu) / 2015-03-05. -!> * in subroutine lsm_from_MPAS, modified the initialization of local variable dzs_p. -!> Laura D. Fowler (laura@ucar.edu) / 2015-04-11. -!> * in subroutine lsm_to_MPAS, removed updating isltyp, ivgtyp, shdmin, and shdmax as they -!> are constant in the Noah lsm. -!> Laura D. Fowler (laura@ucar.edu) / 2015-04-11. - -!> -!> DOCUMENTATION: -!> ./physics_wrf/module_sf_noahdrv.F: main driver for the "NOAH" land-surface parameterization. 
-!> In the argument list,I added "OPTIONAL" to the declaration of a few arrays to avoid compiling -!> with the "urban physics" option. These arrays are: -!> .. num_roof_layers; num_wall_layers; num_road_layers;num_urban_layers. -!> .. ust_urb2d;frc_urb2d;utype_urb2d. -!> Laura D. Fowler (01-18-2011). + +!MPAS driver for parameterization of land surface processes. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_driver_lsm: +! --------------------------------------- +! allocate_lsm : allocate local arrays for land surface parameterization. +! deallocate_lsm: deallocate local arrays for land surface parameterization. +! driver_lsm : main driver (called from subroutine physics_driver). +! lsm_from_MPAS : initialize local arrays. +! lsm_to_MPAS : copy local arrays to MPAS arrays. +! +! WRF physics called from driver_lsm: +! ------------------------ ---------- +! * module_sf_noahdrv : NOAH 4-layers land surface scheme. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * removed the pre-processor option "do_hydrostatic_pressure" before call to subroutine lsm. +! Laura D. Fowler (birch.ucar.edu) / 2013-05-29. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * moved the definition of isurban to landuse_init_forMPAS in mpas_atmphys_landuse.F. +! isurban is now defined as a function of the input landuse data file. +! Dominikus Heinzeller (IMK) / 2014-07-24. +! * removed the global variable qz0 and initialized the local variable qz0_p to 0. qz0 is only +! used in the MYJ PBL parameterization which is not available in MPAS. +! Laura D. Fowler (laura@ucar.edu) / 2015-03-05. +! * in subroutine lsm_from_MPAS, modified the initialization of local variable dzs_p. +! Laura D. 
Fowler (laura@ucar.edu) / 2015-04-11. +! * in subroutine lsm_to_MPAS, removed updating isltyp, ivgtyp, shdmin, and shdmax as they +! are constant in the Noah lsm. +! Laura D. Fowler (laura@ucar.edu) / 2015-04-11. +! * in subroutine lsm_from_MPAS, modified the calculation of rain_bl, now that the convection +! and cloud microphysics parameterizations are contained in "packages." The original +! calculation failed when config_convection_scheme or config_microphysics_scheme +! was set to 'off'. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-13. +! * added call to the subroutine sfcdiags to update the 2-meter temperature, potential temperature, and +! water vapor mixing ratio after call to lsm. +! Laura D. Fowler (laura@ucar.edu) / 2016-05-11. +! * added the calculation of surface variables over seaice cells when config_frac_seaice is set to true. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-03. + +! +! DOCUMENTATION: +! ./physics_wrf/module_sf_noahdrv.F: main driver for the "NOAH" land-surface parameterization. +! In the argument list,I added "OPTIONAL" to the declaration of a few arrays to avoid compiling +! with the "urban physics" option. These arrays are: +! .. num_roof_layers; num_wall_layers; num_road_layers;num_urban_layers. +! .. ust_urb2d;frc_urb2d;utype_urb2d. +! Laura D. Fowler (01-18-2011). 
contains -!================================================================================================== - subroutine allocate_lsm -!================================================================================================== +!================================================================================================================= + subroutine allocate_lsm(config_frac_seaice) +!================================================================================================================= + + logical,intent(in):: config_frac_seaice +!----------------------------------------------------------------------------------------------------------------- !arrays for soil layer properties: - if(.not.allocated(dzs_p) ) allocate(dzs_p(1:num_soils) ) - if(.not.allocated(smcrel_p) ) allocate(smcrel_p(ims:ime,1:num_soils,jms:jme) ) - if(.not.allocated(sh2o_p) ) allocate(sh2o_p(ims:ime,1:num_soils,jms:jme) ) - if(.not.allocated(smois_p) ) allocate(smois_p(ims:ime,1:num_soils,jms:jme) ) - if(.not.allocated(tslb_p) ) allocate(tslb_p(ims:ime,1:num_soils,jms:jme) ) + if(.not.allocated(dzs_p) ) allocate(dzs_p(1:num_soils) ) + if(.not.allocated(smcrel_p)) allocate(smcrel_p(ims:ime,1:num_soils,jms:jme)) + if(.not.allocated(sh2o_p) ) allocate(sh2o_p(ims:ime,1:num_soils,jms:jme) ) + if(.not.allocated(smois_p) ) allocate(smois_p(ims:ime,1:num_soils,jms:jme) ) + if(.not.allocated(tslb_p) ) allocate(tslb_p(ims:ime,1:num_soils,jms:jme) ) !other arrays: - if(.not.allocated(acsnom_p) ) allocate(acsnom_p(ims:ime,jms:jme) ) - if(.not.allocated(acsnow_p) ) allocate(acsnow_p(ims:ime,jms:jme) ) - if(.not.allocated(canwat_p) ) allocate(canwat_p(ims:ime,jms:jme) ) - if(.not.allocated(chs_p) ) allocate(chs_p(ims:ime,jms:jme) ) - if(.not.allocated(chs2_p) ) allocate(chs2_p(ims:ime,jms:jme) ) - if(.not.allocated(chklowq_p) ) allocate(chklowq_p(ims:ime,jms:jme) ) - if(.not.allocated(cpm_p) ) allocate(cpm_p(ims:ime,jms:jme) ) - if(.not.allocated(cqs2_p) ) 
allocate(cqs2_p(ims:ime,jms:jme) ) - if(.not.allocated(isltyp_p) ) allocate(isltyp_p(ims:ime,jms:jme) ) - if(.not.allocated(ivgtyp_p) ) allocate(ivgtyp_p(ims:ime,jms:jme) ) - if(.not.allocated(glw_p) ) allocate(glw_p(ims:ime,jms:jme) ) - if(.not.allocated(grdflx_p) ) allocate(grdflx_p(ims:ime,jms:jme) ) - if(.not.allocated(gsw_p) ) allocate(gsw_p(ims:ime,jms:jme) ) - if(.not.allocated(hfx_p) ) allocate(hfx_p(ims:ime,jms:jme) ) - if(.not.allocated(lai_p) ) allocate(lai_p(ims:ime,jms:jme) ) - if(.not.allocated(lh_p) ) allocate(lh_p(ims:ime,jms:jme) ) - if(.not.allocated(noahres_p) ) allocate(noahres_p(ims:ime,jms:jme) ) - if(.not.allocated(potevp_p) ) allocate(potevp_p(ims:ime,jms:jme) ) - if(.not.allocated(qfx_p) ) allocate(qfx_p(ims:ime,jms:jme) ) - if(.not.allocated(qgh_p) ) allocate(qgh_p(ims:ime,jms:jme) ) - if(.not.allocated(qsfc_p) ) allocate(qsfc_p(ims:ime,jms:jme) ) - if(.not.allocated(qz0_p) ) allocate(qz0_p(ims:ime,jms:jme) ) - if(.not.allocated(rainbl_p) ) allocate(rainbl_p(ims:ime,jms:jme) ) - if(.not.allocated(br_p) ) allocate(br_p(ims:ime,jms:jme) ) - if(.not.allocated(sfc_albbck_p) ) allocate(sfc_albbck_p(ims:ime,jms:jme) ) - if(.not.allocated(sfc_albedo_p) ) allocate(sfc_albedo_p(ims:ime,jms:jme) ) - if(.not.allocated(sfc_emibck_p) ) allocate(sfc_emibck_p(ims:ime,jms:jme) ) - if(.not.allocated(sfc_emiss_p) ) allocate(sfc_emiss_p(ims:ime,jms:jme) ) - if(.not.allocated(sfcrunoff_p) ) allocate(sfcrunoff_p(ims:ime,jms:jme) ) - if(.not.allocated(shdmin_p) ) allocate(shdmin_p(ims:ime,jms:jme) ) - if(.not.allocated(shdmax_p) ) allocate(shdmax_p(ims:ime,jms:jme) ) - if(.not.allocated(smstav_p) ) allocate(smstav_p(ims:ime,jms:jme) ) - if(.not.allocated(smstot_p) ) allocate(smstot_p(ims:ime,jms:jme) ) - if(.not.allocated(snoalb_p) ) allocate(snoalb_p(ims:ime,jms:jme) ) - if(.not.allocated(snotime_p) ) allocate(snotime_p(ims:ime,jms:jme) ) - if(.not.allocated(snopcx_p) ) allocate(snopcx_p(ims:ime,jms:jme) ) - if(.not.allocated(snow_p) ) 
allocate(snow_p(ims:ime,jms:jme) ) - if(.not.allocated(snowc_p) ) allocate(snowc_p(ims:ime,jms:jme) ) - if(.not.allocated(snowh_p) ) allocate(snowh_p(ims:ime,jms:jme) ) - if(.not.allocated(sr_p) ) allocate(sr_p(ims:ime,jms:jme) ) - if(.not.allocated(swdown_p) ) allocate(swdown_p(ims:ime,jms:jme) ) - if(.not.allocated(tmn_p) ) allocate(tmn_p(ims:ime,jms:jme) ) - if(.not.allocated(tsk_p) ) allocate(tsk_p(ims:ime,jms:jme) ) - if(.not.allocated(udrunoff_p) ) allocate(udrunoff_p(ims:ime,jms:jme) ) - if(.not.allocated(vegfra_p) ) allocate(vegfra_p(ims:ime,jms:jme) ) - if(.not.allocated(xice_p) ) allocate(xice_p(ims:ime,jms:jme) ) - if(.not.allocated(xland_p) ) allocate(xland_p(ims:ime,jms:jme) ) - if(.not.allocated(z0_p) ) allocate(z0_p(ims:ime,jms:jme) ) - if(.not.allocated(znt_p) ) allocate(znt_p(ims:ime,jms:jme) ) + if(.not.allocated(acsnom_p) ) allocate(acsnom_p(ims:ime,jms:jme) ) + if(.not.allocated(acsnow_p) ) allocate(acsnow_p(ims:ime,jms:jme) ) + if(.not.allocated(canwat_p) ) allocate(canwat_p(ims:ime,jms:jme) ) + if(.not.allocated(chs_p) ) allocate(chs_p(ims:ime,jms:jme) ) + if(.not.allocated(chs2_p) ) allocate(chs2_p(ims:ime,jms:jme) ) + if(.not.allocated(chklowq_p) ) allocate(chklowq_p(ims:ime,jms:jme) ) + if(.not.allocated(cpm_p) ) allocate(cpm_p(ims:ime,jms:jme) ) + if(.not.allocated(cqs2_p) ) allocate(cqs2_p(ims:ime,jms:jme) ) + if(.not.allocated(isltyp_p) ) allocate(isltyp_p(ims:ime,jms:jme) ) + if(.not.allocated(ivgtyp_p) ) allocate(ivgtyp_p(ims:ime,jms:jme) ) + if(.not.allocated(glw_p) ) allocate(glw_p(ims:ime,jms:jme) ) + if(.not.allocated(grdflx_p) ) allocate(grdflx_p(ims:ime,jms:jme) ) + if(.not.allocated(gsw_p) ) allocate(gsw_p(ims:ime,jms:jme) ) + if(.not.allocated(hfx_p) ) allocate(hfx_p(ims:ime,jms:jme) ) + if(.not.allocated(lai_p) ) allocate(lai_p(ims:ime,jms:jme) ) + if(.not.allocated(lh_p) ) allocate(lh_p(ims:ime,jms:jme) ) + if(.not.allocated(noahres_p) ) allocate(noahres_p(ims:ime,jms:jme) ) + if(.not.allocated(potevp_p) ) 
allocate(potevp_p(ims:ime,jms:jme) ) + if(.not.allocated(qfx_p) ) allocate(qfx_p(ims:ime,jms:jme) ) + if(.not.allocated(qgh_p) ) allocate(qgh_p(ims:ime,jms:jme) ) + if(.not.allocated(qsfc_p) ) allocate(qsfc_p(ims:ime,jms:jme) ) + if(.not.allocated(qz0_p) ) allocate(qz0_p(ims:ime,jms:jme) ) + if(.not.allocated(rainbl_p) ) allocate(rainbl_p(ims:ime,jms:jme) ) + if(.not.allocated(br_p) ) allocate(br_p(ims:ime,jms:jme) ) + if(.not.allocated(sfc_albbck_p)) allocate(sfc_albbck_p(ims:ime,jms:jme)) + if(.not.allocated(sfc_albedo_p)) allocate(sfc_albedo_p(ims:ime,jms:jme)) + if(.not.allocated(sfc_emibck_p)) allocate(sfc_emibck_p(ims:ime,jms:jme)) + if(.not.allocated(sfc_emiss_p) ) allocate(sfc_emiss_p(ims:ime,jms:jme) ) + if(.not.allocated(sfcrunoff_p) ) allocate(sfcrunoff_p(ims:ime,jms:jme) ) + if(.not.allocated(shdmin_p) ) allocate(shdmin_p(ims:ime,jms:jme) ) + if(.not.allocated(shdmax_p) ) allocate(shdmax_p(ims:ime,jms:jme) ) + if(.not.allocated(smstav_p) ) allocate(smstav_p(ims:ime,jms:jme) ) + if(.not.allocated(smstot_p) ) allocate(smstot_p(ims:ime,jms:jme) ) + if(.not.allocated(snoalb_p) ) allocate(snoalb_p(ims:ime,jms:jme) ) + if(.not.allocated(snotime_p) ) allocate(snotime_p(ims:ime,jms:jme) ) + if(.not.allocated(snopcx_p) ) allocate(snopcx_p(ims:ime,jms:jme) ) + if(.not.allocated(snow_p) ) allocate(snow_p(ims:ime,jms:jme) ) + if(.not.allocated(snowc_p) ) allocate(snowc_p(ims:ime,jms:jme) ) + if(.not.allocated(snowh_p) ) allocate(snowh_p(ims:ime,jms:jme) ) + if(.not.allocated(sr_p) ) allocate(sr_p(ims:ime,jms:jme) ) + if(.not.allocated(swdown_p) ) allocate(swdown_p(ims:ime,jms:jme) ) + if(.not.allocated(tmn_p) ) allocate(tmn_p(ims:ime,jms:jme) ) + if(.not.allocated(tsk_p) ) allocate(tsk_p(ims:ime,jms:jme) ) + if(.not.allocated(udrunoff_p) ) allocate(udrunoff_p(ims:ime,jms:jme) ) + if(.not.allocated(vegfra_p) ) allocate(vegfra_p(ims:ime,jms:jme) ) + if(.not.allocated(xice_p) ) allocate(xice_p(ims:ime,jms:jme) ) + if(.not.allocated(xland_p) ) 
allocate(xland_p(ims:ime,jms:jme) ) + if(.not.allocated(z0_p) ) allocate(z0_p(ims:ime,jms:jme) ) + if(.not.allocated(znt_p) ) allocate(znt_p(ims:ime,jms:jme) ) + if(.not.allocated(t2m_p) ) allocate(t2m_p(ims:ime,jms:jme) ) + if(.not.allocated(th2m_p) ) allocate(th2m_p(ims:ime,jms:jme) ) + if(.not.allocated(q2_p) ) allocate(q2_p(ims:ime,jms:jme) ) + + if(config_frac_seaice) then + if(.not.allocated(tsk_sea)) allocate(tsk_sea(ims:ime,jms:jme)) + if(.not.allocated(tsk_ice)) allocate(tsk_ice(ims:ime,jms:jme)) + endif end subroutine allocate_lsm -!================================================================================================== - subroutine deallocate_lsm -!================================================================================================== +!================================================================================================================= + subroutine deallocate_lsm(config_frac_seaice) +!================================================================================================================= + + logical,intent(in):: config_frac_seaice +!----------------------------------------------------------------------------------------------------------------- !arrays for soil layer properties: - if(allocated(dzs_p) ) deallocate(dzs_p ) - if(allocated(smcrel_p) ) deallocate(smcrel_p ) - if(allocated(sh2o_p) ) deallocate(sh2o_p ) - if(allocated(smois_p) ) deallocate(smois_p ) - if(allocated(tslb_p) ) deallocate(tslb_p ) + if(allocated(dzs_p) ) deallocate(dzs_p ) + if(allocated(smcrel_p)) deallocate(smcrel_p) + if(allocated(sh2o_p) ) deallocate(sh2o_p ) + if(allocated(smois_p) ) deallocate(smois_p ) + if(allocated(tslb_p) ) deallocate(tslb_p ) !other arrays: - if(allocated(acsnom_p) ) deallocate(acsnom_p ) - if(allocated(acsnow_p) ) deallocate(acsnow_p ) - if(allocated(canwat_p) ) deallocate(canwat_p ) - if(allocated(chs_p) ) deallocate(chs_p ) - if(allocated(chs2_p) ) deallocate(chs2_p ) - if(allocated(chklowq_p) ) 
deallocate(chklowq_p ) - if(allocated(cpm_p) ) deallocate(cpm_p ) - if(allocated(cqs2_p) ) deallocate(cqs2_p ) - if(allocated(glw_p) ) deallocate(glw_p ) - if(allocated(grdflx_p) ) deallocate(grdflx_p ) - if(allocated(gsw_p) ) deallocate(gsw_p ) - if(allocated(hfx_p) ) deallocate(hfx_p ) - if(allocated(isltyp_p) ) deallocate(isltyp_p ) - if(allocated(ivgtyp_p) ) deallocate(ivgtyp_p ) - if(allocated(lai_p) ) deallocate(lai_p ) - if(allocated(lh_p) ) deallocate(lh_p ) - if(allocated(noahres_p) ) deallocate(noahres_p ) - if(allocated(potevp_p) ) deallocate(potevp_p ) - if(allocated(qfx_p) ) deallocate(qfx_p ) - if(allocated(qgh_p) ) deallocate(qgh_p ) - if(allocated(qsfc_p) ) deallocate(qsfc_p ) - if(allocated(qz0_p) ) deallocate(qz0_p ) - if(allocated(rainbl_p) ) deallocate(rainbl_p ) - if(allocated(br_p) ) deallocate(br_p ) - if(allocated(sfc_albbck_p) ) deallocate(sfc_albbck_p ) - if(allocated(sfc_albedo_p) ) deallocate(sfc_albedo_p ) - if(allocated(sfc_emibck_p) ) deallocate(sfc_emibck_p ) - if(allocated(sfc_emiss_p) ) deallocate(sfc_emiss_p ) - if(allocated(sfcrunoff_p) ) deallocate(sfcrunoff_p ) - if(allocated(shdmin_p) ) deallocate(shdmin_p ) - if(allocated(shdmax_p) ) deallocate(shdmax_p ) - if(allocated(smstav_p) ) deallocate(smstav_p ) - if(allocated(smstot_p) ) deallocate(smstot_p ) - if(allocated(snoalb_p) ) deallocate(snoalb_p ) - if(allocated(snotime_p) ) deallocate(snotime_p ) - if(allocated(snopcx_p) ) deallocate(snopcx_p ) - if(allocated(snow_p) ) deallocate(snow_p ) - if(allocated(snowc_p) ) deallocate(snowc_p ) - if(allocated(snowh_p) ) deallocate(snowh_p ) - if(allocated(sr_p) ) deallocate(sr_p ) - if(allocated(swdown_p) ) deallocate(swdown_p ) - if(allocated(tmn_p) ) deallocate(tmn_p ) - if(allocated(tsk_p) ) deallocate(tsk_p ) - if(allocated(udrunoff_p) ) deallocate(udrunoff_p ) - if(allocated(vegfra_p) ) deallocate(vegfra_p ) - if(allocated(xice_p) ) deallocate(xice_p ) - if(allocated(xland_p) ) deallocate(xland_p ) - if(allocated(z0_p) ) 
deallocate(z0_p ) - if(allocated(znt_p) ) deallocate(znt_p ) + if(allocated(acsnom_p) ) deallocate(acsnom_p ) + if(allocated(acsnow_p) ) deallocate(acsnow_p ) + if(allocated(canwat_p) ) deallocate(canwat_p ) + if(allocated(chs_p) ) deallocate(chs_p ) + if(allocated(chs2_p) ) deallocate(chs2_p ) + if(allocated(chklowq_p) ) deallocate(chklowq_p ) + if(allocated(cpm_p) ) deallocate(cpm_p ) + if(allocated(cqs2_p) ) deallocate(cqs2_p ) + if(allocated(glw_p) ) deallocate(glw_p ) + if(allocated(grdflx_p) ) deallocate(grdflx_p ) + if(allocated(gsw_p) ) deallocate(gsw_p ) + if(allocated(hfx_p) ) deallocate(hfx_p ) + if(allocated(isltyp_p) ) deallocate(isltyp_p ) + if(allocated(ivgtyp_p) ) deallocate(ivgtyp_p ) + if(allocated(lai_p) ) deallocate(lai_p ) + if(allocated(lh_p) ) deallocate(lh_p ) + if(allocated(noahres_p) ) deallocate(noahres_p ) + if(allocated(potevp_p) ) deallocate(potevp_p ) + if(allocated(qfx_p) ) deallocate(qfx_p ) + if(allocated(qgh_p) ) deallocate(qgh_p ) + if(allocated(qsfc_p) ) deallocate(qsfc_p ) + if(allocated(qz0_p) ) deallocate(qz0_p ) + if(allocated(rainbl_p) ) deallocate(rainbl_p ) + if(allocated(br_p) ) deallocate(br_p ) + if(allocated(sfc_albbck_p)) deallocate(sfc_albbck_p) + if(allocated(sfc_albedo_p)) deallocate(sfc_albedo_p) + if(allocated(sfc_emibck_p)) deallocate(sfc_emibck_p) + if(allocated(sfc_emiss_p) ) deallocate(sfc_emiss_p ) + if(allocated(sfcrunoff_p) ) deallocate(sfcrunoff_p ) + if(allocated(shdmin_p) ) deallocate(shdmin_p ) + if(allocated(shdmax_p) ) deallocate(shdmax_p ) + if(allocated(smstav_p) ) deallocate(smstav_p ) + if(allocated(smstot_p) ) deallocate(smstot_p ) + if(allocated(snoalb_p) ) deallocate(snoalb_p ) + if(allocated(snotime_p) ) deallocate(snotime_p ) + if(allocated(snopcx_p) ) deallocate(snopcx_p ) + if(allocated(snow_p) ) deallocate(snow_p ) + if(allocated(snowc_p) ) deallocate(snowc_p ) + if(allocated(snowh_p) ) deallocate(snowh_p ) + if(allocated(sr_p) ) deallocate(sr_p ) + if(allocated(swdown_p) ) 
deallocate(swdown_p ) + if(allocated(tmn_p) ) deallocate(tmn_p ) + if(allocated(tsk_p) ) deallocate(tsk_p ) + if(allocated(udrunoff_p) ) deallocate(udrunoff_p ) + if(allocated(vegfra_p) ) deallocate(vegfra_p ) + if(allocated(xice_p) ) deallocate(xice_p ) + if(allocated(xland_p) ) deallocate(xland_p ) + if(allocated(z0_p) ) deallocate(z0_p ) + if(allocated(znt_p) ) deallocate(znt_p ) + if(allocated(t2m_p) ) deallocate(t2m_p ) + if(allocated(th2m_p) ) deallocate(th2m_p ) + if(allocated(q2_p) ) deallocate(q2_p ) + + if(config_frac_seaice) then + if(allocated(chs_sea) ) deallocate(chs_sea ) + if(allocated(chs2_sea)) deallocate(chs2_sea) + if(allocated(cqs2_sea)) deallocate(cqs2_sea) + if(allocated(cpm_sea) ) deallocate(cpm_sea ) + if(allocated(hfx_sea) ) deallocate(hfx_sea ) + if(allocated(qfx_sea) ) deallocate(qfx_sea ) + if(allocated(qgh_sea) ) deallocate(qgh_sea ) + if(allocated(qsfc_sea)) deallocate(qsfc_sea) + if(allocated(lh_sea) ) deallocate(lh_sea ) + if(allocated(tsk_sea) ) deallocate(tsk_sea ) + if(allocated(tsk_ice) ) deallocate(tsk_ice ) + endif end subroutine deallocate_lsm -!================================================================================================== - subroutine lsm_from_MPAS(mesh,diag_physics,sfc_input) -!================================================================================================== +!================================================================================================================= + subroutine lsm_from_MPAS(configs,mesh,diag_physics,sfc_input,its,ite) +!================================================================================================================= !input arguments: + type(mpas_pool_type),intent(in):: configs type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: sfc_input + integer,intent(in):: its,ite !local pointers: + logical,pointer:: config_frac_seaice + + character(len=StrKIND),pointer:: 
config_microp_scheme, & + config_convection_scheme + integer,dimension(:),pointer:: isltyp,ivgtyp real(kind=RKIND),dimension(:),pointer :: acsnom,acsnow,canwat,chs,chs2,chklowq,cpm,cqs2,glw, & @@ -233,14 +280,20 @@ subroutine lsm_from_MPAS(mesh,diag_physics,sfc_input) smstav,smstot,snotime,snopcx,sr,udrunoff,z0,znt real(kind=RKIND),dimension(:),pointer :: shdmin,shdmax,snoalb,sfc_albbck,snow,snowc,snowh,tmn, & skintemp,vegfra,xice,xland + real(kind=RKIND),dimension(:),pointer :: t2m,th2m,q2 real(kind=RKIND),dimension(:),pointer :: raincv,rainncv real(kind=RKIND),dimension(:,:),pointer:: sh2o,smcrel,smois,tslb,dzs !local variables and arrays: logical:: do_fill integer:: ip,iEdg + integer:: i,j,n -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- + + call mpas_pool_get_config(configs,'config_frac_seaice' ,config_frac_seaice ) + call mpas_pool_get_config(configs,'config_convection_scheme',config_convection_scheme) + call mpas_pool_get_config(configs,'config_microp_scheme' ,config_microp_scheme ) call mpas_pool_get_array(diag_physics,'acsnom' ,acsnom ) call mpas_pool_get_array(diag_physics,'acsnow' ,acsnow ) @@ -262,8 +315,6 @@ subroutine lsm_from_MPAS(mesh,diag_physics,sfc_input) call mpas_pool_get_array(diag_physics,'qgh' ,qgh ) call mpas_pool_get_array(diag_physics,'qsfc' ,qsfc ) call mpas_pool_get_array(diag_physics,'br' ,br ) - call mpas_pool_get_array(diag_physics,'raincv' ,raincv ) - call mpas_pool_get_array(diag_physics,'rainncv' ,rainncv ) call mpas_pool_get_array(diag_physics,'sfc_albedo',sfc_albedo) call mpas_pool_get_array(diag_physics,'sfc_emibck',sfc_emibck) call mpas_pool_get_array(diag_physics,'sfc_emiss' ,sfc_emiss ) @@ -272,10 +323,12 @@ subroutine lsm_from_MPAS(mesh,diag_physics,sfc_input) call mpas_pool_get_array(diag_physics,'smstot' ,smstot ) call 
mpas_pool_get_array(diag_physics,'snotime' ,snotime ) call mpas_pool_get_array(diag_physics,'snopcx' ,snopcx ) - call mpas_pool_get_array(diag_physics,'sr' ,sr ) call mpas_pool_get_array(diag_physics,'udrunoff' ,udrunoff ) call mpas_pool_get_array(diag_physics,'z0' ,z0 ) call mpas_pool_get_array(diag_physics,'znt' ,znt ) + call mpas_pool_get_array(diag_physics,'t2m' ,t2m ) + call mpas_pool_get_array(diag_physics,'th2m' ,th2m ) + call mpas_pool_get_array(diag_physics,'q2' ,q2 ) call mpas_pool_get_array(sfc_input,'isltyp' ,isltyp ) call mpas_pool_get_array(sfc_input,'ivgtyp' ,ivgtyp ) @@ -344,10 +397,12 @@ subroutine lsm_from_MPAS(mesh,diag_physics,sfc_input) smstot_p(i,j) = smstot(i) snotime_p(i,j) = snotime(i) snopcx_p(i,j) = snopcx(i) - sr_p(i,j) = sr(i) udrunoff_p(i,j) = udrunoff(i) z0_p(i,j) = z0(i) znt_p(i,j) = znt(i) + t2m_p(i,j) = t2m(i) + th2m_p(i,j) = th2m(i) + q2_p(i,j) = q2(i) isltyp_p(i,j) = isltyp(i) ivgtyp_p(i,j) = ivgtyp(i) @@ -368,25 +423,84 @@ subroutine lsm_from_MPAS(mesh,diag_physics,sfc_input) enddo enddo +!modify the surface albedo and surface emissivity, and surface temperatures over sea-ice points: + if(config_frac_seaice) then + do j = jts,jte + do i = its,ite + if(xice(i).ge.xice_threshold .and. xice(i).le.1._RKIND) then + sfc_albedo_p(i,j) = (sfc_albedo(i) - 0.08_RKIND*(1._RKIND-xice(i))) / xice(i) + sfc_emiss_p(i,j) = (sfc_emiss(i) - 0.98_RKIND*(1._RKIND-xice(i))) / xice(i) + else + sfc_albedo_p(i,j) = sfc_albedo(i) + sfc_emiss_p(i,j) = sfc_emiss(i) + endif + enddo + enddo + + !calculate sea-surface and sea-ice temperatures over sea-ice grid cells: + call correct_tsk_over_seaice(ims,ime,jms,jme,its,ite,jts,jte,xice_threshold,xice_p, & + tsk_p,tsk_sea,tsk_ice) + + do j = jts,jte + do i = its,ite + tsk_p(i,j) = tsk_ice(i,j) + enddo + enddo + endif + + do j = jts,jte + do i = its,ite + sr_p(i,j) = 0._RKIND + rainbl_p(i,j) = 0._RKIND + enddo + enddo + + if(config_microp_scheme .ne. 
'off') then + call mpas_pool_get_array(diag_physics,'sr' ,sr ) + call mpas_pool_get_array(diag_physics,'rainncv',rainncv) + + do j = jts,jte + do i = its,ite + sr_p(i,j) = sr(i) + rainbl_p(i,j) = rainbl_p(i,j) + rainncv(i) + enddo + enddo + endif + if(config_convection_scheme .ne. 'off') then + call mpas_pool_get_array(diag_physics,'raincv',raincv) + + do j = jts,jte + do i = its,ite + rainbl_p(i,j) = rainbl_p(i,j) + raincv(i) + enddo + enddo + endif + do j = jts,jte do i = its,ite - rainbl_p(i,j) = raincv(i) + rainncv(i) swdown_p(i,j) = gsw(i) / (1._RKIND - sfc_albedo(i)) enddo enddo end subroutine lsm_from_MPAS -!================================================================================================== - subroutine lsm_to_MPAS(mesh,diag_physics,sfc_input) -!================================================================================================== +!================================================================================================================= + subroutine lsm_to_MPAS(configs,mesh,diag_physics,sfc_input,its,ite) +!================================================================================================================= !input arguments: + type(mpas_pool_type),intent(in):: configs type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: sfc_input + integer,intent(in):: its,ite + !local pointers: + logical,pointer:: config_frac_seaice + + character(len=StrKIND),pointer:: config_microp_scheme + integer,dimension(:),pointer:: isltyp,ivgtyp real(kind=RKIND),dimension(:),pointer :: acsnom,acsnow,canwat,chs,chs2,chklowq,cpm,cqs2,glw, & @@ -395,13 +509,18 @@ subroutine lsm_to_MPAS(mesh,diag_physics,sfc_input) smstav,smstot,snotime,snopcx,sr,udrunoff,z0,znt real(kind=RKIND),dimension(:),pointer :: shdmin,shdmax,snoalb,sfc_albbck,snow,snowc,snowh,tmn, & skintemp,vegfra,xice,xland + real(kind=RKIND),dimension(:),pointer :: t2m,th2m,q2 
real(kind=RKIND),dimension(:),pointer :: raincv,rainncv real(kind=RKIND),dimension(:,:),pointer:: sh2o,smcrel,smois,tslb !local variables and arrays: integer:: ip,iEdg + integer:: i,j,n + +!----------------------------------------------------------------------------------------------------------------- -!-------------------------------------------------------------------------------------------------- + call mpas_pool_get_config(configs,'config_frac_seaice' ,config_frac_seaice ) + call mpas_pool_get_config(configs,'config_microp_scheme',config_microp_scheme) call mpas_pool_get_array(diag_physics,'acsnom' ,acsnom ) call mpas_pool_get_array(diag_physics,'acsnow' ,acsnow ) @@ -433,10 +552,12 @@ subroutine lsm_to_MPAS(mesh,diag_physics,sfc_input) call mpas_pool_get_array(diag_physics,'smstot' ,smstot ) call mpas_pool_get_array(diag_physics,'snotime' ,snotime ) call mpas_pool_get_array(diag_physics,'snopcx' ,snopcx ) - call mpas_pool_get_array(diag_physics,'sr' ,sr ) call mpas_pool_get_array(diag_physics,'udrunoff' ,udrunoff ) call mpas_pool_get_array(diag_physics,'z0' ,z0 ) call mpas_pool_get_array(diag_physics,'znt' ,znt ) + call mpas_pool_get_array(diag_physics,'t2m' ,t2m ) + call mpas_pool_get_array(diag_physics,'th2m' ,th2m ) + call mpas_pool_get_array(diag_physics,'q2' ,q2 ) call mpas_pool_get_array(sfc_input,'isltyp' ,isltyp ) call mpas_pool_get_array(sfc_input,'ivgtyp' ,ivgtyp ) @@ -498,10 +619,12 @@ subroutine lsm_to_MPAS(mesh,diag_physics,sfc_input) smstot(i) = smstot_p(i,j) snotime(i) = snotime_p(i,j) snopcx(i) = snopcx_p(i,j) - sr(i) = sr_p(i,j) udrunoff(i) = udrunoff_p(i,j) z0(i) = z0_p(i,j) znt(i) = znt_p(i,j) + t2m(i) = t2m_p(i,j) + th2m(i) = th2m_p(i,j) + q2(i) = q2_p(i,j) snoalb(i) = snoalb_p(i,j) sfc_albbck(i) = sfc_albbck_p(i,j) @@ -516,11 +639,42 @@ subroutine lsm_to_MPAS(mesh,diag_physics,sfc_input) enddo enddo + if(config_frac_seaice) then + do j = jts,jte + do i = its,ite + if(xice_p(i,j).ge.xice_threshold .and. 
xice_p(i,j).le.1._RKIND) then + chs(i) = xice_p(i,j)*chs_p(i,j) + (1._RKIND-xice_p(i,j))*chs_sea(i,j) + chs2(i) = xice_p(i,j)*chs2_p(i,j) + (1._RKIND-xice_p(i,j))*chs2_sea(i,j) + cqs2(i) = xice_p(i,j)*cqs2_p(i,j) + (1._RKIND-xice_p(i,j))*cqs2_sea(i,j) + cpm(i) = xice_p(i,j)*cpm_p(i,j) + (1._RKIND-xice_p(i,j))*cpm_sea(i,j) + hfx(i) = xice_p(i,j)*hfx_p(i,j) + (1._RKIND-xice_p(i,j))*hfx_sea(i,j) + lh(i) = xice_p(i,j)*lh_p(i,j) + (1._RKIND-xice_p(i,j))*lh_sea(i,j) + qfx(i) = xice_p(i,j)*qfx_p(i,j) + (1._RKIND-xice_p(i,j))*qfx_sea(i,j) + qgh(i) = xice_p(i,j)*qgh_p(i,j) + (1._RKIND-xice_p(i,j))*qgh_sea(i,j) + qsfc(i) = xice_p(i,j)*qsfc_p(i,j) + (1._RKIND-xice_p(i,j))*qsfc_sea(i,j) + skintemp(i) = xice_p(i,j)*tsk_p(i,j) + (1._RKIND-xice_p(i,j))*tsk_sea(i,j) + sfc_albedo(i) = xice_p(i,j)*sfc_albedo_p(i,j) + (1._RKIND-xice_p(i,j))*0.08_RKIND + sfc_emiss(i) = xice_p(i,j)*sfc_emiss_p(i,j) + (1._RKIND-xice_p(i,j))*0.98_RKIND + endif + enddo + enddo + endif + + if(config_microp_scheme .ne. 'off') then + call mpas_pool_get_array(diag_physics,'sr',sr) + + do j = jts,jte + do i = its,ite + sr(i) = sr_p(i,j) + enddo + enddo + endif + end subroutine lsm_to_MPAS -!================================================================================================== +!================================================================================================================= subroutine init_lsm(dminfo,mesh,configs,diag_physics,sfc_input) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info),intent(in):: dminfo @@ -531,34 +685,30 @@ subroutine init_lsm(dminfo,mesh,configs,diag_physics,sfc_input) type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: sfc_input -!-------------------------------------------------------------------------------------------------- -! 
write(0,*) -! write(0,*) '--- enter land surface model initialization:' +!----------------------------------------------------------------------------------------------------------------- lsm_select: select case (trim(lsm_scheme)) case ("noah") -! write(0,*) ' enter subroutine noah_init_forMPAS:' call noah_init_forMPAS(dminfo,mesh,configs,diag_physics,sfc_input) -! write(0,*) ' end subroutine noah_init_forMPAS' case default end select lsm_select -! write(0,*) '--- end land surface model initialization' - end subroutine init_lsm -!================================================================================================== - subroutine driver_lsm(itimestep,configs,mesh,diag_physics,sfc_input) -!================================================================================================== +!================================================================================================================= + subroutine driver_lsm(itimestep,configs,mesh,diag_physics,sfc_input,its,ite) +!================================================================================================================= !input arguments: - integer,intent(in):: itimestep type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(in):: configs + integer,intent(in):: its,ite + integer,intent(in):: itimestep + !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: sfc_input @@ -567,12 +717,9 @@ subroutine driver_lsm(itimestep,configs,mesh,diag_physics,sfc_input) logical,pointer:: config_sfc_albedo character(len=StrKIND),pointer:: mminlu -!--------------------------------------------------------------------------------------------- -! write(0,*) -! write(0,*) '--- enter subroutine driver_lsm:' -! write(0,*) '--- isice = ',isice -! write(0,*) '--- iswater = ', iswater -! 
write(0,*) '--- isurban = ', isurban +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine driver_lsm:' call mpas_pool_get_config(configs,'config_sfc_albedo' ,config_sfc_albedo ) call mpas_pool_get_array(sfc_input,'mminlu',mminlu) @@ -582,7 +729,7 @@ subroutine driver_lsm(itimestep,configs,mesh,diag_physics,sfc_input) 102 format(3i6,8(1x,e15.8)) !copy MPAS arrays to local arrays: - call lsm_from_MPAS(mesh,diag_physics,sfc_input) + call lsm_from_MPAS(configs,mesh,diag_physics,sfc_input,its,ite) ! write(0,*) '--- end lsm_from_MPAS' !call to land-surface scheme: @@ -612,10 +759,10 @@ subroutine driver_lsm(itimestep,configs,mesh,diag_physics,sfc_input) dzs = dzs_p , isurban = isurban , isice = isice , & rovcp = rcp , dt = dt_pbl , myj = myj , & itimestep = itimestep , frpcpn = frpcpn , rdlai2d = rdlai2d , & - xice_threshold = xice_threshold , & - usemonalb = config_sfc_albedo , & + xice_threshold = xice_threshold , & + usemonalb = config_sfc_albedo , & mminlu = mminlu , & - num_soil_layers = num_soils , & + num_soil_layers = num_soils , & num_roof_layers = num_soils , & num_wall_layers = num_soils , & num_road_layers = num_soils , & @@ -626,16 +773,66 @@ subroutine driver_lsm(itimestep,configs,mesh,diag_physics,sfc_input) its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & ) + call sfcdiags( & + hfx = hfx_p , qfx = qfx_p , tsk = tsk_p , qsfc = qsfc_p , chs = chs_p , & + chs2 = chs2_p , cqs2 = cqs2_p , t2 = t2m_p , th2 = th2m_p , q2 = q2_p , & + psfc = psfc_p , t3d = t_p , qv3d = qv_p , cp = cp , R_d = R_d , & + rovcp = rcp , ua_phys = ua_phys , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + ) + + case default end select lsm_select !copy local arrays to MPAS 
grid: - call lsm_to_MPAS(mesh,diag_physics,sfc_input) -! write(0,*) '--- end subroutine driver_lsm' + call lsm_to_MPAS(configs,mesh,diag_physics,sfc_input,its,ite) + +!write(0,*) '--- end subroutine driver_lsm.' end subroutine driver_lsm -!================================================================================================== +!================================================================================================================= + subroutine correct_tsk_over_seaice(ims,ime,jms,jme,its,ite,jts,jte,xice_thresh,xice,tsk,tsk_sea,tsk_ice) +!================================================================================================================= + +!input arguments: + integer,intent(in):: ims,ime,its,ite,jms,jme,jts,jte + real(kind=RKIND),intent(in):: xice_thresh + real(kind=RKIND),intent(in),dimension(ims:ime,jms:jme):: tsk,xice + +!inout arguments: + real(kind=RKIND),intent(inout),dimension(ims:ime,jms:jme):: tsk_sea,tsk_ice + +!local variables: + integer:: i,j + +!----------------------------------------------------------------------------------------------------------------- + +!initialize the local sea-surface temperature and local sea-ice temperature to the local surface +!temperature: + do j = jts,jte + do i = its,ite + tsk_sea(i,j) = tsk(i,j) + tsk_ice(i,j) = tsk(i,j) + + if(xice(i,j).ge.xice_thresh .and. xice(i,j).le.1._RKIND) then + !over sea-ice grid cells, limit sea-surface temperatures to temperatures warmer than 271.4: + tsk_sea(i,j) = max(tsk_sea(i,j),271.4_RKIND) + + !over sea-ice grid cells, avoids unphysically too cold sea-ice temperatures for grid cells + !with small sea-ice fractions: + if(xice(i,j).lt.0.2_RKIND .and. tsk_ice(i,j).lt.253.15_RKIND) tsk_ice(i,j) = 253.15_RKIND + if(xice(i,j).lt.0.1_RKIND .and. 
tsk_ice(i,j).lt.263.15_RKIND) tsk_ice(i,j) = 263.15_RKIND + endif + enddo + enddo + + end subroutine correct_tsk_over_seaice + +!================================================================================================================= end module mpas_atmphys_driver_lsm -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_driver_microphysics.F b/src/core_atmosphere/physics/mpas_atmphys_driver_microphysics.F index 74a9be0df1..4c4ad1fa77 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_driver_microphysics.F +++ b/src/core_atmosphere/physics/mpas_atmphys_driver_microphysics.F @@ -5,113 +5,147 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_driver_microphysics use mpas_kind_types use mpas_derived_types use mpas_pool_routines use mpas_atmphys_constants - use mpas_atmphys_vars + use mpas_atmphys_init_microphysics use mpas_atmphys_interface + use mpas_atmphys_vars !wrf physics: use module_mp_kessler + use module_mp_thompson use module_mp_wsm6 implicit none private public:: allocate_microphysics, & deallocate_microphysics, & - microphysics_driver , & + driver_microphysics, & microphysics_init -!>\brief MPAS driver for parameterization of cloud microphysics processes. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. 
-!> -!>\details -!> -!> subroutines in mpas_atmphys_driver_microphysics: -!> ------------------------------------------------ -!> allocate_microphysics : allocate local arrays for parameterization of cloud microphysics. -!> deallocate_microphysics: deallocate local arrays for parameterization of cloud microphysics. -!> microphysics_init : initialization of individual cloud microphysics schemes. -!> microphysics_driver : main driver (called from mpas_atm_time_integration). -!> precip_from_MPAS : initialize timestep local arrays for precipitation. -!> precip_to_MPAS : copy local arrays to MPAS arrays. -!> compute_radar_reflectivity: compute radar reflectivities. -!> -!> WRF physics called from microphysics_driver: -!> -------------------------------------------- -!> * module_mp_kessler : Kessler cloud microphysics. -!> * module_mp_wsm6 : WSM6 cloud microphysics. -!> -!> comments: -!> unlike all the other physics parameterizations, parameterizations of cloud microphysics schemes -!> are called at the bottom of subroutine atm_srk3 in module atm_time_integration. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * removed call to the Thompson cloud microphysics scheme until the scheme is updated to that -!> in WRF revision 3.5. -!> Laura D. Fowler (laura@ucar.edu) / 2013-05-29. -!> * added subroutine compute_relhum to calculate the relative humidity using the functions -!> rslf and rsif from the Thompson cloud microphysics scheme. -!> Laura D. Fowler (laura@ucar.edu) / 2013-07-12. -!> * removed the argument tend from the call to microphysics_from_MPAS (not needed). -!> Laura D. Fowler (laura@ucar.edu) / 2013-11-07. -!> * in call to subroutine wsm6, replaced the variable g (that originally pointed to gravity) -!> with gravity, for simplicity. -!> Laura D. Fowler (laura@ucar.edu) / 2014-03-21. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. 
Fowler (laura@ucar.edu) / 2014-04-22. -!> * Modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * Moved the variable relhum from the diag_physics to the diag pool. Changed the argument -!> list for the subroutine compute_relhum accordingly. -!> Laura D. Fowler (laura@ucar.edu) / 2015-04-22. +!MPAS driver for parameterization of cloud microphysics processes. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_driver_microphysics: +! ------------------------------------------------ +! allocate_microphysics : allocate local arrays for parameterization of cloud microphysics. +! deallocate_microphysics : deallocate local arrays for parameterization of cloud microphysics. +! microphysics_init : initialization of individual cloud microphysics schemes. +! driver_microphysics : main driver (called from mpas_atm_time_integration). +! precip_from_MPAS : initialize timestep local arrays for precipitation. +! precip_to_MPAS : copy local arrays to MPAS arrays. +! compute_radar_reflectivity: compute radar reflectivities. +! compute_relhum : compute relative humidity. +! +! WRF physics called from microphysics_driver: +! -------------------------------------------- +! * module_mp_kessler : Kessler cloud microphysics. +! * module_mp_thompson: Thompson cloud microphysics. +! * module_mp_wsm6 : WSM6 cloud microphysics. +! +! comments: +! unlike all the other physics parameterizations, parameterizations of cloud microphysics schemes +! are called at the bottom of subroutine atm_srk3 in module atm_time_integration. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * removed call to the Thompson cloud microphysics scheme until scheme is updated to that in WRF revision 3.5. +! Laura D. Fowler (laura@ucar.edu) / 2013-05-29. +! * added subroutine compute_relhum to calculate the relative humidity using the functions rslf and rsif from +! 
the Thompson cloud microphysics scheme. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-12. +! * removed the argument tend from the call to microphysics_from_MPAS (not needed). +! Laura D. Fowler (laura@ucar.edu) / 2013-11-07. +! * in call to subroutine wsm6, replaced the variable g (that originally pointed to gravity) with gravity, +! for simplicity. +! Laura D. Fowler (laura@ucar.edu) / 2014-03-21. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * moved the variable relhum from the diag_physics to the diag pool. Changed the argument +! list for the subroutine compute_relhum accordingly. +! Laura D. Fowler (laura@ucar.edu) / 2015-04-22. +! * added parameterization of the Thompson cloud microphysics from WRF version 3.8. +! Laura D. Fowler (laura@ucar.edu) / 2016-03-28. +! * in subroutine compute_relhum, multiply relhum by 100. so that it has the same unit as in the initial +! conditions. +! Laura D. Fowler (laura@ucar.edu) / 2016-06-20. +! * added parameterization of the WSM6 cloud microphysics from WRF version 3.8.1. To initialize WSM6 as in its +! original version, set the hail_option to 0. +! Laura D. Fowler (laura@ucar.edu) / 2016-09-19. + +!--- initialization option for WSM6 from WRF version 3.8.1. this option could also be set as a namelist parameter. 
+ integer,parameter:: hail_opt = 0 contains -!================================================================================================== +!================================================================================================================= subroutine allocate_microphysics -!================================================================================================== +!================================================================================================================= !sounding variables: - if(.not.allocated(rho_p) ) allocate(rho_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(th_p) ) allocate(th_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(pi_p) ) allocate(pi_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(pres_p) ) allocate(pres_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(z_p) ) allocate(z_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(dz_p) ) allocate(dz_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(rho_p) ) allocate(rho_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(th_p) ) allocate(th_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(pi_p) ) allocate(pi_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(pres_p) ) allocate(pres_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(z_p) ) allocate(z_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(dz_p) ) allocate(dz_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(w_p) ) allocate(w_p(ims:ime,kms:kme,jms:jme) ) !mass mixing ratios: - if(.not.allocated(qv_p) ) allocate(qv_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(qc_p) ) allocate(qc_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(qr_p) ) allocate(qr_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qv_p) ) allocate(qv_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(qc_p) ) allocate(qc_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(qr_p) ) allocate(qr_p(ims:ime,kms:kme,jms:jme)) !surface precipitation: - if(.not.allocated(rainnc_p) ) allocate(rainnc_p(ims:ime,jms:jme) ) - if(.not.allocated(rainncv_p) ) 
allocate(rainncv_p(ims:ime,jms:jme) ) + if(.not.allocated(rainnc_p) ) allocate(rainnc_p(ims:ime,jms:jme) ) + if(.not.allocated(rainncv_p)) allocate(rainncv_p(ims:ime,jms:jme)) microp_select: select case(microp_scheme) - case ("wsm6") + case ("mp_thompson","mp_wsm6") !mass mixing ratios: - if(.not.allocated(qi_p) ) allocate(qi_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(qs_p) ) allocate(qs_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(qg_p) ) allocate(qg_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qi_p) ) allocate(qi_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(qs_p) ) allocate(qs_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(qg_p) ) allocate(qg_p(ims:ime,kms:kme,jms:jme)) !surface precipitation: - if(.not.allocated(sr_p) ) allocate(sr_p(ims:ime,jms:jme) ) - if(.not.allocated(snownc_p) ) allocate(snownc_p(ims:ime,jms:jme) ) - if(.not.allocated(snowncv_p) ) allocate(snowncv_p(ims:ime,jms:jme) ) - if(.not.allocated(graupelnc_p) ) allocate(graupelnc_p(ims:ime,jms:jme) ) - if(.not.allocated(graupelncv_p) ) allocate(graupelncv_p(ims:ime,jms:jme) ) + if(.not.allocated(sr_p) ) allocate(sr_p(ims:ime,jms:jme) ) + if(.not.allocated(snownc_p) ) allocate(snownc_p(ims:ime,jms:jme) ) + if(.not.allocated(snowncv_p) ) allocate(snowncv_p(ims:ime,jms:jme) ) + if(.not.allocated(graupelnc_p) ) allocate(graupelnc_p(ims:ime,jms:jme) ) + if(.not.allocated(graupelncv_p)) allocate(graupelncv_p(ims:ime,jms:jme)) + + !cloud water,cloud ice,and snow effective radii: + if(.not.allocated(recloud_p) ) allocate(recloud_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(reice_p) ) allocate(reice_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(resnow_p) ) allocate(resnow_p(ims:ime,kms:kme,jms:jme) ) + + microp2_select: select case(microp_scheme) + + case("mp_thompson") + !number concentrations: + if(.not.allocated(ntc_p)) allocate(ntc_p(ims:ime,jms:jme)) + if(.not.allocated(muc_p)) allocate(muc_p(ims:ime,jms:jme)) + if(.not.allocated(ni_p) ) allocate(ni_p(ims:ime,kms:kme,jms:jme)) + 
if(.not.allocated(nr_p) ) allocate(nr_p(ims:ime,kms:kme,jms:jme)) + + if(.not.allocated(rainprod_p)) allocate(rainprod_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(evapprod_p)) allocate(evapprod_p(ims:ime,kms:kme,jms:jme)) + + case default + + end select microp2_select case default @@ -119,9 +153,9 @@ subroutine allocate_microphysics end subroutine allocate_microphysics -!================================================================================================== +!================================================================================================================= subroutine deallocate_microphysics -!================================================================================================== +!================================================================================================================= !sounding variables: if(allocated(rho_p) ) deallocate(rho_p ) @@ -130,6 +164,7 @@ subroutine deallocate_microphysics if(allocated(pres_p) ) deallocate(pres_p ) if(allocated(z_p) ) deallocate(z_p ) if(allocated(dz_p) ) deallocate(dz_p ) + if(allocated(w_p) ) deallocate(w_p ) !mass mixing ratios: if(allocated(qv_p) ) deallocate(qv_p ) @@ -142,7 +177,7 @@ subroutine deallocate_microphysics microp_select: select case(microp_scheme) - case ("wsm6") + case ("mp_thompson","mp_wsm6") !mass mixing ratios: if(allocated(qi_p) ) deallocate(qi_p ) if(allocated(qs_p) ) deallocate(qs_p ) @@ -155,36 +190,65 @@ subroutine deallocate_microphysics if(allocated(graupelnc_p) ) deallocate(graupelnc_p ) if(allocated(graupelncv_p) ) deallocate(graupelncv_p ) + !cloud water,cloud ice,and snow effective radii: + if(.not.allocated(recloud_p) ) allocate(recloud_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(reice_p) ) allocate(reice_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(resnow_p) ) allocate(resnow_p(ims:ime,kms:kme,jms:jme) ) + + microp2_select: select case(microp_scheme) + + case("mp_thompson") + !number concentrations: + if(allocated(ntc_p)) 
deallocate(ntc_p) + if(allocated(muc_p)) deallocate(muc_p) + if(allocated(ni_p) ) deallocate(ni_p ) + if(allocated(nr_p) ) deallocate(nr_p ) + + if(allocated(rainprod_p)) deallocate(rainprod_p) + if(allocated(evapprod_p)) deallocate(evapprod_p) + + case default + + end select microp2_select + case default end select microp_select end subroutine deallocate_microphysics -!================================================================================================== - subroutine microphysics_init -!================================================================================================== -! write(0,*) -! write(0,*) '--- enter cloud microphysics initialization:' +!================================================================================================================= + subroutine microphysics_init(dminfo,mesh,sfc_input,diag_physics) +!================================================================================================================= - microp_select: select case(microp_scheme) +!input arguments: + type(dm_info),intent(in):: dminfo + type(mpas_pool_type),intent(in):: mesh + type(mpas_pool_type),intent(in):: sfc_input - case("wsm6") -! write(0,*) '--- enter subroutine wsm6init:' - call wsm6init(rho_a,rho_r,rho_s,cliq,cpv,.false.) -! write(0,*) '--- end subroutine wsm6init' +!inout arguments: + type(mpas_pool_type),intent(inout):: diag_physics - case default +!----------------------------------------------------------------------------------------------------------------- - end select microp_select + microp_select: select case(microp_scheme) + + case("mp_thompson") + call thompson_init(l_mp_tables) + call init_thompson_clouddroplets_forMPAS(mesh,sfc_input,diag_physics) -! write(0,*) '--- end cloud microphysics initialization' + case("mp_wsm6") + call wsm6init(rho_a,rho_r,rho_s,cliq,cpv,hail_opt,.false.) 
+ + case default + + end select microp_select end subroutine microphysics_init -!================================================================================================== - subroutine microphysics_driver(configs,mesh,state,time_lev,diag,diag_physics,tend,itimestep) -!================================================================================================== +!================================================================================================================= + subroutine driver_microphysics(configs,mesh,state,time_lev,diag,diag_physics,tend,itimestep,its,ite) +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: configs @@ -192,6 +256,7 @@ subroutine microphysics_driver(configs,mesh,state,time_lev,diag,diag_physics,ten integer,intent(in):: time_lev integer,intent(in):: itimestep + integer,intent(in):: its,ite !inout arguments: type(mpas_pool_type),intent(inout):: state @@ -203,49 +268,27 @@ subroutine microphysics_driver(configs,mesh,state,time_lev,diag,diag_physics,ten logical:: log_microphysics integer:: i,icell,icount,istep,j,k,kk -!================================================================================================== -! write(0,*) -! write(0,*) '--- enter subroutine microphysics_driver: itimestep=', itimestep -! write(0,*) ' dt_microp=',dt_microp -! write(0,*) ' n_microp =',n_microp - -!initialization: -! write(0,*) ' ims= ',ims,' ime=',ime -! write(0,*) ' jms= ',jms,' jme=',jme -! write(0,*) ' kms= ',kms,' kme=',kme -! write(0,*) -! write(0,*) ' ids= ',ids,' ide=',ide -! write(0,*) ' jds= ',jds,' jde=',jde -! write(0,*) ' kds= ',kds,' kde=',kde -! write(0,*) -! write(0,*) ' its= ',its,' ite=',ite -! write(0,*) ' jts= ',jts,' jte=',jte -! 
write(0,*) ' kts= ',kts,' kte=',kte +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '---enter subroutine driver_microphysics:' !... allocation of microphysics arrays: +!$OMP MASTER call allocate_microphysics +!$OMP END MASTER +!$OMP BARRIER !... initialization of precipitation related arrays: - call precip_from_MPAS(diag_physics,mesh) + call precip_from_MPAS(diag_physics,its,ite) !... initialization of soundings for non-hydrostatic dynamical cores. - call microphysics_from_MPAS(mesh,state,time_lev,diag) - -!-------------------------------------------------------------------------------------------------- + call microphysics_from_MPAS(mesh,state,time_lev,diag,diag_physics,its,ite) !... call to different cloud microphysics schemes: - -!-------------------------------------------------------------------------------------------------- - - istep = 1 - - do while (istep .le. n_microp) - - microp_select: select case(microp_scheme) - - case ("kessler") - - call kessler( & + microp_select: select case(microp_scheme) + + case ("mp_kessler") + call kessler( & t = th_p , qv = qv_p , qc = qc_p , & qr = qr_p , rho = rho_p , pii = pi_p , & dt_in = dt_microp , z = z_p , xlv = xlv , & @@ -257,78 +300,97 @@ subroutine microphysics_driver(configs,mesh,state,time_lev,diag,diag_physics,ten ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & ) -! 
write(0,*) '--- end kessler:',istep - - case ("wsm6") - call wsm6( & - th = th_p , q = qv_p , qc = qc_p , & - qr = qr_p , qi = qi_p , qs = qs_p , & - qg = qg_p , den = rho_p , pii = pi_p , & - p = pres_p , delz = dz_p , delt = dt_microp , & - g = gravity , cpd = cp , cpv = cpv , & - rd = R_d , rv = R_v , t0c = svpt0 , & - ep1 = ep_1 , ep2 = ep_2 , qmin = epsilon , & - xls = xls , xlv0 = xlv , xlf0 = xlf , & - den0 = rho_a , denr = rho_w , cliq = cliq , & - cice = cice , psat = psat , rain = rainnc_p , & - rainncv = rainncv_p , snow = snownc_p , snowncv = snowncv_p , & - graupel = graupelnc_p , graupelncv = graupelncv_p , sr = sr_p , & - ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & - ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & - its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & - ) -! write(0,*) '--- end wsm6:',istep - case default + case ("mp_thompson") + istep = 1 + do while (istep .le. n_microp) + call mp_gt_driver( & + th = th_p , qv = qv_p , qc = qc_p , & + qr = qr_p , qi = qi_p , qs = qs_p , & + qg = qg_p , ni = ni_p , nr = nr_p , & + pii = pi_p , p = pres_p , dz = dz_p , & + w = w_p , dt_in = dt_microp , itimestep = itimestep , & + rainnc = rainnc_p , rainncv = rainncv_p , snownc = snownc_p , & + snowncv = snowncv_p , graupelnc = graupelnc_p , graupelncv = graupelncv_p , & + sr = sr_p , rainprod = rainprod_p , evapprod = evapprod_p , & + re_cloud = recloud_p , re_ice = reice_p , re_snow = resnow_p , & + has_reqc = has_reqc , has_reqi = has_reqi , has_reqs = has_reqs , & + ntc = ntc_p , muc = muc_p , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + ) + istep = istep + 1 + enddo + + case ("mp_wsm6") + call wsm6( & + th = th_p , q = qv_p , qc = qc_p , & + qr = qr_p , qi = qi_p , qs = qs_p , & + qg = qg_p 
, den = rho_p , pii = pi_p , & + p = pres_p , delz = dz_p , delt = dt_microp , & + g = gravity , cpd = cp , cpv = cpv , & + rd = R_d , rv = R_v , t0c = svpt0 , & + ep1 = ep_1 , ep2 = ep_2 , qmin = epsilon , & + xls = xls , xlv0 = xlv , xlf0 = xlf , & + den0 = rho_a , denr = rho_w , cliq = cliq , & + cice = cice , psat = psat , rain = rainnc_p , & + rainncv = rainncv_p , snow = snownc_p , snowncv = snowncv_p , & + graupel = graupelnc_p , graupelncv = graupelncv_p , sr = sr_p , & + re_cloud = recloud_p , re_ice = reice_p , re_snow = resnow_p , & + has_reqc = has_reqc , has_reqi = has_reqi , has_reqs = has_reqs , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + ) + + case default - end select microp_select - - istep = istep + 1 - - end do + end select microp_select !... calculate the 10cm radar reflectivity and relative humidity, if needed: if (l_diags) then - ! Ensure that we only call compute_radar_reflectivity() if we are using an MPS that - ! supports the computation of simulated radar reflectivity - if (trim(microp_scheme) == "wsm6") then - call compute_radar_reflectivity(diag_physics) + !ensure that we only call compute_radar_reflectivity() if we are using an MPS that supports + !the computation of simulated radar reflectivity: + if(trim(microp_scheme) == "mp_wsm6" .or. & + trim(microp_scheme) == "mp_thompson") then + call compute_radar_reflectivity(diag_physics,its,ite) else write(0,*) '*** NOTICE: NOT computing simulated radar reflectivity' - write(0,*) ' since WSM6 microphysics scheme was not selected' + write(0,*) ' since WSM6 or Thompson microphysics scheme was not selected' end if - ! calculate the relative humidity over water if the temperature is strictly greater than 0.C, - ! over ice otherwise. 
- call compute_relhum(mesh,diag) + !calculate the relative humidity over water if the temperature is strictly greater than 0.C, + !over ice otherwise. + call compute_relhum(diag,its,ite) end if !... copy updated precipitation from the wrf-physics grid back to the geodesic-dynamics grid: - call precip_to_MPAS(configs,mesh,diag_physics) + call precip_to_MPAS(configs,diag_physics,its,ite) !... copy updated cloud microphysics variables from the wrf-physics grid back to the geodesic- ! dynamics grid: - call microphysics_to_MPAS(mesh,state,time_lev,diag,tend,itimestep) + call microphysics_to_MPAS(mesh,state,time_lev,diag,diag_physics,tend,itimestep,its,ite) !... deallocation of all microphysics arrays: +!$OMP BARRIER +!$OMP MASTER call deallocate_microphysics +!$OMP END MASTER -!formats: - 200 format(i3,i6,10(1x,e15.8)) - 201 format(3i6,10(1x,e15.8)) - 203 format('microphysics begins:',3i6,2(1x,f6.1)) - 204 format('microphysics precip:',3i6,8(1x,e15.8)) +!write(0,*) '---enter subroutine driver_microphysics:' +!write(0,*) - end subroutine microphysics_driver + end subroutine driver_microphysics -!================================================================================================== - subroutine precip_from_MPAS(diag_physics,mesh) -!================================================================================================== +!================================================================================================================= + subroutine precip_from_MPAS(diag_physics,its,ite) +!================================================================================================================= !input variables: - type(mpas_pool_type) ,intent(in):: mesh + integer,intent(in):: its,ite !output variables: type(mpas_pool_type),intent(inout):: diag_physics @@ -338,11 +400,9 @@ subroutine precip_from_MPAS(diag_physics,mesh) real,dimension(:),pointer:: graupelncv,rainncv,snowncv,sr !local variables and arrays: - integer:: i,iCell,j + integer:: i,j 
-!-------------------------------------------------------------------------------------------------- - - call mpas_pool_get_dimension(mesh,'nCellsSolve',nCellsSolve) +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_array(diag_physics,'graupelncv',graupelncv) call mpas_pool_get_array(diag_physics,'rainncv' ,rainncv ) @@ -357,16 +417,14 @@ subroutine precip_from_MPAS(diag_physics,mesh) enddo enddo - do iCell = 1, nCellsSolve - rainncv(iCell) = 0._RKIND + do i = its,ite + rainncv(i) = 0._RKIND enddo !variables specific to different cloud microphysics schemes: - microp_select_init: select case(microp_scheme) - case ("wsm6") - + case ("mp_thompson","mp_wsm6") do j = jts, jte do i = its, ite snowncv_p(i,j) = 0._RKIND @@ -377,10 +435,10 @@ subroutine precip_from_MPAS(diag_physics,mesh) enddo enddo - do iCell = 1, nCellsSolve - snowncv(iCell) = 0._RKIND - graupelncv(iCell) = 0._RKIND - sr(iCell) = 0._RKIND + do i = its,ite + snowncv(i) = 0._RKIND + graupelncv(i) = 0._RKIND + sr(i) = 0._RKIND enddo case default @@ -389,19 +447,19 @@ subroutine precip_from_MPAS(diag_physics,mesh) end subroutine precip_from_MPAS -!================================================================================================== - subroutine precip_to_MPAS(configs,mesh,diag_physics) -!================================================================================================== +!================================================================================================================= + subroutine precip_to_MPAS(configs,diag_physics,its,ite) +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: configs - type(mpas_pool_type),intent(in):: mesh + + integer,intent(in):: its,ite !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics !local pointers: - integer,pointer:: 
nCellsSolve integer,dimension(:),pointer:: i_rainnc real(kind=RKIND),pointer:: config_bucket_rainnc @@ -413,12 +471,10 @@ subroutine precip_to_MPAS(configs,mesh,diag_physics) integer:: i,j,k real(kind=RKIND):: rho_a -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_config(configs,'config_bucket_rainnc',config_bucket_rainnc) - call mpas_pool_get_dimension(mesh,'nCellsSolve',nCellsSolve) - call mpas_pool_get_array(diag_physics,'i_rainnc' ,i_rainnc ) call mpas_pool_get_array(diag_physics,'precipw' ,precipw ) call mpas_pool_get_array(diag_physics,'graupelnc' ,graupelnc ) @@ -429,12 +485,11 @@ subroutine precip_to_MPAS(configs,mesh,diag_physics) call mpas_pool_get_array(diag_physics,'snowncv' ,snowncv ) call mpas_pool_get_array(diag_physics,'sr' ,sr ) - do i = 1, nCellsSolve + do i = its,ite precipw(i) = 0._RKIND enddo !variables common to all cloud microphysics schemes: - do j = jts,jte do i = its,ite @@ -460,14 +515,11 @@ subroutine precip_to_MPAS(configs,mesh,diag_physics) enddo !variables specific to different cloud microphysics schemes: - microp_select_init: select case(microp_scheme) - case ("wsm6") - + case ("mp_thompson","mp_wsm6") do j = jts,jte do i = its,ite - !time-step precipitation: snowncv(i) = snownc_p(i,j) graupelncv(i) = graupelnc_p(i,j) @@ -476,7 +528,6 @@ subroutine precip_to_MPAS(configs,mesh,diag_physics) !accumulated precipitation: snownc(i) = snownc(i) + snowncv(i) graupelnc(i) = graupelnc(i) + graupelncv(i) - enddo enddo @@ -486,34 +537,37 @@ subroutine precip_to_MPAS(configs,mesh,diag_physics) end subroutine precip_to_MPAS -!================================================================================================== - subroutine compute_radar_reflectivity(diag_physics) 
-!================================================================================================== +!================================================================================================================= + subroutine compute_radar_reflectivity(diag_physics,its,ite) +!================================================================================================================= + +!input arguments: + integer,intent(in):: its,ite !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics !local pointers: - real(kind=RKIND),dimension(:),pointer:: refl10cm_max + real(kind=RKIND),dimension(:),pointer:: refl10cm_max,refl10cm_1km,refl10cm_1km_max !local variables and arrays: - integer:: i,j,k - real(kind=RKIND),dimension(:),allocatable:: qv1d,qr1d,qs1d,qg1d,t1d,p1d,dBZ1d + integer:: i,j,k,kp + real(kind=RKIND),dimension(:),allocatable:: qv1d,qc1d,qr1d,qs1d,qg1d,t1d,p1d,nr1d,dBZ1d,zp + real(kind=RKIND):: w1,w2 -!-------------------------------------------------------------------------------------------------- -!write(0,*) -!write(0,*) '--- enter subroutine COMPUTE_RADAR_REFLECTIVITY:' +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_array(diag_physics,'refl10cm_max',refl10cm_max) + call mpas_pool_get_array(diag_physics,'refl10cm_1km',refl10cm_1km) + call mpas_pool_get_array(diag_physics,'refl10cm_1km_max',refl10cm_1km_max) microp_select: select case(microp_scheme) - case ("kessler") + case ("mp_kessler") call physics_error_fatal('--- calculation of radar reflectivity is not available' // & 'with kessler cloud microphysics') - case ("wsm6") - + case ("mp_wsm6") if(.not.allocated(p1d) ) allocate(p1d(kts:kte) ) if(.not.allocated(t1d) ) allocate(t1d(kts:kte) ) if(.not.allocated(qv1d) ) allocate(qv1d(kts:kte) ) @@ -521,6 +575,7 @@ subroutine compute_radar_reflectivity(diag_physics) if(.not.allocated(qs1d) ) allocate(qs1d(kts:kte) ) if(.not.allocated(qg1d) ) 
allocate(qg1d(kts:kte) ) if(.not.allocated(dBz1d)) allocate(dBZ1d(kts:kte)) + if(.not.allocated(zp) ) allocate(zp(kts:kte) ) do j = jts,jte do i = its,ite @@ -532,64 +587,119 @@ subroutine compute_radar_reflectivity(diag_physics) qs1d(k) = qs_p(i,k,j) qg1d(k) = qg_p(i,k,j) dBZ1d(k) = -35._RKIND + zp(k) = z_p(i,k,j) - z_p(i,1,j)+0.5*dz_p(i,1,j) ! height AGL enddo call refl10cm_wsm6(qv1d,qr1d,qs1d,qg1d,t1d,p1d,dBZ1d,kts,kte,i,j) + kp = 1 + do k = kts,kte + dBZ1d(k) = max(-35._RKIND,dBZ1d(k)) + if(zp(k) .lt. 1000.) kp = k + enddo + refl10cm_max(i) = maxval(dBZ1d(:)) + w1 = (zp(kp+1)-1000.)/(zp(kp+1)-zp(kp)) + w2 = 1.0 - w1 + refl10cm_1km(i) = w1*dBZ1d(kp) + w2*dBZ1d(kp+1) + refl10cm_1km_max(i) = max(refl10cm_1km_max(i),refl10cm_1km(i)) + enddo + enddo + + if(allocated(p1d) ) deallocate(p1d ) + if(allocated(t1d) ) deallocate(t1d ) + if(allocated(qv1d) ) deallocate(qv1d ) + if(allocated(qr1d) ) deallocate(qr1d ) + if(allocated(qs1d) ) deallocate(qs1d ) + if(allocated(qg1d) ) deallocate(qg1d ) + if(allocated(dBz1d)) deallocate(dBZ1d) + if(allocated(zp) ) deallocate(zp ) + + case ("mp_thompson") + if(.not.allocated(p1d) ) allocate(p1d(kts:kte) ) + if(.not.allocated(t1d) ) allocate(t1d(kts:kte) ) + if(.not.allocated(qv1d) ) allocate(qv1d(kts:kte) ) + if(.not.allocated(qc1d) ) allocate(qc1d(kts:kte) ) + if(.not.allocated(qr1d) ) allocate(qr1d(kts:kte) ) + if(.not.allocated(qs1d) ) allocate(qs1d(kts:kte) ) + if(.not.allocated(qg1d) ) allocate(qg1d(kts:kte) ) + if(.not.allocated(nr1d) ) allocate(nr1d(kts:kte) ) + if(.not.allocated(dBz1d)) allocate(dBZ1d(kts:kte)) + if(.not.allocated(zp) ) allocate(zp(kts:kte) ) + + do j = jts,jte + do i = its,ite + do k = kts,kte + p1d(k) = pres_p(i,k,j) + t1d(k) = th_p(i,k,j) * pi_p(i,k,j) + qv1d(k) = qv_p(i,k,j) + qc1d(k) = qc_p(i,k,j) + qr1d(k) = qr_p(i,k,j) + qs1d(k) = qs_p(i,k,j) + qg1d(k) = qg_p(i,k,j) + nr1d(k) = nr_p(i,k,j) + dBZ1d(k) = -35._RKIND + zp(k) = z_p(i,k,j) - z_p(i,1,j)+0.5*dz_p(i,1,j) ! 
height AGL + enddo + + call calc_refl10cm(qv1d,qc1d,qr1d,nr1d,qs1d,qg1d,t1d,p1d,dBZ1d,kts,kte,i,j) + + kp = 1 do k = kts,kte dBZ1d(k) = max(-35._RKIND,dBZ1d(k)) -! write(0,201) i,k,dBZ1d(k) + if(zp(k) .lt. 1000.) kp = k enddo refl10cm_max(i) = maxval(dBZ1d(:)) -! if(refl10cm_max(i) .gt. 0.) write(0,201) j,i,refl10cm_max(i) + w1 = (zp(kp+1)-1000.)/(zp(kp+1)-zp(kp)) + w2 = 1.0 - w1 + refl10cm_1km(i) = w1*dBZ1d(kp) + w2*dBZ1d(kp+1) + refl10cm_1km_max(i) = max(refl10cm_1km_max(i),refl10cm_1km(i)) enddo enddo if(allocated(p1d) ) deallocate(p1d ) if(allocated(t1d) ) deallocate(t1d ) if(allocated(qv1d) ) deallocate(qv1d ) + if(allocated(qc1d) ) deallocate(qc1d ) if(allocated(qr1d) ) deallocate(qr1d ) if(allocated(qs1d) ) deallocate(qs1d ) if(allocated(qg1d) ) deallocate(qg1d ) + if(allocated(nr1d) ) deallocate(nr1d ) if(allocated(dBz1d)) deallocate(dBZ1d) + if(allocated(zp) ) deallocate(zp ) case default end select microp_select -!write(0,*) '--- end subroutine COMPUTE_RADAR_REFLECTIVITY' - - 201 format(2i6,e15.8) end subroutine compute_radar_reflectivity -!================================================================================================== - subroutine compute_relhum(mesh,diag) -!================================================================================================== +!================================================================================================================= + subroutine compute_relhum(diag,its,ite) +!================================================================================================================= !input arguments: - type(mpas_pool_type),intent(in):: mesh + integer,intent(in):: its,ite !inout arguments: type(mpas_pool_type),intent(inout):: diag !local pointers: - integer,pointer:: nCellsSolve,nVertLevels real(kind=RKIND),dimension(:,:),pointer:: relhum !local variables and arrays: integer:: i,j,k - real(kind=RKIND):: tempc + real(kind=RKIND):: tempc, rh real(kind=RKIND),dimension(:),allocatable:: 
qv1d,qvs1d,t1d,p1d -!-------------------------------------------------------------------------------------------------- - - call mpas_pool_get_dimension(mesh,'nCellsSolve',nCellsSolve) - call mpas_pool_get_dimension(mesh,'nVertLevels',nVertLevels) +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_array(diag,'relhum',relhum) - - relhum(1:nVertLevels,1:nCellsSolve) = 0._RKIND + do k = kts,kte + do i = its,ite + relhum(k,i) = 0._RKIND + enddo + enddo if(.not.allocated(p1d) ) allocate(p1d(kts:kte) ) if(.not.allocated(t1d) ) allocate(t1d(kts:kte) ) @@ -598,7 +708,6 @@ subroutine compute_relhum(mesh,diag) do j = jts,jte do i = its,ite - do k = kts,kte p1d(k) = pres_p(i,k,j) t1d(k) = th_p(i,k,j) * pi_p(i,k,j) @@ -607,8 +716,8 @@ subroutine compute_relhum(mesh,diag) if(tempc .le. 0._RKIND) qvs1d(k) = rsif(p1d(k),t1d(k)) qv1d(k) = qv_p(i,k,j) relhum(k,i) = qv1d(k) / qvs1d(k) + relhum(k,i) = relhum(k,i) * 100._RKIND enddo - enddo enddo @@ -619,77 +728,6 @@ subroutine compute_relhum(mesh,diag) end subroutine compute_relhum -!============================================================================================= -!NOTE: functions rslf and rsif are taken from module_mp_thompson temporarily for computing -! the diagnostic relative humidity. These two functions will be removed from this module -! when the Thompson cloud microphysics scheme will be restored to MPAS-Dev. -! Laura D. Fowler (birch.mmm.ucar.edu) / 2013-07-11. - -!+---+-----------------------------------------------------------------+ -! THIS FUNCTION CALCULATES THE LIQUID SATURATION VAPOR MIXING RATIO AS -! A FUNCTION OF TEMPERATURE AND PRESSURE -! 
- REAL FUNCTION RSLF(P,T) - - IMPLICIT NONE - REAL, INTENT(IN):: P, T - REAL:: ESL,X - REAL, PARAMETER:: C0= .611583699E03 - REAL, PARAMETER:: C1= .444606896E02 - REAL, PARAMETER:: C2= .143177157E01 - REAL, PARAMETER:: C3= .264224321E-1 - REAL, PARAMETER:: C4= .299291081E-3 - REAL, PARAMETER:: C5= .203154182E-5 - REAL, PARAMETER:: C6= .702620698E-8 - REAL, PARAMETER:: C7= .379534310E-11 - REAL, PARAMETER:: C8=-.321582393E-13 - - X=MAX(-80.,T-273.16) - -! ESL=612.2*EXP(17.67*X/(T-29.65)) - ESL=C0+X*(C1+X*(C2+X*(C3+X*(C4+X*(C5+X*(C6+X*(C7+X*C8))))))) - RSLF=.622*ESL/(P-ESL) - -! ALTERNATIVE -! ; Source: Murphy and Koop, Review of the vapour pressure of ice and -! supercooled water for atmospheric applications, Q. J. R. -! Meteorol. Soc (2005), 131, pp. 1539-1565. -! ESL = EXP(54.842763 - 6763.22 / T - 4.210 * ALOG(T) + 0.000367 * T -! + TANH(0.0415 * (T - 218.8)) * (53.878 - 1331.22 -! / T - 9.44523 * ALOG(T) + 0.014025 * T)) - - END FUNCTION RSLF -!+---+-----------------------------------------------------------------+ -! THIS FUNCTION CALCULATES THE ICE SATURATION VAPOR MIXING RATIO AS A -! FUNCTION OF TEMPERATURE AND PRESSURE -! - REAL FUNCTION RSIF(P,T) - - IMPLICIT NONE - REAL, INTENT(IN):: P, T - REAL:: ESI,X - REAL, PARAMETER:: C0= .609868993E03 - REAL, PARAMETER:: C1= .499320233E02 - REAL, PARAMETER:: C2= .184672631E01 - REAL, PARAMETER:: C3= .402737184E-1 - REAL, PARAMETER:: C4= .565392987E-3 - REAL, PARAMETER:: C5= .521693933E-5 - REAL, PARAMETER:: C6= .307839583E-7 - REAL, PARAMETER:: C7= .105785160E-9 - REAL, PARAMETER:: C8= .161444444E-12 - - X=MAX(-80.,T-273.16) - ESI=C0+X*(C1+X*(C2+X*(C3+X*(C4+X*(C5+X*(C6+X*(C7+X*C8))))))) - RSIF=.622*ESI/(P-ESI) - -! ALTERNATIVE -! ; Source: Murphy and Koop, Review of the vapour pressure of ice and -! supercooled water for atmospheric applications, Q. J. R. -! Meteorol. Soc (2005), 131, pp. 1539-1565. -! 
ESI = EXP(9.550426 - 5723.265/T + 3.53068*ALOG(T) - 0.00728332*T) - - END FUNCTION RSIF - -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_driver_microphysics -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_driver_oml.F b/src/core_atmosphere/physics/mpas_atmphys_driver_oml.F new file mode 100644 index 0000000000..bba081b200 --- /dev/null +++ b/src/core_atmosphere/physics/mpas_atmphys_driver_oml.F @@ -0,0 +1,153 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!================================================================================================================= + module mpas_atmphys_driver_oml + use mpas_kind_types + use mpas_derived_types + use mpas_pool_routines + + use mpas_atmphys_constants + use mpas_atmphys_landuse + use mpas_atmphys_lsm_noahinit + use mpas_atmphys_vars + use mpas_constants + +!wrf physics + use module_sf_oml + + implicit none + private + public :: driver_oml1d + + integer,private:: i,j,k,n + +!MPAS driver for 1d ocean mixed layer +!Bill Skamarock (send comments to skamaroc@ucar.edu). +!2014-06-11. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * added update of the sea-surface temperature below call to subroutine oml1d. +! Laura D. 
Fowler (laura@ucar.edu) / 2014-07-24. + + + contains + + +!================================================================================================================= + subroutine driver_oml1d(configs,mesh,diag,diag_physics,sfc_input) +!================================================================================================================= + +!input arguments: + type(mpas_pool_type),intent(in):: mesh + type(mpas_pool_type),intent(in):: configs + type(mpas_pool_type),intent(in):: diag + +!inout arguments: + type(mpas_pool_type),intent(inout):: diag_physics + type(mpas_pool_type),intent(inout):: sfc_input + + + logical,pointer:: config_oml1d + real(kind=RKIND),pointer:: oml_gamma + real(kind=RKIND),pointer:: oml_relaxation_time + + real(kind=RKIND), dimension(:), pointer:: t_oml + real(kind=RKIND), dimension(:), pointer:: t_oml_initial + real(kind=RKIND), dimension(:), pointer:: t_oml_200m_initial + real(kind=RKIND), dimension(:), pointer:: h_oml + real(kind=RKIND), dimension(:), pointer:: h_oml_initial + real(kind=RKIND), dimension(:), pointer:: hu_oml + real(kind=RKIND), dimension(:), pointer:: hv_oml + + real(kind=RKIND), dimension(:), pointer:: sst + real(kind=RKIND), dimension(:), pointer:: skintemp + real(kind=RKIND), dimension(:), pointer:: hfx + real(kind=RKIND), dimension(:), pointer:: lh + real(kind=RKIND), dimension(:), pointer:: gsw + real(kind=RKIND), dimension(:), pointer:: glw + real(kind=RKIND), dimension(:), pointer:: sfc_emiss + real(kind=RKIND), dimension(:), pointer:: ust + real(kind=RKIND), dimension(:), pointer:: xland + + real(kind=RKIND), dimension(:,:), pointer:: uReconstructZonal + real(kind=RKIND), dimension(:,:), pointer:: uReconstructmeridional + real(kind=RKIND), dimension(:) , pointer:: latCell + + integer, pointer:: nCells, nCellsSolve + + integer :: iCell + real(kind=RKIND):: f_coriolis + +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) 
+!write(0,*) '--- enter subroutine driver_oml1d:' + +! namelist parameters for ocean mixed layer model + call mpas_pool_get_config(configs,'config_oml1d' ,config_oml1d ) + call mpas_pool_get_config(configs,'config_oml_gamma' ,oml_gamma ) + call mpas_pool_get_config(configs,'config_oml_relaxation_time' ,oml_relaxation_time ) + +! state and initial state for ocean mixed layer model + call mpas_pool_get_array(diag_physics,'t_oml' ,t_oml ) + call mpas_pool_get_array(diag_physics,'t_oml_initial' ,t_oml_initial ) + call mpas_pool_get_array(diag_physics,'t_oml_200m_initial' ,t_oml_200m_initial ) + call mpas_pool_get_array(diag_physics,'h_oml' ,h_oml ) + call mpas_pool_get_array(diag_physics,'h_oml_initial' ,h_oml_initial ) + call mpas_pool_get_array(diag_physics,'hu_oml' ,hu_oml ) + call mpas_pool_get_array(diag_physics,'hv_oml' ,hv_oml ) + +! state and diagnostics from mpas + call mpas_pool_get_array(diag_physics,'hfx' ,hfx ) + call mpas_pool_get_array(diag_physics,'lh' ,lh ) + call mpas_pool_get_array(diag_physics,'gsw' ,gsw ) + call mpas_pool_get_array(diag_physics,'glw' ,glw ) + call mpas_pool_get_array(diag_physics,'sfc_emiss' ,sfc_emiss ) + call mpas_pool_get_array(diag_physics,'ust' ,ust ) + + call mpas_pool_get_array(sfc_input,'sst' ,sst ) + call mpas_pool_get_array(sfc_input,'skintemp' ,skintemp ) + call mpas_pool_get_array(sfc_input,'xland' ,xland ) + + call mpas_pool_get_array(diag,'uReconstructZonal' ,uReconstructZonal ) + call mpas_pool_get_array(diag,'uReconstructMeridional' ,uReconstructMeridional ) + + call mpas_pool_get_array(mesh,'latCell' ,latCell ) + + call mpas_pool_get_dimension(mesh,'nCells' ,nCells ) + call mpas_pool_get_dimension(mesh,'nCellsSolve' ,nCellsSolve ) + + if (config_oml1d) then + + do iCell = 1, nCellsSolve + +! if ocean point, call the 1d ocean mixed layer model + if( xland(iCell) .gt. 
1.5) then + f_coriolis = 2.*omega*cos(latCell(iCell)) + call oml1d( t_oml(iCell), t_oml_initial(iCell), h_oml(iCell), h_oml_initial(iCell), & + hu_oml(iCell), hv_oml(iCell), skintemp(iCell), hfx(iCell), & + lh(iCell), gsw(iCell), glw(iCell), t_oml_200m_initial(iCell), & + uReconstructZonal(1,iCell), uReconstructMeridional(1,iCell), & + ust(iCell), f_coriolis, sfc_emiss(iCell), stbolt, gravity, dt_pbl, & + oml_gamma, oml_relaxation_time ) + + sst(iCell) = skintemp(iCell) + + end if + + end do + + end if + +!write(0,*) '--- end subroutine driver_oml1d.' + + end subroutine driver_oml1d + +!================================================================================================================= + end module mpas_atmphys_driver_oml +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_driver_pbl.F b/src/core_atmosphere/physics/mpas_atmphys_driver_pbl.F index d385545182..2b3a15a7a7 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_driver_pbl.F +++ b/src/core_atmosphere/physics/mpas_atmphys_driver_pbl.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_driver_pbl use mpas_kind_types use mpas_derived_types @@ -15,6 +15,7 @@ module mpas_atmphys_driver_pbl use mpas_atmphys_vars !wrf physics: + use module_bl_mynn use module_bl_ysu implicit none @@ -23,184 +24,380 @@ module mpas_atmphys_driver_pbl deallocate_pbl, & driver_pbl - integer,private:: i,j,k - - -!>\brief MPAS driver for parameterization of Planetary Boundary Layer (PBL) processes. -!>\author Laura D. 
Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_driver_pbl: -!> --------------------------------------- -!> allocate_pbl : allocate local arrays for parameterization of PBL processes. -!> deallocate_pbl: deallocate local arrays for parameterization of PBL processes. -!> driver_pbl : main driver (called from subroutine physics_driver). -!> pbl_from_MPAS : initialize local arrays. -!> pbl_to_MPAS : copy local arrays to MPAS arrays. -!> -!> WRF physics called from driver_pbl: -!> ----------------------------------- -!> * module_bl_ysu : YSU PBL scheme. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * removed the pre-processor option "do_hydrostatic_pressure" before call to subroutine ysu. -!> Laura D. Fowler (birch.ucar.edu) / 2013-05-29. -!> * in call to subroutine ysu, replaced the variable g (that originally pointed to gravity) -!> with gravity, for simplicity. -!> Laura D. Fowler (laura@ucar.edu) / 2014-03-21. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * Modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +!MPAS driver for parameterization of Planetary Boundary Layer (PBL) processes. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_driver_pbl: +! --------------------------------------- +! allocate_pbl : allocate local arrays for parameterization of PBL processes. +! deallocate_pbl: deallocate local arrays for parameterization of PBL processes. +! driver_pbl : main driver (called from subroutine physics_driver). +! pbl_from_MPAS : initialize local arrays. +! pbl_to_MPAS : copy local arrays to MPAS arrays. +! +! WRF physics called from driver_pbl: +! ----------------------------------- +! * module_bl_ysu : YSU PBL scheme. +! +! 
add-ons and modifications to sourcecode: +! ---------------------------------------- +! * removed the pre-processor option "do_hydrostatic_pressure" before call to subroutine ysu. +! Laura D. Fowler (birch.ucar.edu) / 2013-05-29. +! * in call to subroutine ysu, replaced the variable g (that originally pointed to gravity) +! with gravity, for simplicity. +! Laura D. Fowler (laura@ucar.edu) / 2014-03-21. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * renamed "ysu" with "bl_ysu". +! Laura D. Fowler (laura@ucar.edu) / 2016-03-25. +! * added the implementation of the MYNN PBL scheme from WRF 3.6.1. +! Laura D. Fowler (laura@ucar.edu) / 2016-03-30. +! * corrected the initialization of sh3d for the mynn parameterization. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-13. +! * for the mynn parameterization, change the definition of dx_p to match that used in other physics +! parameterizations. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-18. +! * updated the call to subroutine ysu in conjunction with updating module_bl_ysu.F from WRF version 3.6.1 to +! WRF version 3.8.1. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-27. 
contains -!================================================================================================== +!================================================================================================================= subroutine allocate_pbl -!================================================================================================== - -!from surface-layer model: - if(.not.allocated(br_p) ) allocate(br_p(ims:ime,jms:jme) ) - if(.not.allocated(ctopo_p) ) allocate(ctopo_p(ims:ime,jms:jme) ) - if(.not.allocated(ctopo2_p) ) allocate(ctopo2_p(ims:ime,jms:jme) ) - if(.not.allocated(gz1oz0_p) ) allocate(gz1oz0_p(ims:ime,jms:jme) ) - if(.not.allocated(hfx_p) ) allocate(hfx_p(ims:ime,jms:jme) ) - if(.not.allocated(psih_p) ) allocate(psih_p(ims:ime,jms:jme) ) - if(.not.allocated(psim_p) ) allocate(psim_p(ims:ime,jms:jme) ) - if(.not.allocated(qfx_p) ) allocate(qfx_p(ims:ime,jms:jme) ) - if(.not.allocated(regime_p) ) allocate(regime_p(ims:ime,jms:jme) ) - if(.not.allocated(u10_p) ) allocate(u10_p(ims:ime,jms:jme) ) - if(.not.allocated(ust_p) ) allocate(ust_p(ims:ime,jms:jme) ) - if(.not.allocated(v10_p) ) allocate(v10_p(ims:ime,jms:jme) ) - if(.not.allocated(wspd_p) ) allocate(wspd_p(ims:ime,jms:jme) ) - if(.not.allocated(xland_p) ) allocate(xland_p(ims:ime,jms:jme) ) - if(.not.allocated(znt_p) ) allocate(znt_p(ims:ime,jms:jme) ) -!for YSU PBL scheme: - if(.not.allocated(hpbl_p) ) allocate(hpbl_p(ims:ime,jms:jme) ) - if(.not.allocated(kpbl_p) ) allocate(kpbl_p(ims:ime,jms:jme) ) - if(.not.allocated(exch_p) ) allocate(exch_p(ims:ime,kms:kme,jms:jme) ) -!tendencies: - if(.not.allocated(rublten_p) ) allocate(rublten_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(rvblten_p) ) allocate(rvblten_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(rthblten_p)) allocate(rthblten_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(rqvblten_p)) allocate(rqvblten_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(rqcblten_p)) allocate(rqcblten_p(ims:ime,kms:kme,jms:jme) ) - 
if(.not.allocated(rqiblten_p)) allocate(rqiblten_p(ims:ime,kms:kme,jms:jme) ) -!temporary for debugging the YSU PBL scheme: - if(.not.allocated(kzh_p) ) allocate(kzh_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(kzm_p) ) allocate(kzm_p(ims:ime,kms:kme,jms:jme) ) - if(.not.allocated(kzq_p) ) allocate(kzq_p(ims:ime,kms:kme,jms:jme) ) +!================================================================================================================= + + if(.not.allocated(hfx_p) ) allocate(hfx_p(ims:ime,jms:jme) ) + if(.not.allocated(qfx_p) ) allocate(qfx_p(ims:ime,jms:jme) ) + if(.not.allocated(ust_p) ) allocate(ust_p(ims:ime,jms:jme) ) + if(.not.allocated(wspd_p) ) allocate(wspd_p(ims:ime,jms:jme) ) + if(.not.allocated(xland_p)) allocate(xland_p(ims:ime,jms:jme)) + if(.not.allocated(hpbl_p) ) allocate(hpbl_p(ims:ime,jms:jme) ) + if(.not.allocated(kpbl_p) ) allocate(kpbl_p(ims:ime,jms:jme) ) + if(.not.allocated(znt_p) ) allocate(znt_p(ims:ime,jms:jme) ) + if(.not.allocated(delta_p)) allocate(delta_p(ims:ime,jms:jme)) + if(.not.allocated(wstar_p)) allocate(wstar_p(ims:ime,jms:jme)) + if(.not.allocated(uoce_p) ) allocate(uoce_p(ims:ime,jms:jme) ) + if(.not.allocated(voce_p) ) allocate(voce_p(ims:ime,jms:jme) ) + + + !tendencies: + if(.not.allocated(rublten_p) ) allocate(rublten_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(rvblten_p) ) allocate(rvblten_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(rthblten_p)) allocate(rthblten_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(rqvblten_p)) allocate(rqvblten_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(rqcblten_p)) allocate(rqcblten_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(rqiblten_p)) allocate(rqiblten_p(ims:ime,kms:kme,jms:jme)) + + !exchange coefficients: + if(.not.allocated(kzh_p)) allocate(kzh_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(kzm_p)) allocate(kzm_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(kzq_p)) allocate(kzq_p(ims:ime,kms:kme,jms:jme)) + + pbl_select: select case 
(trim(pbl_scheme)) + + case("bl_ysu") + !from surface-layer model: + if(.not.allocated(br_p) ) allocate(br_p(ims:ime,jms:jme) ) + if(.not.allocated(ctopo_p) ) allocate(ctopo_p(ims:ime,jms:jme) ) + if(.not.allocated(ctopo2_p)) allocate(ctopo2_p(ims:ime,jms:jme) ) + if(.not.allocated(psih_p) ) allocate(psih_p(ims:ime,jms:jme) ) + if(.not.allocated(psim_p) ) allocate(psim_p(ims:ime,jms:jme) ) + if(.not.allocated(regime_p)) allocate(regime_p(ims:ime,jms:jme) ) + if(.not.allocated(u10_p) ) allocate(u10_p(ims:ime,jms:jme) ) + if(.not.allocated(v10_p) ) allocate(v10_p(ims:ime,jms:jme) ) + if(.not.allocated(exch_p) ) allocate(exch_p(ims:ime,kms:kme,jms:jme)) + !from radiation schemes: + if(.not.allocated(rthraten_p)) allocate(rthraten_p(ims:ime,kms:kme,jms:jme)) + + case("bl_mynn") + if(.not.allocated(dx_p) ) allocate(dx_p(ims:ime,jms:jme) ) + if(.not.allocated(ch_p) ) allocate(ch_p(ims:ime,jms:jme) ) + if(.not.allocated(qcg_p) ) allocate(qcg_p(ims:ime,jms:jme) ) + if(.not.allocated(qsfc_p) ) allocate(qsfc_p(ims:ime,jms:jme) ) + if(.not.allocated(rmol_p) ) allocate(rmol_p(ims:ime,jms:jme) ) + if(.not.allocated(tsk_p) ) allocate(tsk_p(ims:ime,jms:jme) ) + if(.not.allocated(vdfg_p) ) allocate(vdfg_p(ims:ime,jms:jme) ) + + if(.not.allocated(cov_p) ) allocate(cov_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qke_p) ) allocate(qke_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qsq_p) ) allocate(qsq_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(tsq_p) ) allocate(tsq_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qkeadv_p)) allocate(qkeadv_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(elpbl_p) ) allocate(elpbl_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(tkepbl_p)) allocate(tkepbl_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(sh3d_p) ) allocate(sh3d_p(ims:ime,kms:kme,jms:jme) ) + + if(.not.allocated(dqke_p) ) allocate(dqke_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qbuoy_p) ) allocate(qbuoy_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qdiss_p) ) 
allocate(qdiss_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qshear_p)) allocate(qshear_p(ims:ime,kms:kme,jms:jme)) + if(.not.allocated(qwt_p) ) allocate(qwt_p(ims:ime,kms:kme,jms:jme) ) + + if(.not.allocated(rniblten_p)) allocate(rniblten_p(ims:ime,kms:kme,jms:jme)) + + case default + + end select pbl_select end subroutine allocate_pbl -!================================================================================================== +!================================================================================================================= subroutine deallocate_pbl -!================================================================================================== - -!from surface-layer model: - if(allocated(br_p) ) deallocate(br_p ) - if(allocated(ctopo_p) ) deallocate(ctopo_p ) - if(allocated(ctopo2_p) ) deallocate(ctopo2_p ) - if(allocated(gz1oz0_p) ) deallocate(gz1oz0_p ) - if(allocated(hfx_p) ) deallocate(hfx_p ) - if(allocated(psih_p) ) deallocate(psih_p ) - if(allocated(psim_p) ) deallocate(psim_p ) - if(allocated(qfx_p) ) deallocate(qfx_p ) - if(allocated(regime_p) ) deallocate(regime_p ) - if(allocated(u10_p) ) deallocate(u10_p ) - if(allocated(ust_p) ) deallocate(ust_p ) - if(allocated(v10_p) ) deallocate(v10_p ) - if(allocated(wspd_p) ) deallocate(wspd_p ) - if(allocated(xland_p) ) deallocate(xland_p ) - if(allocated(znt_p) ) deallocate(znt_p ) -!for YSU PBL scheme: - if(allocated(hpbl_p) ) deallocate(hpbl_p ) - if(allocated(kpbl_p) ) deallocate(kpbl_p ) - if(allocated(exch_p) ) deallocate(exch_p ) -!tendencies - if(allocated(rublten_p) ) deallocate(rublten_p ) - if(allocated(rvblten_p) ) deallocate(rvblten_p ) - if(allocated(rthblten_p)) deallocate(rthblten_p ) - if(allocated(rqvblten_p)) deallocate(rqvblten_p ) - if(allocated(rqcblten_p)) deallocate(rqcblten_p ) - if(allocated(rqiblten_p)) deallocate(rqiblten_p ) -!temporary for debugging the YSU PBL scheme: - if(allocated(kzh_p) ) deallocate(kzh_p ) - if(allocated(kzm_p) ) 
deallocate(kzm_p ) - if(allocated(kzq_p) ) deallocate(kzq_p ) +!================================================================================================================= + + if(allocated(hfx_p) ) deallocate(hfx_p ) + if(allocated(qfx_p) ) deallocate(qfx_p ) + if(allocated(ust_p) ) deallocate(ust_p ) + if(allocated(wspd_p) ) deallocate(wspd_p ) + if(allocated(xland_p)) deallocate(xland_p) + if(allocated(hpbl_p) ) deallocate(hpbl_p ) + if(allocated(kpbl_p) ) deallocate(kpbl_p ) + if(allocated(znt_p) ) deallocate(znt_p ) + if(allocated(delta_p)) deallocate(delta_p) + if(allocated(wstar_p)) deallocate(wstar_p) + if(allocated(uoce_p) ) deallocate(uoce_p ) + if(allocated(voce_p) ) deallocate(voce_p ) + + !tendencies: + if(allocated(rublten_p) ) deallocate(rublten_p ) + if(allocated(rvblten_p) ) deallocate(rvblten_p ) + if(allocated(rthblten_p)) deallocate(rthblten_p) + if(allocated(rqvblten_p)) deallocate(rqvblten_p) + if(allocated(rqcblten_p)) deallocate(rqcblten_p) + if(allocated(rqiblten_p)) deallocate(rqiblten_p) + + !exchange coefficients: + if(allocated(kzh_p)) deallocate(kzh_p) + if(allocated(kzm_p)) deallocate(kzm_p) + if(allocated(kzq_p)) deallocate(kzq_p) + + pbl_select: select case (trim(pbl_scheme)) + + case("bl_ysu") + !from surface-layer model: + if(allocated(br_p) ) deallocate(br_p ) + if(allocated(ctopo_p) ) deallocate(ctopo_p ) + if(allocated(ctopo2_p)) deallocate(ctopo2_p) + if(allocated(psih_p) ) deallocate(psih_p ) + if(allocated(psim_p) ) deallocate(psim_p ) + if(allocated(regime_p)) deallocate(regime_p) + if(allocated(u10_p) ) deallocate(u10_p ) + if(allocated(v10_p) ) deallocate(v10_p ) + if(allocated(exch_p) ) deallocate(exch_p ) + !from radiation schemes: + if(allocated(rthraten_p)) deallocate(rthraten_p) + + case("bl_mynn") + if(allocated(dx_p) ) deallocate(dx_p ) + if(allocated(ch_p) ) deallocate(ch_p ) + if(allocated(qcg_p) ) deallocate(qcg_p ) + if(allocated(qsfc_p) ) deallocate(qsfc_p ) + if(allocated(rmol_p) ) deallocate(rmol_p ) + 
if(allocated(tsk_p) ) deallocate(tsk_p ) + if(allocated(vdfg_p) ) deallocate(vdfg_p ) + + if(allocated(cov_p) ) deallocate(cov_p ) + if(allocated(qke_p) ) deallocate(qke_p ) + if(allocated(qsq_p) ) deallocate(qsq_p ) + if(allocated(tsq_p) ) deallocate(tsq_p ) + if(allocated(qkeadv_p)) deallocate(qkeadv_p) + if(allocated(elpbl_p) ) deallocate(elpbl_p ) + if(allocated(tkepbl_p)) deallocate(tkepbl_p) + if(allocated(sh3d_p) ) deallocate(sh3d_p ) + if(allocated(dqke_p) ) deallocate(dqke_p ) + if(allocated(qbuoy_p) ) deallocate(qbuoy_p ) + if(allocated(qdiss_p) ) deallocate(qdiss_p ) + if(allocated(qshear_p)) deallocate(qshear_p) + if(allocated(qwt_p) ) deallocate(qwt_p ) + + if(allocated(rniblten_p)) deallocate(rniblten_p) + + case default + + end select pbl_select end subroutine deallocate_pbl -!================================================================================================== - subroutine pbl_from_MPAS(sfc_input,diag_physics) -!================================================================================================== +!================================================================================================================= + subroutine pbl_from_MPAS(configs,mesh,sfc_input,diag_physics,tend_physics,its,ite) +!================================================================================================================= !input arguments: + type(mpas_pool_type),intent(in):: configs + type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(in):: diag_physics type(mpas_pool_type),intent(in):: sfc_input + type(mpas_pool_type),intent(in):: tend_physics + + integer,intent(in):: its,ite + +!local variables: + integer:: i,k,j !local pointers: - real(kind=RKIND),dimension(:),pointer:: xland - real(kind=RKIND),dimension(:),pointer:: br,gz1oz0,hfx,hpbl,fm,fh,qfx,regime,u10,ust,v10,wspd,znt + real(kind=RKIND),dimension(:),pointer:: hfx,hpbl,qfx,ust,wspd,xland,znt + real(kind=RKIND),dimension(:),pointer:: delta,wstar + +!local pointers 
for YSU scheme: + logical,pointer:: config_ysu_pblmix + real(kind=RKIND),dimension(:),pointer:: br,fh,fm,regime,u10,v10 + real(kind=RKIND),dimension(:,:),pointer:: rthratenlw,rthratensw + +!local pointers for MYNN scheme: + real(kind=RKIND),pointer:: len_disp + real(kind=RKIND),dimension(:),pointer :: meshDensity + real(kind=RKIND),dimension(:),pointer :: ch,qsfc,qcg,rmol,skintemp + real(kind=RKIND),dimension(:,:),pointer:: cov,qke,qsq,tsq,sh3d,tke_pbl,qke_adv,el_pbl -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- - call mpas_pool_get_array(diag_physics,'br' ,br ) - call mpas_pool_get_array(diag_physics,'gz1oz0',gz1oz0) call mpas_pool_get_array(diag_physics,'hfx' ,hfx ) call mpas_pool_get_array(diag_physics,'hpbl' ,hpbl ) - call mpas_pool_get_array(diag_physics,'fm' ,fm ) - call mpas_pool_get_array(diag_physics,'fh' ,fh ) call mpas_pool_get_array(diag_physics,'qfx' ,qfx ) - call mpas_pool_get_array(diag_physics,'regime',regime) - call mpas_pool_get_array(diag_physics,'u10' ,u10 ) call mpas_pool_get_array(diag_physics,'ust' ,ust ) - call mpas_pool_get_array(diag_physics,'v10' ,v10 ) call mpas_pool_get_array(diag_physics,'wspd' ,wspd ) call mpas_pool_get_array(diag_physics,'znt' ,znt ) + call mpas_pool_get_array(diag_physics,'delta' ,delta ) + call mpas_pool_get_array(diag_physics,'wstar' ,wstar ) + call mpas_pool_get_array(sfc_input ,'xland' ,xland ) do j = jts,jte do i = its,ite -!from surface-layer model: - br_p(i,j) = br(i) - gz1oz0_p(i,j) = gz1oz0(i) + !from surface-layer model: hfx_p(i,j) = hfx(i) hpbl_p(i,j) = hpbl(i) - psim_p(i,j) = fm(i) - psih_p(i,j) = fh(i) qfx_p(i,j) = qfx(i) - regime_p(i,j) = regime(i) - u10_p(i,j) = u10(i) ust_p(i,j) = ust(i) - v10_p(i,j) = v10(i) wspd_p(i,j) = wspd(i) - znt_p(i,j) = znt(i) - - xland_p(i,j) = xland(i) -!initialization for YSU PBL scheme: - ctopo_p(i,j) = 
1._RKIND - ctopo2_p(i,j) = 1._RKIND + xland_p(i,j) = xland(i) kpbl_p(i,j) = 1 + znt_p(i,j) = znt(i) + delta_p(i,j) = delta(i) + wstar_p(i,j) = wstar(i) + !... ocean currents are set to zero: + uoce_p(i,j) = 0._RKIND + voce_p(i,j) = 0._RKIND enddo enddo + pbl_select: select case (trim(pbl_scheme)) + + case("bl_ysu") + call mpas_pool_get_config(configs,'config_ysu_pblmix',config_ysu_pblmix) + + call mpas_pool_get_array(diag_physics,'br' ,br ) + call mpas_pool_get_array(diag_physics,'fm' ,fm ) + call mpas_pool_get_array(diag_physics,'fh' ,fh ) + call mpas_pool_get_array(diag_physics,'regime',regime) + call mpas_pool_get_array(diag_physics,'u10' ,u10 ) + call mpas_pool_get_array(diag_physics,'v10' ,v10 ) + + call mpas_pool_get_array(tend_physics,'rthratenlw',rthratenlw) + call mpas_pool_get_array(tend_physics,'rthratensw',rthratensw) + + ysu_pblmix = 0 + if(config_ysu_pblmix) ysu_pblmix = 1 + + do j = jts,jte + do i = its,ite + !from surface-layer model: + br_p(i,j) = br(i) + psim_p(i,j) = fm(i) + psih_p(i,j) = fh(i) + regime_p(i,j) = regime(i) + u10_p(i,j) = u10(i) + v10_p(i,j) = v10(i) + !initialization for YSU PBL scheme: + ctopo_p(i,j) = 1._RKIND + ctopo2_p(i,j) = 1._RKIND + enddo + enddo + + do j = jts,jte + do k = kts,kte + do i = its,ite + exch_p(i,k,j) = 0._RKIND + rthraten_p(i,k,j) = rthratenlw(k,i) + rthratensw(k,i) + enddo + enddo + enddo + + case("bl_mynn") + call mpas_pool_get_config(configs,'config_len_disp',len_disp) + call mpas_pool_get_array(mesh,'meshDensity',meshDensity) + + call mpas_pool_get_array(sfc_input ,'skintemp',skintemp) + call mpas_pool_get_array(diag_physics,'ch' ,ch ) + call mpas_pool_get_array(diag_physics,'qcg' ,qcg ) + call mpas_pool_get_array(diag_physics,'qsfc' ,qsfc ) + call mpas_pool_get_array(diag_physics,'rmol' ,rmol ) + + call mpas_pool_get_array(diag_physics,'el_pbl' ,el_pbl ) + call mpas_pool_get_array(diag_physics,'cov' ,cov ) + call mpas_pool_get_array(diag_physics,'qke' ,qke ) + call 
mpas_pool_get_array(diag_physics,'qke_adv',qke_adv ) + call mpas_pool_get_array(diag_physics,'qsq' ,qsq ) + call mpas_pool_get_array(diag_physics,'tsq' ,tsq ) + call mpas_pool_get_array(diag_physics,'tke_pbl',tke_pbl ) + call mpas_pool_get_array(diag_physics,'sh3d' ,sh3d ) + + do j = jts,jte + do i = its,ite + dx_p(i,j) = len_disp / meshDensity(i)**0.25 + ch_p(i,j) = ch(i) + qcg_p(i,j) = qcg(i) + qsfc_p(i,j) = qsfc(i) + rmol_p(i,j) = rmol(i) + tsk_p(i,j) = skintemp(i) + !... no gravitational settling of fog/cloud droplets (grav_settling = 0): + vdfg_p(i,j) = 0._RKIND + enddo + enddo + + do j = jts,jte + do k = kts,kte + do i = its,ite + elpbl_p(i,k,j) = el_pbl(k,i) + cov_p(i,k,j) = cov(k,i) + qke_p(i,k,j) = qke(k,i) + qsq_p(i,k,j) = qsq(k,i) + tsq_p(i,k,j) = tsq(k,i) + tkepbl_p(i,k,j) = tke_pbl(k,i) + qkeadv_p(i,k,j) = qke_adv(k,i) + sh3d_p(i,k,j) = sh3d(k,i) + rniblten_p(i,k,j) = 0._RKIND + + !... outputs: + dqke_p(i,k,j) = 0._RKIND + qbuoy_p(i,k,j) = 0._RKIND + qdiss_p(i,k,j) = 0._RKIND + qshear_p(i,k,j) = 0._RKIND + qwt_p(i,k,j) = 0._RKIND + enddo + enddo + enddo + + case default + + end select pbl_select + do j = jts,jte do k = kts,kte do i = its,ite - exch_p(i,k,j) = 0._RKIND rublten_p(i,k,j) = 0._RKIND rvblten_p(i,k,j) = 0._RKIND rthblten_p(i,k,j) = 0._RKIND rqvblten_p(i,k,j) = 0._RKIND rqcblten_p(i,k,j) = 0._RKIND rqiblten_p(i,k,j) = 0._RKIND -!temporary for debugging the YSU PBL scheme: + kzh_p(i,k,j) = 0._RKIND kzm_p(i,k,j) = 0._RKIND kzq_p(i,k,j) = 0._RKIND @@ -209,32 +406,47 @@ subroutine pbl_from_MPAS(sfc_input,diag_physics) enddo end subroutine pbl_from_MPAS - -!================================================================================================== - subroutine pbl_to_MPAS(diag_physics,tend_physics) -!================================================================================================== + +!================================================================================================================= + subroutine 
pbl_to_MPAS(diag_physics,tend_physics,its,ite) +!================================================================================================================= !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: tend_physics + integer,intent(in):: its,ite + +!local variables: + integer:: i,k,j + !local pointers: integer,dimension(:),pointer:: kpbl real(kind=RKIND),dimension(:),pointer :: hpbl - real(kind=RKIND),dimension(:,:),pointer:: exch_h,kzh,kzm,kzq - real(kind=RKIND),dimension(:,:),pointer:: rublten,rvblten,rthblten,rqvblten,rqcblten,rqiblten - -!-------------------------------------------------------------------------------------------------- - - call mpas_pool_get_array(diag_physics,'kpbl' ,kpbl ) - call mpas_pool_get_array(diag_physics,'hpbl' ,hpbl ) - call mpas_pool_get_array(diag_physics,'exch_h',exch_h) - call mpas_pool_get_array(diag_physics,'kzh' ,kzh ) - call mpas_pool_get_array(diag_physics,'kzm' ,kzm ) - call mpas_pool_get_array(diag_physics,'kzq' ,kzq ) - - call mpas_pool_get_array(tend_physics,'rublten',rublten ) - call mpas_pool_get_array(tend_physics,'rvblten',rvblten ) + real(kind=RKIND),dimension(:,:),pointer:: kzh,kzm,kzq + real(kind=RKIND),dimension(:,:),pointer:: rublten,rvblten,rthblten,rqvblten,rqcblten,rqiblten, & + rniblten + +!local pointers for YSU scheme: + real(kind=RKIND),dimension(:,:),pointer:: exch_h + +!local pointers for MYNN scheme: + real(kind=RKIND),dimension(:),pointer :: delta,wstar + real(kind=RKIND),dimension(:,:),pointer:: cov,qke,qsq,tsq,sh3d,tke_pbl,qke_adv,el_pbl,dqke,qbuoy, & + qdiss,qshear,qwt + +!----------------------------------------------------------------------------------------------------------------- + + call mpas_pool_get_array(diag_physics,'kpbl' ,kpbl ) + call mpas_pool_get_array(diag_physics,'hpbl' ,hpbl ) + call mpas_pool_get_array(diag_physics,'kzh' ,kzh ) + call mpas_pool_get_array(diag_physics,'kzm' ,kzm ) + call 
mpas_pool_get_array(diag_physics,'kzq' ,kzq ) + call mpas_pool_get_array(diag_physics,'delta',delta) + call mpas_pool_get_array(diag_physics,'wstar',wstar) + + call mpas_pool_get_array(tend_physics,'rublten' ,rublten ) + call mpas_pool_get_array(tend_physics,'rvblten' ,rvblten ) call mpas_pool_get_array(tend_physics,'rthblten',rthblten) call mpas_pool_get_array(tend_physics,'rqvblten',rqvblten) call mpas_pool_get_array(tend_physics,'rqcblten',rqcblten) @@ -242,22 +454,23 @@ subroutine pbl_to_MPAS(diag_physics,tend_physics) do j = jts,jte do i = its,ite - hpbl(i) = hpbl_p(i,j) - kpbl(i) = kpbl_p(i,j) + hpbl(i) = hpbl_p(i,j) + kpbl(i) = kpbl_p(i,j) + delta(i) = delta_p(i,j) + wstar(i) = wstar_p(i,j) enddo enddo do j = jts,jte do k = kts,kte do i = its,ite - exch_h(k,i) = exch_p(i,k,j) - rublten(k,i) = rublten_p(i,k,j) - rvblten(k,i) = rvblten_p(i,k,j) + rublten(k,i) = rublten_p(i,k,j) + rvblten(k,i) = rvblten_p(i,k,j) rthblten(k,i) = rthblten_p(i,k,j) rqvblten(k,i) = rqvblten_p(i,k,j) rqcblten(k,i) = rqcblten_p(i,k,j) rqiblten(k,i) = rqiblten_p(i,k,j) -!temporary for debugging the YSU PBL scheme: + kzh(k,i) = kzh_p(i,k,j) kzm(k,i) = kzm_p(i,k,j) kzq(k,i) = kzq_p(i,k,j) @@ -265,28 +478,102 @@ subroutine pbl_to_MPAS(diag_physics,tend_physics) enddo enddo + pbl_select: select case (trim(pbl_scheme)) + + case("bl_ysu") + call mpas_pool_get_array(diag_physics,'exch_h',exch_h) + + do j = jts,jte + do k = kts,kte + do i = its,ite + exch_h(k,i) = exch_p(i,k,j) + enddo + enddo + enddo + + case("bl_mynn") + call mpas_pool_get_array(diag_physics,'el_pbl' ,el_pbl ) + call mpas_pool_get_array(diag_physics,'cov' ,cov ) + call mpas_pool_get_array(diag_physics,'qke' ,qke ) + call mpas_pool_get_array(diag_physics,'qke_adv' ,qke_adv ) + call mpas_pool_get_array(diag_physics,'qsq' ,qsq ) + call mpas_pool_get_array(diag_physics,'tsq' ,tsq ) + call mpas_pool_get_array(diag_physics,'tke_pbl' ,tke_pbl ) + call mpas_pool_get_array(diag_physics,'sh3d' ,sh3d ) + call 
mpas_pool_get_array(diag_physics,'dqke' ,dqke ) + call mpas_pool_get_array(diag_physics,'qbuoy' ,qbuoy ) + call mpas_pool_get_array(diag_physics,'qdiss' ,qdiss ) + call mpas_pool_get_array(diag_physics,'qshear' ,qshear ) + call mpas_pool_get_array(diag_physics,'qwt' ,qwt ) + call mpas_pool_get_array(tend_physics,'rniblten',rniblten) + + do j = jts,jte + do k = kts,kte + do i = its,ite + el_pbl(k,i) = elpbl_p(i,k,j) + cov(k,i) = cov_p(i,k,j) + qke(k,i) = qke_p(i,k,j) + qsq(k,i) = qsq_p(i,k,j) + tsq(k,i) = tsq_p(i,k,j) + sh3d(k,i) = sh3d_p(i,k,j) + tke_pbl(k,i) = tkepbl_p(i,k,j) + qke_adv(k,i) = qkeadv_p(i,k,j) + !... outputs: + dqke(k,i) = dqke_p(i,k,j) + qbuoy(k,i) = qbuoy_p(i,k,j) + qdiss(k,i) = qdiss_p(i,k,j) + qshear(k,i) = qshear_p(i,k,j) + qwt(k,i) = qwt_p(i,k,j) + + rniblten(k,i) = rniblten_p(i,k,j) + enddo + enddo + enddo + + case default + + end select pbl_select + end subroutine pbl_to_MPAS -!================================================================================================== - subroutine driver_pbl(sfc_input,diag_physics,tend_physics) -!================================================================================================== +!================================================================================================================= + subroutine driver_pbl(itimestep,configs,mesh,sfc_input,diag_physics,tend_physics,its,ite) +!================================================================================================================= + +!input arguments: + type(mpas_pool_type),intent(in):: configs + type(mpas_pool_type),intent(in):: mesh + + integer,intent(in):: its,ite + integer,intent(in):: itimestep -!input and output arguments: -!--------------------------- +!inout arguments: type(mpas_pool_type),intent(inout):: sfc_input type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: tend_physics -!-------------------------------------------------------------------------------------------------- 
-! write(0,*) -! write(0,*) '--- enter subroutine driver_pbl: dt_pbl=',dt_pbl +!local pointers: + logical,pointer:: config_do_restart + +!local variables: + integer:: initflag + integer:: i,k,j + +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine driver_pbl:' + + call mpas_pool_get_config(configs,'config_do_restart',config_do_restart) !copy MPAS arrays to local arrays: - call pbl_from_MPAS(sfc_input,diag_physics) + call pbl_from_MPAS(configs,mesh,sfc_input,diag_physics,tend_physics,its,ite) + + initflag = 1 + if(config_do_restart .or. itimestep > 1) initflag = 0 pbl_select: select case (trim(pbl_scheme)) - case("ysu") + case("bl_ysu") call ysu ( & p3d = pres_hyd_p , p3di = pres2_hyd_p , psfc = psfc_p , & th3d = th_p , t3d = t_p , dz8w = dz_p , & @@ -302,24 +589,59 @@ subroutine driver_pbl(sfc_input,diag_physics,tend_physics) psih = psih_p , xland = xland_p , hfx = hfx_p , & qfx = qfx_p , wspd = wspd_p , br = br_p , & dt = dt_pbl , kpbl2d = kpbl_p , exch_h = exch_p , & - u10 = u10_p , v10 = v10_p , ctopo = ctopo_p , & - ctopo2 = ctopo2_p , regime = regime_p , rho = rho_p , & - kzhout = kzh_p , kzmout = kzm_p , kzqout = kzq_p , & + wstar = wstar_p , delta = delta_p , uoce = uoce_p , & + voce = voce_p , rthraten = rthraten_p , u10 = u10_p , & + v10 = v10_p , ctopo = ctopo_p , ctopo2 = ctopo2_p , & + regime = regime_p , rho = rho_p , kzhout = kzh_p , & + kzmout = kzm_p , kzqout = kzq_p , & + ysu_topdown_pblmix = ysu_pblmix , & ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & ) - case default + case("bl_mynn") + call mynn_bl_driver ( & + p = pres_hyd_p , exner = pi_p , ps = psfc_p , & + th = th_p , dz = dz_p , u = u_p , & + v = v_p , qv = qv_p , qc = qc_p , & + qi = qi_p , qni = ni_p , rho = 
rho_p , & + du = rublten_p , dv = rvblten_p , dth = rthblten_p , & + dqv = rqvblten_p , dqc = rqcblten_p , dqi = rqiblten_p , & + dqni = rniblten_p , flag_qc = f_qc , flag_qnc = f_qnc , & + flag_qi = f_qi , flag_qni = f_qni , kpbl = kpbl_p , & + pblh = hpbl_p , xland = xland_p , ts = tsk_p , & + hfx = hfx_p , qfx = qfx_p , ch = ch_p , & + sh3d = sh3d_p , tsq = tsq_p , qsq = qsq_p , & + cov = cov_p , el_pbl = elpbl_p , qsfc = qsfc_p , & + qcg = qcg_p , ust = ust_p , rmol = rmol_p , & + wspd = wspd_p , wstar = wstar_p , delta = delta_p , & + delt = dt_pbl , k_h = kzh_p , k_m = kzm_p , & + k_q = kzq_p , uoce = uoce_p , voce = voce_p , & + qke = qke_p , qke_adv = qkeadv_p , vdfg = vdfg_p , & + tke_pbl = tkepbl_p , dqke = dqke_p , qwt = qwt_p , & + qshear = qshear_p , qbuoy = qbuoy_p , qdiss = qdiss_p , & + initflag = initflag , & + grav_settling = grav_settling , & + bl_mynn_cloudpdf = bl_mynn_cloudpdf , & + bl_mynn_tkeadvect = bl_mynn_tkeadvect , & + bl_mynn_tkebudget = bl_mynn_tkebudget , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + ) + + case default end select pbl_select !copy local arrays to MPAS grid: - call pbl_to_MPAS(diag_physics,tend_physics) -! write(0,*) '--- end subroutine driver_pbl' + call pbl_to_MPAS(diag_physics,tend_physics,its,ite) + +!write(0,*) '--- end subroutine driver_pbl.' 
end subroutine driver_pbl -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_driver_pbl -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_driver_radiation_lw.F b/src/core_atmosphere/physics/mpas_atmphys_driver_radiation_lw.F index c38972734f..52e841d4c4 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_driver_radiation_lw.F +++ b/src/core_atmosphere/physics/mpas_atmphys_driver_radiation_lw.F @@ -5,12 +5,11 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_driver_radiation_lw use mpas_kind_types use mpas_pool_routines use mpas_derived_types - use mpas_timer use mpas_atmphys_driver_radiation_sw, only: radconst use mpas_atmphys_constants @@ -32,60 +31,70 @@ module mpas_atmphys_driver_radiation_lw init_radiation_lw, & radiation_camlw_to_MPAS - integer,private:: i,j,k,kk,n - - -!>\brief MPAS driver for parameterization of longwave radiation codes. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_driver_radiation_lw: -!> ------------------------------------------------ -!> allocate_radiation_lw : allocate local arrays for parameterization of lw radiation codes. 
-!> deallocate_radiation_lw : deallocate local arrays for parameterization of lw radiation codes. -!> init_radiation_lw : initialization of individual lw radiation codes. -!> driver_radiation_lw : main driver (called from subroutine physics_driver). -!> radiation_lw_from_MPAS : initialize local arrays. -!> radiation_lw_to_MPAS : copy local arrays to MPAS arrays. -!> radiation_camlw_to_MPAS : save local arrays (absorption, emission) for CAM lw radiation code. -!> -!> WRF physics called from driver_radiation_lw: -!> -------------------------------------------- -!> * module_ra_cam : CAM long wave radiation code. -!> * module_ra_rrtmg_lw : RRTMG long wave radiation code. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * removed the pre-processor option "do_hydrostatic_pressure" before call to subroutines -!> rrtmg_lw and camrad. -!> Laura D. Fowler (birch.mmm,ucar.edu) / 2013-05-29. -!> * added structure diag in the call to subroutine init_radiation_lw. -!> Laura D. Fowler (laura@ucar.edu) / 2013-07-01. -!> * modified the call to subroutine rrtmg_lwrad to include the option of using the same ozone -!> climatology as the one used in the CAM radiation codes. -!> Laura D. Fowler (laura@ucar.edu) / 2013-07-17. -!> * in call to subroutine rrtmg_lwrad, replaced the variable g (that originally pointed to -!> gravity) with gravity, for simplicity. -!> Laura D. Fowler (laura@ucar.edu) / 2014-03-21. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. + +!MPAS driver for parameterization of longwave radiation codes. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! +! subroutines in mpas_atmphys_driver_radiation_lw: +! ------------------------------------------------ +! 
allocate_radiation_lw : allocate local arrays for parameterization of lw radiation codes. +! deallocate_radiation_lw: deallocate local arrays for parameterization of lw radiation codes. +! init_radiation_lw : initialization of individual lw radiation codes. +! driver_radiation_lw : main driver (called from subroutine physics_driver). +! radiation_lw_from_MPAS : initialize local arrays. +! radiation_lw_to_MPAS : copy local arrays to MPAS arrays. +! radiation_camlw_to_MPAS: save local arrays (absorption, emission) for CAM lw radiation code. +! +! WRF physics called from driver_radiation_lw: +! -------------------------------------------- +! * module_ra_cam : CAM long wave radiation code. +! * module_ra_rrtmg_lw : RRTMG long wave radiation code. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * removed the pre-processor option "do_hydrostatic_pressure" before call to subroutines +! rrtmg_lw and camrad. +! Laura D. Fowler (birch.mmm,ucar.edu) / 2013-05-29. +! * added structure diag in the call to subroutine init_radiation_lw. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-01. +! * modified the call to subroutine rrtmg_lwrad to include the option of using the same ozone +! climatology as the one used in the CAM radiation codes. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-17. +! * in call to subroutine rrtmg_lwrad, replaced the variable g (that originally pointed to +! gravity) with gravity, for simplicity. +! Laura D. Fowler (laura@ucar.edu) / 2014-03-21. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * cleaned-up the call to rrtmg_lwrad after cleaning up subroutine rrtmg_lwrad in module_ra_rrtmg_lw.F. +! Laura D. Fowler (laura@ucar.edu) / 2016-06-30. +! 
* added the cloud radii for cloud water, cloud ice, and snow calculated in the Thompson cloud microphysics +! scheme in the call to subroutine rrtmg_lwrad. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-07. +! * added diagnostics of the effective radii for cloud water, cloud ice, and snow used in rrtmg_lwrad. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-08. +! * removed qr_p, and qg_p in the call to rrtmg_lwrad since not used in the calculation of the cloud optical +! properties. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-08. +! * in the call to rrtmg_lwrad, substituted the variables qv_p, qc_p, qi_p, and qs_p with qvrad_p, qcrad_p, +! qirad_p, and qsrad_p initialized in subroutine cloudiness_from_MPAS. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-09. contains -!================================================================================================== +!================================================================================================================= subroutine allocate_radiation_lw(xtime_s) -!================================================================================================== +!================================================================================================================= !input arguments: real(kind=RKIND),intent(in):: xtime_s -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- if(.not.allocated(f_ice) ) allocate(f_ice(ims:ime,kms:kme,jms:jme) ) if(.not.allocated(f_rain) ) allocate(f_rain(ims:ime,kms:kme,jms:jme) ) @@ -113,6 +122,17 @@ subroutine allocate_radiation_lw(xtime_s) radiation_lw_select: select case (trim(radt_lw_scheme)) case("rrtmg_lw") + + if(.not.allocated(recloud_p) ) allocate(recloud_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(reice_p) ) allocate(reice_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(resnow_p) ) 
allocate(resnow_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(rrecloud_p) ) allocate(rrecloud_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(rreice_p) ) allocate(rreice_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(rresnow_p) ) allocate(rresnow_p(ims:ime,kms:kme,jms:jme) ) + + if(.not.allocated(lwdnflx_p) ) allocate(lwdnflx_p(ims:ime,kms:kme+1,jms:jme) ) + if(.not.allocated(lwdnflxc_p) ) allocate(lwdnflxc_p(ims:ime,kms:kme+1,jms:jme) ) + if(.not.allocated(lwdnflx_p) ) allocate(lwdnflx_p(ims:ime,kms:kme+1,jms:jme) ) if(.not.allocated(lwdnflxc_p) ) allocate(lwdnflxc_p(ims:ime,kms:kme+1,jms:jme) ) if(.not.allocated(lwupflx_p) ) allocate(lwupflx_p(ims:ime,kms:kme+1,jms:jme) ) @@ -169,9 +189,9 @@ subroutine allocate_radiation_lw(xtime_s) end subroutine allocate_radiation_lw -!================================================================================================== +!================================================================================================================= subroutine deallocate_radiation_lw -!================================================================================================== +!================================================================================================================= if(allocated(f_ice) ) deallocate(f_ice ) if(allocated(f_rain) ) deallocate(f_rain ) @@ -197,6 +217,13 @@ subroutine deallocate_radiation_lw radiation_lw_select: select case (trim(radt_lw_scheme)) case("rrtmg_lw") + if(allocated(recloud_p) ) deallocate(recloud_p ) + if(allocated(reice_p) ) deallocate(reice_p ) + if(allocated(resnow_p) ) deallocate(resnow_p ) + if(allocated(rrecloud_p) ) deallocate(rrecloud_p ) + if(allocated(rreice_p) ) deallocate(rreice_p ) + if(allocated(rresnow_p) ) deallocate(rresnow_p ) + if(allocated(lwdnflx_p) ) deallocate(lwdnflx_p ) if(allocated(lwdnflxc_p) ) deallocate(lwdnflxc_p ) if(allocated(lwupflx_p) ) deallocate(lwupflx_p ) @@ -242,10 +269,10 @@ subroutine deallocate_radiation_lw end subroutine 
deallocate_radiation_lw -!================================================================================================== +!================================================================================================================= subroutine radiation_lw_from_MPAS(xtime_s,configs,mesh,state,time_lev,diag_physics,atm_input, & - sfc_input) -!================================================================================================== + sfc_input,its,ite) +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: mesh @@ -254,7 +281,10 @@ subroutine radiation_lw_from_MPAS(xtime_s,configs,mesh,state,time_lev,diag_physi type(mpas_pool_type),intent(in):: atm_input type(mpas_pool_type),intent(in):: sfc_input + integer,intent(in):: its,ite integer,intent(in):: time_lev + + real(kind=RKIND),intent(in):: xtime_s !inout arguments: @@ -268,13 +298,15 @@ subroutine radiation_lw_from_MPAS(xtime_s,configs,mesh,state,time_lev,diag_physi real(kind=RKIND),dimension(:),pointer :: m_ps,pin real(kind=RKIND),dimension(:),pointer :: sfc_albedo,sfc_emiss real(kind=RKIND),dimension(:,:),pointer :: cldfrac,m_hybi,o3clim,o3vmr + real(kind=RKIND),dimension(:,:),pointer :: re_cloud,re_ice,re_snow real(kind=RKIND),dimension(:,:,:),pointer:: aerosols,ozmixm !local variables and arrays: integer:: ncols,nlevs + integer:: i,j,k,n real(kind=RKIND),dimension(:,:),allocatable:: p2d,o32d -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_config(configs,'config_o3climatology',config_o3climatology) @@ -351,6 +383,23 @@ subroutine radiation_lw_from_MPAS(xtime_s,configs,mesh,state,time_lev,diag_physi radiation_lw_select: select case (trim(radt_lw_scheme)) case("rrtmg_lw") + call 
mpas_pool_get_array(diag_physics,'re_cloud',re_cloud) + call mpas_pool_get_array(diag_physics,'re_ice' ,re_ice ) + call mpas_pool_get_array(diag_physics,'re_snow' ,re_snow ) + + do j = jts,jte + do k = kts,kte + do i = its,ite + recloud_p(i,k,j) = re_cloud(k,i) + reice_p(i,k,j) = re_ice(k,i) + resnow_p(i,k,j) = re_snow(k,i) + rrecloud_p(i,k,j) = 0._RKIND + rreice_p(i,k,j) = 0._RKIND + rresnow_p(i,k,j) = 0._RKIND + enddo + enddo + enddo + do j = jts,jte do k = kts,kte+2 do i = its,ite @@ -393,7 +442,8 @@ subroutine radiation_lw_from_MPAS(xtime_s,configs,mesh,state,time_lev,diag_physi enddo enddo enddo - if(allocated(p2d)) deallocate(p2d) + if(allocated(p2d)) deallocate(p2d) + if(allocated(o32d)) deallocate(o32d) else do k = 1, num_oznLevels pin_p(k) = 0.0_RKIND @@ -440,7 +490,6 @@ subroutine radiation_lw_from_MPAS(xtime_s,configs,mesh,state,time_lev,diag_physi !and emstot_p are filled with the MPAS arrays abstot, absnxt, and emstot. If it !is a new run, these three arrays will be initialized to zero;If a restart run, !these three arrays will be filled with the restart values. - call mpas_timer_start("CAM lw: fill arrays for infrared absorption") if(xtime_s .lt. 
1.e-12) then do j = jts,jte do n = 1,cam_abs_dim1 @@ -465,7 +514,6 @@ subroutine radiation_lw_from_MPAS(xtime_s,configs,mesh,state,time_lev,diag_physi enddo endif - call mpas_timer_start("CAM lw: ozone and aerosols") !ozone mixing ratio: do k = 1, num_oznlevels pin_p(k) = pin(k) @@ -499,7 +547,6 @@ subroutine radiation_lw_from_MPAS(xtime_s,configs,mesh,state,time_lev,diag_physi enddo enddo enddo - call mpas_timer_stop("CAM lw: ozone and aerosols") case default @@ -511,24 +558,28 @@ subroutine radiation_lw_from_MPAS(xtime_s,configs,mesh,state,time_lev,diag_physi end subroutine radiation_lw_from_MPAS -!================================================================================================== - subroutine radiation_lw_to_MPAS(diag_physics,tend_physics) -!================================================================================================== +!================================================================================================================= + subroutine radiation_lw_to_MPAS(diag_physics,tend_physics,its,ite) +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: tend_physics + integer,intent(in):: its,ite + !local pointers: real(kind=RKIND),dimension(:),pointer :: glw,lwcf,lwdnb,lwdnbc,lwdnt,lwdntc,lwupb,lwupbc, & lwupt,lwuptc,olrtoa real(kind=RKIND),dimension(:,:),pointer:: rthratenlw + real(kind=RKIND),dimension(:,:),pointer:: rre_cloud,rre_ice,rre_snow !local variables and arrays: integer:: nlay,pcols + integer:: i,j,k real(kind=RKIND),dimension(:,:),allocatable:: p1d -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_array(diag_physics,'glw' ,glw ) call 
mpas_pool_get_array(diag_physics,'lwcf' ,lwcf ) @@ -566,20 +617,46 @@ subroutine radiation_lw_to_MPAS(diag_physics,tend_physics) enddo enddo + radiation_lw_select: select case (trim(radt_lw_scheme)) + + case("rrtmg_lw") + call mpas_pool_get_array(diag_physics,'rre_cloud',rre_cloud) + call mpas_pool_get_array(diag_physics,'rre_ice' ,rre_ice ) + call mpas_pool_get_array(diag_physics,'rre_snow' ,rre_snow ) + + do j = jts,jte + do k = kts,kte + do i = its,ite + rre_cloud(k,i) = rrecloud_p(i,k,j) + rre_ice(k,i) = rreice_p(i,k,j) + rre_snow(k,i) = rresnow_p(i,k,j) + enddo + enddo + enddo + + case default + + end select radiation_lw_select + end subroutine radiation_lw_to_MPAS -!================================================================================================== - subroutine radiation_camlw_to_MPAS(diag_physics) -!================================================================================================== +!================================================================================================================= + subroutine radiation_camlw_to_MPAS(diag_physics,its,ite) +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(inout):: diag_physics + integer,intent(in):: its,ite + +!local variables: + integer:: i,j,k,n + !local pointers: real(kind=RKIND),dimension(:,:),pointer :: emstot real(kind=RKIND),dimension(:,:,:),pointer:: absnxt,abstot -!--------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_array(diag_physics,'absnxt',absnxt) call mpas_pool_get_array(diag_physics,'abstot',abstot) @@ -610,9 +687,9 @@ subroutine radiation_camlw_to_MPAS(diag_physics) end subroutine radiation_camlw_to_MPAS 
-!================================================================================================== +!================================================================================================================= subroutine init_radiation_lw(dminfo,mesh,atm_input,diag,state,time_lev) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info),intent(in):: dminfo @@ -625,37 +702,31 @@ subroutine init_radiation_lw(dminfo,mesh,atm_input,diag,state,time_lev) type(mpas_pool_type),intent(inout),optional:: atm_input type(mpas_pool_type),intent(inout),optional:: state -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- -! write(0,*) -! write(0,*) '--- enter radiation_lw initialization:' radiation_lw_select: select case (trim(radt_lw_scheme)) case ("rrtmg_lw") -! write(0,*) ' enter subroutine rrtmg_lwinit:' call rrtmg_initlw_forMPAS(dminfo) -! write(0,*) ' end subroutine rrtmg_lwinit' case("cam_lw") -! write(0,*) ' enter subroutine camradinit:' call camradinit(dminfo,mesh,atm_input,diag,state,time_lev) -! write(0,*) ' end subroutine camradinit' case default end select radiation_lw_select -! 
write(0,*) '--- end radiation_lw initialization' end subroutine init_radiation_lw -!================================================================================================== +!================================================================================================================= subroutine driver_radiation_lw(xtime_s,configs,mesh,state,time_lev,diag_physics,atm_input, & - sfc_input,tend_physics) -!================================================================================================== + sfc_input,tend_physics,its,ite) +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(in):: configs + integer,intent(in):: its,ite integer,intent(in):: time_lev real(kind=RKIND),intent(in):: xtime_s @@ -674,9 +745,8 @@ subroutine driver_radiation_lw(xtime_s,configs,mesh,state,time_lev,diag_physics, integer:: o3input real(kind=RKIND):: radt,xtime_m -!-------------------------------------------------------------------------------------------------- - call mpas_timer_start("radiation_lw") -! write(0,100) +!----------------------------------------------------------------------------------------------------------------- +!write(0,100) !formats: 100 format(/,' --- enter subroutine driver_radiation_lw: ',i6) @@ -685,39 +755,36 @@ subroutine driver_radiation_lw(xtime_s,configs,mesh,state,time_lev,diag_physics, call mpas_pool_get_config(configs,'config_o3climatology',config_o3climatology) !copy MPAS arrays to local arrays: - call radiation_lw_from_MPAS(xtime_s,configs,mesh,state,time_lev,diag_physics,atm_input,sfc_input) + call radiation_lw_from_MPAS(xtime_s,configs,mesh,state,time_lev,diag_physics,atm_input,sfc_input,its,ite) !call to longwave radiation scheme: radiation_lw_select: select case (trim(radt_lw_scheme)) case ("rrtmg_lw") -! 
write(0,*) '--- enter subroutine rrtmg_lwrad:' o3input = 0 if(config_o3climatology) o3input = 2 call rrtmg_lwrad( & - p3d = pres_hyd_p , p8w = pres2_hyd_p , pi3d = pi_p ,& - t3d = t_p , t8w = t2_p , rho3d = rho_p ,& - rthratenlw = rthratenlw_p , lwupt = lwupt_p , lwuptc = lwuptc_p ,& - lwdnt = lwdnt_p , lwdntc = lwdntc_p , lwupb = lwupb_p ,& - lwupbc = lwupbc_p , lwdnb = lwdnb_p , lwdnbc = lwdnbc_p ,& - lwcf = lwcf_p , glw = glw_p , olr = olrtoa_p ,& - emiss = sfc_emiss_p , tsk = tsk_p , dz8w = dz_p ,& - cldfra3d = cldfrac_p , r = R_d , g = gravity ,& - icloud = icloud , warm_rain = warm_rain , f_ice_phy = f_ice ,& - f_rain_phy = f_rain , xland = xland_p , xice = xice_p ,& - snow = snow_p , qv3d = qv_p , qc3d = qc_p ,& - qr3d = qr_p , qi3d = qi_p , qs3d = qs_p ,& - qg3d = qg_p , f_qv = f_qv , f_qc = f_qc ,& - f_qr = f_qr , f_qi = f_qi , f_qs = f_qs ,& - f_qg = f_qg , o3input = o3input , noznlevels = num_oznlevels ,& - pin = pin_p , o3clim = o3clim_p , & - ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde ,& - ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme ,& - its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + p3d = pres_hyd_p , p8w = pres2_hyd_p , pi3d = pi_p , & + t3d = t_p , t8w = t2_p , dz8w = dz_p , & +! qv3d = qv_p , qc3d = qc_p , qi3d = qi_p , & +! 
qs3d = qs_p , cldfra3d = cldfrac_p , tsk = tsk_p , & + qv3d = qvrad_p , qc3d = qcrad_p , qi3d = qirad_p , & + qs3d = qsrad_p , cldfra3d = cldfrac_p , tsk = tsk_p , & + emiss = sfc_emiss_p , xland = xland_p , xice = xice_p , & + snow = snow_p , icloud = icloud , o3input = o3input , & + noznlevels = num_oznlevels , pin = pin_p , o3clim = o3clim_p , & + glw = glw_p , olr = olrtoa_p , lwcf = lwcf_p , & + rthratenlw = rthratenlw_p , has_reqc = has_reqc , has_reqi = has_reqi , & + has_reqs = has_reqs , re_cloud = recloud_p , re_ice = reice_p , & + re_snow = resnow_p , rre_cloud = rrecloud_p , rre_ice = rreice_p , & + rre_snow = rresnow_p , lwupt = lwupt_p , lwuptc = lwuptc_p , & + lwdnt = lwdnt_p , lwdntc = lwdntc_p , lwupb = lwupb_p , & + lwupbc = lwupbc_p , lwdnb = lwdnb_p , lwdnbc = lwdnbc_p , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & ) -! write(0,*) '--- exit subroutine rrtmg_lwrad' - case ("cam_lw") xtime_m = xtime_s/60. @@ -727,8 +794,7 @@ subroutine driver_radiation_lw(xtime_s,configs,mesh,state,time_lev,diag_physics, !... convert the radiation time_step to minutes: radt = dt_radtlw/60. - call mpas_timer_start("camrad") -! write(0,*) '--- enter subroutine camrad_lw: doabsems=',doabsems +! write(0,*) '--- enter subroutine camrad_lw: doabsems=',doabsems call camrad( dolw = .true. , dosw = .false. , & p_phy = pres_hyd_p , p8w = pres2_hyd_p , & pi_phy = pi_p , t_phy = t_p , & @@ -783,16 +849,12 @@ subroutine driver_radiation_lw(xtime_s,configs,mesh,state,time_lev,diag_physics, end select radiation_lw_select !copy local arrays to MPAS grid: - call radiation_lw_to_MPAS(diag_physics,tend_physics) + call radiation_lw_to_MPAS(diag_physics,tend_physics,its,ite) -! 
write(0,*) '--- end subroutine driver_radiation_lw' - call mpas_timer_stop("radiation_lw") - -!formats: - 200 format(i3,i3,8(1x,e15.8)) +!write(0,*) '--- end subroutine driver_radiation_lw.' end subroutine driver_radiation_lw -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_driver_radiation_lw -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_driver_radiation_sw.F b/src/core_atmosphere/physics/mpas_atmphys_driver_radiation_sw.F index b81ccf7ad0..d04c90f701 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_driver_radiation_sw.F +++ b/src/core_atmosphere/physics/mpas_atmphys_driver_radiation_sw.F @@ -5,12 +5,11 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_driver_radiation_sw use mpas_kind_types use mpas_derived_types use mpas_pool_routines - use mpas_timer use mpas_atmphys_constants use mpas_atmphys_manager, only: gmt,curr_julday,julday,year @@ -30,60 +29,67 @@ module mpas_atmphys_driver_radiation_sw init_radiation_sw, & radconst - integer,private:: i,j,k,kk,n - -!>\brief MPAS driver for parameterization of shortwave radiation codes. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. 
-!> -!>\details -!> -!> subroutines in mpas_atmphys_driver_radiation_sw: -!> ------------------------------------------------ -!> allocate_radiation_sw : allocate local arrays for parameterization of sw radiation codes. -!> deallocate_radiation_sw : deallocate local arrays for parameterization of sw radiation codes. -!> init_radiation_sw : initialization of individual sw radiation codes. -!> driver_radiation_sw : main driver (called from subroutine physics_driver). -!> radiation_sw_from_MPAS : initialize local arrays. -!> radiation_sw_to_MPAS : copy local arrays to MPAS arrays. -!> radconst : calculate solar declination,... -!> -!> WRF physics called from driver_radiation_sw: -!> -------------------------------------------- -!> * module_ra_cam : CAM short wave radiation code. -!> * module_ra_rrtmg_sw : RRTMG short wave radiation code. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * removed the pre-processor option "do_hydrostatic_pressure" before call to subroutines -!> rrtmg_sw and camrad. -!> Laura D. Fowler (laura@ucar.edu) / 2013-05-29. -!> * added structure diag in the call to subroutine init_radiation_sw and call to subroutine -!> camradinit for initialization of variable mxaerl. -!> Laura D. Fowler (laura@ucar.edu) / 2013-07-01. -!> * modified the call to subroutine rrtmg_swrad to include the option of using the same ozone -!> climatology as the one used in the CAM radiation codes. -!> Laura D. Fowler (laura@ucar.edu) / 2013-07-17. -!> * in call to subroutine rrtmg_swrad, replaced the variable g (that originally pointed to -!> gravity) with gravity, for simplicity. -!> Laura D. Fowler (laura@ucar.edu) / 2014-03-21. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. 
+!MPAS driver for parameterization of shortwave radiation codes. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_driver_radiation_sw: +! ------------------------------------------------ +! allocate_radiation_sw : allocate local arrays for parameterization of sw radiation codes. +! deallocate_radiation_sw: deallocate local arrays for parameterization of sw radiation codes. +! init_radiation_sw : initialization of individual sw radiation codes. +! driver_radiation_sw : main driver (called from subroutine physics_driver). +! radiation_sw_from_MPAS : initialize local arrays. +! radiation_sw_to_MPAS : copy local arrays to MPAS arrays. +! radconst : calculate solar declination,... +! +! WRF physics called from driver_radiation_sw: +! -------------------------------------------- +! * module_ra_cam : CAM short wave radiation code. +! * module_ra_rrtmg_sw : RRTMG short wave radiation code. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * removed the pre-processor option "do_hydrostatic_pressure" before call to subroutines +! rrtmg_sw and camrad. +! Laura D. Fowler (laura@ucar.edu) / 2013-05-29. +! * added structure diag in the call to subroutine init_radiation_sw and call to subroutine +! camradinit for initialization of variable mxaerl. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-01. +! * modified the call to subroutine rrtmg_swrad to include the option of using the same ozone +! climatology as the one used in the CAM radiation codes. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-17. +! * in call to subroutine rrtmg_swrad, replaced the variable g (that originally pointed to +! gravity) with gravity, for simplicity. +! Laura D. Fowler (laura@ucar.edu) / 2014-03-21. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. 
Fowler (laura@ucar.edu) / 2014-05-15. +! * cleaned-up the call to rrtmg_swrad after cleaning up subroutine rrtmg_swrad in module_ra_rrtmg_sw.F. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-05. +! * added the cloud radii for cloud water, cloud ice, and snow calculated in the Thompson cloud microphysics +! scheme in the call to subroutine rrtmg_swrad. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-07. +! * removed qr_p, and qg_p in the call to rrtmg_swrad since not used in the calculation of the cloud optical +! properties. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-08. +! * in the call to rrtmg_swrad, substituted the variables qv_p, qc_p, qi_p, and qs_p with qvrad_p, qcrad_p, +! qirad_p, and qsrad_p initialized in subroutine cloudiness_from_MPAS. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-09. contains -!================================================================================================== +!================================================================================================================= subroutine allocate_radiation_sw(xtime_s) -!================================================================================================== +!================================================================================================================= !input arguments: real(kind=RKIND),intent(in):: xtime_s -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- if(.not.allocated(f_ice) ) allocate(f_ice(ims:ime,kms:kme,jms:jme) ) if(.not.allocated(f_rain) ) allocate(f_rain(ims:ime,kms:kme,jms:jme) ) @@ -114,6 +120,10 @@ subroutine allocate_radiation_sw(xtime_s) radiation_sw_select: select case (trim(radt_sw_scheme)) case("rrtmg_sw") + if(.not.allocated(recloud_p) ) allocate(recloud_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(reice_p) ) allocate(reice_p(ims:ime,kms:kme,jms:jme) ) + 
if(.not.allocated(resnow_p) ) allocate(resnow_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(alswvisdir_p) ) allocate(alswvisdir_p(ims:ime,jms:jme) ) if(.not.allocated(alswvisdif_p) ) allocate(alswvisdif_p(ims:ime,jms:jme) ) if(.not.allocated(alswnirdir_p) ) allocate(alswnirdir_p(ims:ime,jms:jme) ) @@ -179,9 +189,9 @@ subroutine allocate_radiation_sw(xtime_s) end subroutine allocate_radiation_sw -!================================================================================================== +!================================================================================================================= subroutine deallocate_radiation_sw -!================================================================================================== +!================================================================================================================= if(allocated(f_ice) ) deallocate(f_ice ) if(allocated(f_rain) ) deallocate(f_rain ) @@ -209,6 +219,10 @@ subroutine deallocate_radiation_sw radiation_sw_select: select case (trim(radt_sw_scheme)) case("rrtmg_sw") + if(allocated(recloud_p) ) deallocate(recloud_p ) + if(allocated(reice_p) ) deallocate(reice_p ) + if(allocated(resnow_p) ) deallocate(resnow_p ) + if(allocated(alswvisdir_p) ) deallocate(alswvisdir_p ) if(allocated(alswvisdif_p) ) deallocate(alswvisdif_p ) if(allocated(alswnirdir_p) ) deallocate(alswnirdir_p ) @@ -259,10 +273,10 @@ subroutine deallocate_radiation_sw end subroutine deallocate_radiation_sw -!================================================================================================== +!================================================================================================================= subroutine radiation_sw_from_MPAS(configs,mesh,state,time_lev,diag_physics,atm_input, & - sfc_input,xtime_s) -!================================================================================================== + sfc_input,xtime_s,its,ite) 
+!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: mesh @@ -272,9 +286,14 @@ subroutine radiation_sw_from_MPAS(configs,mesh,state,time_lev,diag_physics,atm_i type(mpas_pool_type),intent(in):: atm_input type(mpas_pool_type),intent(in):: sfc_input + integer,intent(in):: its,ite integer,intent(in):: time_lev + real(kind=RKIND),intent(in):: xtime_s +!local variables: + integer:: i,j,k,n + !local pointers: logical,pointer:: config_o3climatology @@ -283,9 +302,10 @@ subroutine radiation_sw_from_MPAS(configs,mesh,state,time_lev,diag_physics,atm_i real(kind=RKIND),dimension(:),pointer :: m_ps,pin real(kind=RKIND),dimension(:),pointer :: sfc_albedo,sfc_emiss real(kind=RKIND),dimension(:,:),pointer :: cldfrac,m_hybi,o3clim + real(kind=RKIND),dimension(:,:),pointer :: re_cloud,re_ice,re_snow real(kind=RKIND),dimension(:,:,:),pointer:: aerosols,ozmixm -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_config(configs,'config_o3climatology',config_o3climatology) @@ -364,6 +384,20 @@ subroutine radiation_sw_from_MPAS(configs,mesh,state,time_lev,diag_physics,atm_i radiation_sw_select: select case (trim(radt_sw_scheme)) case("rrtmg_sw") + call mpas_pool_get_array(diag_physics,'re_cloud',re_cloud) + call mpas_pool_get_array(diag_physics,'re_ice' ,re_ice ) + call mpas_pool_get_array(diag_physics,'re_snow' ,re_snow ) + + do j = jts,jte + do k = kts,kte + do i = its,ite + recloud_p(i,k,j) = re_cloud(k,i) + reice_p(i,k,j) = re_ice(k,i) + resnow_p(i,k,j) = re_snow(k,i) + enddo + enddo + enddo + do j = jts,jte do k = kts,kte+2 do i = its,ite @@ -494,21 +528,26 @@ subroutine radiation_sw_from_MPAS(configs,mesh,state,time_lev,diag_physics,atm_i end subroutine radiation_sw_from_MPAS 
-!================================================================================================== - subroutine radiation_sw_to_MPAS(diag_physics,tend_physics) -!================================================================================================== +!================================================================================================================= + subroutine radiation_sw_to_MPAS(diag_physics,tend_physics,its,ite) +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: tend_physics + integer,intent(in):: its,ite + +!local variables: + integer:: i,j,k,n + !local pointers: real(kind=RKIND),dimension(:),pointer :: coszr,gsw,swcf,swdnb,swdnbc,swdnt,swdntc, & swupb,swupbc,swupt,swuptc !real(kind=RKIND),dimension(:,:),pointer:: swdnflx,swdnflxc,swupflx,swupflxc real(kind=RKIND),dimension(:,:),pointer:: rthratensw -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_array(diag_physics,'coszr' ,coszr ) call mpas_pool_get_array(diag_physics,'gsw' ,gsw ) @@ -562,9 +601,9 @@ subroutine radiation_sw_to_MPAS(diag_physics,tend_physics) end subroutine radiation_sw_to_MPAS -!================================================================================================== +!================================================================================================================= subroutine init_radiation_sw(dminfo,mesh,atm_input,diag,state,time_lev) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info), 
intent(in):: dminfo @@ -577,42 +616,36 @@ subroutine init_radiation_sw(dminfo,mesh,atm_input,diag,state,time_lev) type(mpas_pool_type),intent(inout),optional:: atm_input type(mpas_pool_type),intent(inout),optional:: state -!-------------------------------------------------------------------------------------------------- -! write(0,*) -! write(0,*) '--- enter radiation_sw initialization:' +!----------------------------------------------------------------------------------------------------------------- !call to shortwave radiation scheme: radiation_sw_select: select case (trim(radt_sw_scheme)) case ("rrtmg_sw") -! write(0,*) ' enter subroutine rrtmg_swinit:' call rrtmg_initsw_forMPAS(dminfo) -! write(0,*) ' end subroutine rrtmg_swinit' case("cam_sw") -! write(0,*) ' enter subroutine camradinit:' call camradinit(dminfo,mesh,atm_input,diag,state,time_lev) -! write(0,*) ' end subroutine camradinit' case default end select radiation_sw_select -! write(0,*) '--- end radiation_sw initialization' - end subroutine init_radiation_sw -!================================================================================================== +!================================================================================================================= subroutine driver_radiation_sw(itimestep,configs,mesh,state,time_lev,diag_physics,atm_input, & - sfc_input,tend_physics,xtime_s) -!================================================================================================== + sfc_input,tend_physics,xtime_s,its,ite) +!================================================================================================================= !input arguments: integer,intent(in):: itimestep type(mpas_pool_type),intent(in):: configs type(mpas_pool_type),intent(in):: mesh + integer,intent(in):: its,ite integer,intent(in):: time_lev + real(kind=RKIND),intent(in):: xtime_s !inout arguments: @@ -630,8 +663,8 @@ subroutine driver_radiation_sw(itimestep,configs,mesh,state,time_lev,diag_physic 
integer:: o3input real(kind=RKIND):: radt,xtime_m -!-------------------------------------------------------------------------------------------------- -! write(0,100) itimestep +!----------------------------------------------------------------------------------------------------------------- +!write(0,100) itimestep call mpas_pool_get_config(configs,'config_o3climatology',config_o3climatology) @@ -649,12 +682,12 @@ subroutine driver_radiation_sw(itimestep,configs,mesh,state,time_lev,diag_physic xtime_m = xtime_s/60. !copy MPAS arrays to local arrays: - call radiation_sw_from_MPAS(configs,mesh,state,time_lev,diag_physics,atm_input,sfc_input,xtime_s) + call radiation_sw_from_MPAS(configs,mesh,state,time_lev,diag_physics,atm_input,sfc_input,xtime_s,its,ite) !... calculates solar declination: !call radconst(declin,solcon,julday,degrad,dpd) call radconst(declin,solcon,curr_julday,degrad,dpd) -! write(0,101) itimestep,year,julday,gmt,xtime_m,curr_julday,solcon,declin +!write(0,101) itimestep,year,julday,gmt,xtime_m,curr_julday,solcon,declin !... convert the radiation time_step to minutes: radt = dt_radtsw/60. @@ -663,44 +696,34 @@ subroutine driver_radiation_sw(itimestep,configs,mesh,state,time_lev,diag_physic radiation_sw_select: select case (trim(radt_sw_scheme)) case ("rrtmg_sw") - -! 
write(0,*) '--- enter subroutine rrtmg_swrad:' o3input = 0 if(config_o3climatology) o3input = 2 call rrtmg_swrad( & - p3d = pres_hyd_p , p8w = pres2_hyd_p , pi3d = pi_p ,& - t3d = t_p , t8w = t2_p , rho3d = rho_p ,& - rthratensw = rthratensw_p , swupt = swupt_p , swuptc = swuptc_p ,& - swdnt = swdnt_p , swdntc = swdntc_p , swupb = swupb_p ,& - swupbc = swupbc_p , swdnb = swdnb_p , swdnbc = swdnbc_p ,& - swcf = swcf_p , gsw = gsw_p , xtime = xtime_m ,& - gmt = gmt , xlat = xlat_p , xlong = xlon_p ,& - radt = radt , degrad = degrad , declin = declin ,& - coszr = coszr_p , julday = julday , solcon = solcon ,& - albedo = sfc_albedo_p , tsk = tsk_p , dz8w = dz_p ,& - cldfra3d = cldfrac_p , r = R_d , g = gravity ,& - icloud = icloud , warm_rain = warm_rain , f_ice_phy = f_ice ,& - f_rain_phy = f_rain , xland = xland_p , xice = xice_p ,& - snow = snow_p , qv3d = qv_p , qc3d = qc_p ,& - qr3d = qr_p , qi3d = qi_p , qs3d = qs_p ,& - qg3d = qg_p , f_qv = f_qv , f_qc = f_qc ,& - f_qr = f_qr , f_qi = f_qi , f_qs = f_qs ,& - f_qg = f_qg , alswvisdir = alswvisdir_p , alswvisdif = alswvisdif_p ,& - alswnirdir = alswnirdir_p , alswnirdif = alswnirdif_p , swvisdir = swvisdir_p ,& - swvisdif = swvisdif_p , swnirdir = swnirdir_p , swnirdif = swnirdif_p ,& - o3input = o3input , noznlevels = num_oznlevels , pin = pin_p ,& - o3clim = o3clim_p , sf_surface_physics = sf_surface_physics ,& - !end optional arguments. - ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde ,& - ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme ,& - its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + p3d = pres_hyd_p , p8w = pres2_hyd_p , pi3d = pi_p , & + t3d = t_p , t8w = t2_p , dz8w = dz_p , & +! qv3d = qv_p , qc3d = qc_p , qi3d = qi_p , & +! 
qs3d = qs_p , cldfra3d = cldfrac_p , tsk = tsk_p , & + qv3d = qvrad_p , qc3d = qcrad_p , qi3d = qirad_p , & + qs3d = qsrad_p , cldfra3d = cldfrac_p , tsk = tsk_p , & + albedo = sfc_albedo_p , xland = xland_p , xice = xice_p , & + snow = snow_p , coszr = coszr_p , xtime = xtime_m , & + gmt = gmt , julday = julday , radt = radt , & + degrad = degrad , declin = declin , solcon = solcon , & + xlat = xlat_p , xlong = xlon_p , icloud = icloud , & + o3input = o3input , noznlevels = num_oznlevels , pin = pin_p , & + o3clim = o3clim_p , gsw = gsw_p , swcf = swcf_p , & + rthratensw = rthratensw_p , has_reqc = has_reqc , has_reqi = has_reqi , & + has_reqs = has_reqs , re_cloud = recloud_p , re_ice = reice_p , & + re_snow = resnow_p , swupt = swupt_p , swuptc = swuptc_p , & + swdnt = swdnt_p , swdntc = swdntc_p , swupb = swupb_p , & + swupbc = swupbc_p , swdnb = swdnb_p , swdnbc = swdnbc_p , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & ) -! write(0,*) '--- exit subroutine rrtmg_swrad' case ("cam_sw") - -! write(0,*) '--- enter subroutine camrad_sw:' call camrad( dolw = .false. , dosw = .true. , & p_phy = pres_hyd_p , p8w = pres2_hyd_p , & pi_phy = pi_p , t_phy = t_p , & @@ -755,18 +778,15 @@ subroutine driver_radiation_sw(itimestep,configs,mesh,state,time_lev,diag_physic end select radiation_sw_select !copy local arrays to MPAS grid: - call radiation_sw_to_MPAS(diag_physics,tend_physics) - -! write(0,*) '--- end subroutine driver_radiation_sw' + call radiation_sw_to_MPAS(diag_physics,tend_physics,its,ite) -!formats: - 200 format(i3,i6,8(1x,e15.8)) +!write(0,*) '--- end subroutine driver_radiation_sw.' 
end subroutine driver_radiation_sw -!================================================================================================== +!================================================================================================================= subroutine radconst(declin,solcon,julian,degrad,dpd) -!================================================================================================== +!================================================================================================================= !input arguments: !integer,intent(in):: julian @@ -779,7 +799,7 @@ subroutine radconst(declin,solcon,julian,degrad,dpd) !local variables: real(kind=RKIND):: obecl,sinob,sxlong,arg,decdeg,djul,rjul,eccfac -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- declin=0. solcon=0. @@ -808,6 +828,6 @@ subroutine radconst(declin,solcon,julian,degrad,dpd) end subroutine radconst -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_driver_radiation_sw -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_driver_sfclayer.F b/src/core_atmosphere/physics/mpas_atmphys_driver_sfclayer.F index 5dd78f4143..fc2832db12 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_driver_sfclayer.F +++ b/src/core_atmosphere/physics/mpas_atmphys_driver_sfclayer.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! 
distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_driver_sfclayer use mpas_kind_types use mpas_derived_types @@ -15,6 +15,7 @@ module mpas_atmphys_driver_sfclayer use mpas_atmphys_vars !wrf physics: + use module_sf_mynn use module_sf_sfclay implicit none @@ -30,177 +31,333 @@ module mpas_atmphys_driver_sfclayer integer,parameter,private:: scm_force_flux = 0 !SCM surface forcing by surface fluxes. !0=no 1=yes (WRF single column model option only). - integer,private:: i,j - - -!>\brief MPAS driver for parameterization of the surface layer. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_driver_sfclayer: -!> -------------------------------------------- -!> allocate_sfclayer : allocate local arrays for parameterization of surface layer. -!> deallocate_sfclayer : deallocate local arrays for parameterization of surface layer. -!> init_sfclayer : initialization of individual surface layer schemes. -!> driver_sfclayer : main driver (called from subroutine physics_driver). -!> sfclayer_from_MPAS : initialize local arrays. -!> sfclayer_to_MPAS : copy local arrays to MPAS arrays. -!> -!> WRF physics called from driver_sfclayer: -!> ---------------------------------------- -!> * module_sf_sfclay: Monin-Obukhov surface layer scheme. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * removed the pre-processor option "do_hydrostatic_pressure" before call to the subroutine -!> sfclay. -!> Laura D. Fowler (laura@ucar.edu) / 2013-05-29. -!> * updated the definition of the horizontal resolution to the actual mean distance between -!> cell centers. -!> Laura D. 
Fowler (laura@ucar.edu) / 2013-08-23. -!> * in call to subroutine sfclay, replaced the variable g (that originally pointed to gravity) -!> with gravity, for simplicity. -!> Laura D. Fowler (laura@ucar.edu) / 2014-03-21. -!> * in subroutine sfclayer_from_MPAS, added initialization of ustm, cd, cda, ck, and cka. in -!> subroutine sfclayer_to_MPAS, filled diag_physics%ustm with ustm_p after call to subroutine -!> sfclay. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-16. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * added initialization of local logical "allowed_to read" in subroutine init_sfclayer. This -!> logical is actually not used in subroutine sfclayinit. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-25. - +!MPAS driver for parameterization of the surface layer. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_driver_sfclayer: +! -------------------------------------------- +! allocate_sfclayer : allocate local arrays for parameterization of surface layer. +! deallocate_sfclayer : deallocate local arrays for parameterization of surface layer. +! init_sfclayer : initialization of individual surface layer schemes. +! driver_sfclayer : main driver (called from subroutine physics_driver). +! sfclayer_from_MPAS : initialize local arrays. +! sfclayer_to_MPAS : copy local arrays to MPAS arrays. +! +! WRF physics called from driver_sfclayer: +! ---------------------------------------- +! * module_sf_sfclay: Monin-Obukhov surface layer scheme. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * removed the pre-processor option "do_hydrostatic_pressure" before call to the subroutine sfclay. +! Laura D. Fowler (laura@ucar.edu) / 2013-05-29. +! 
* updated the definition of the horizontal resolution to the actual mean distance between cell centers. +! Laura D. Fowler (laura@ucar.edu) / 2013-08-23. +! * in call to subroutine sfclay, replaced the variable g (that originally pointed to gravity) +! with gravity, for simplicity. +! Laura D. Fowler (laura@ucar.edu) / 2014-03-21. +! * in subroutine sfclayer_from_MPAS, added initialization of ustm, cd, cda, ck, and cka. in +! subroutine sfclayer_to_MPAS, filled diag_physics%ustm with ustm_p after call to subroutine sfclay. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-16. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * added initialization of local logical "allowed_to read" in subroutine init_sfclayer. This logical +! is actually not used in subroutine sfclayinit. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-25. +! * renamed "monin_obukhov" with "sf_monin_obukhov". +! Laura D. Fowler (laura@ucar.edu) / 2016-03-25. +! * added the implementation of the MYNN surface layer scheme from WRF 3.6.1. +! Laura D. Fowler (laura@ucar.edu) / 2016-03-30. +! * added the calculation of surface layer variables over seaice cells when config_frac_seaice is set to true. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-03. +! * changed the definition of dx_p to match that used in other physics +! parameterizations. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-18. 
contains -!================================================================================================== - subroutine allocate_sfclayer -!================================================================================================== - - if(.not.allocated(dx_p) ) allocate(dx_p(ims:ime,jms:jme) ) - if(.not.allocated(br_p) ) allocate(br_p(ims:ime,jms:jme) ) - if(.not.allocated(cd_p) ) allocate(cd_p(ims:ime,jms:jme) ) - if(.not.allocated(cda_p) ) allocate(cda_p(ims:ime,jms:jme) ) - if(.not.allocated(chs_p) ) allocate(chs_p(ims:ime,jms:jme) ) - if(.not.allocated(chs2_p) ) allocate(chs2_p(ims:ime,jms:jme) ) - if(.not.allocated(ck_p) ) allocate(ck_p(ims:ime,jms:jme) ) - if(.not.allocated(cka_p) ) allocate(cka_p(ims:ime,jms:jme) ) - if(.not.allocated(cpm_p) ) allocate(cpm_p(ims:ime,jms:jme) ) - if(.not.allocated(cqs2_p) ) allocate(cqs2_p(ims:ime,jms:jme) ) - if(.not.allocated(gz1oz0_p) ) allocate(gz1oz0_p(ims:ime,jms:jme) ) - if(.not.allocated(flhc_p) ) allocate(flhc_p(ims:ime,jms:jme) ) - if(.not.allocated(flqc_p) ) allocate(flqc_p(ims:ime,jms:jme) ) - if(.not.allocated(fh_p) ) allocate(fh_p(ims:ime,jms:jme) ) - if(.not.allocated(fm_p) ) allocate(fm_p(ims:ime,jms:jme) ) - if(.not.allocated(hfx_p) ) allocate(hfx_p(ims:ime,jms:jme) ) - if(.not.allocated(hpbl_p) ) allocate(hpbl_p(ims:ime,jms:jme) ) - if(.not.allocated(lh_p) ) allocate(lh_p(ims:ime,jms:jme) ) - if(.not.allocated(mavail_p) ) allocate(mavail_p(ims:ime,jms:jme) ) - if(.not.allocated(mol_p) ) allocate(mol_p(ims:ime,jms:jme) ) - if(.not.allocated(psih_p) ) allocate(psih_p(ims:ime,jms:jme) ) - if(.not.allocated(psim_p) ) allocate(psim_p(ims:ime,jms:jme) ) - if(.not.allocated(q2_p) ) allocate(q2_p(ims:ime,jms:jme) ) - if(.not.allocated(qfx_p) ) allocate(qfx_p(ims:ime,jms:jme) ) - if(.not.allocated(qgh_p) ) allocate(qgh_p(ims:ime,jms:jme) ) - if(.not.allocated(qsfc_p) ) allocate(qsfc_p(ims:ime,jms:jme) ) - if(.not.allocated(regime_p) ) allocate(regime_p(ims:ime,jms:jme) ) - if(.not.allocated(rmol_p) ) 
allocate(rmol_p(ims:ime,jms:jme) ) - if(.not.allocated(t2m_p) ) allocate(t2m_p(ims:ime,jms:jme) ) - if(.not.allocated(tsk_p) ) allocate(tsk_p(ims:ime,jms:jme) ) - if(.not.allocated(th2m_p) ) allocate(th2m_p(ims:ime,jms:jme) ) - if(.not.allocated(u10_p) ) allocate(u10_p(ims:ime,jms:jme) ) - if(.not.allocated(ust_p) ) allocate(ust_p(ims:ime,jms:jme) ) - if(.not.allocated(ustm_p) ) allocate(ustm_p(ims:ime,jms:jme) ) - if(.not.allocated(v10_p) ) allocate(v10_p(ims:ime,jms:jme) ) - if(.not.allocated(wspd_p) ) allocate(wspd_p(ims:ime,jms:jme) ) - if(.not.allocated(xland_p) ) allocate(xland_p(ims:ime,jms:jme) ) - if(.not.allocated(zol_p) ) allocate(zol_p(ims:ime,jms:jme) ) - if(.not.allocated(znt_p) ) allocate(znt_p(ims:ime,jms:jme) ) +!================================================================================================================= + subroutine allocate_sfclayer(config_frac_seaice) +!================================================================================================================= + + logical,intent(in):: config_frac_seaice +!----------------------------------------------------------------------------------------------------------------- + + if(.not.allocated(dx_p) ) allocate(dx_p(ims:ime,jms:jme) ) + if(.not.allocated(br_p) ) allocate(br_p(ims:ime,jms:jme) ) + if(.not.allocated(cd_p) ) allocate(cd_p(ims:ime,jms:jme) ) + if(.not.allocated(cda_p) ) allocate(cda_p(ims:ime,jms:jme) ) + if(.not.allocated(chs_p) ) allocate(chs_p(ims:ime,jms:jme) ) + if(.not.allocated(chs2_p) ) allocate(chs2_p(ims:ime,jms:jme) ) + if(.not.allocated(ck_p) ) allocate(ck_p(ims:ime,jms:jme) ) + if(.not.allocated(cka_p) ) allocate(cka_p(ims:ime,jms:jme) ) + if(.not.allocated(cpm_p) ) allocate(cpm_p(ims:ime,jms:jme) ) + if(.not.allocated(cqs2_p) ) allocate(cqs2_p(ims:ime,jms:jme) ) + if(.not.allocated(gz1oz0_p)) allocate(gz1oz0_p(ims:ime,jms:jme)) + if(.not.allocated(flhc_p) ) allocate(flhc_p(ims:ime,jms:jme) ) + if(.not.allocated(flqc_p) ) 
allocate(flqc_p(ims:ime,jms:jme) ) + if(.not.allocated(hfx_p) ) allocate(hfx_p(ims:ime,jms:jme) ) + if(.not.allocated(hpbl_p) ) allocate(hpbl_p(ims:ime,jms:jme) ) + if(.not.allocated(lh_p) ) allocate(lh_p(ims:ime,jms:jme) ) + if(.not.allocated(mavail_p)) allocate(mavail_p(ims:ime,jms:jme)) + if(.not.allocated(mol_p) ) allocate(mol_p(ims:ime,jms:jme) ) + if(.not.allocated(psih_p) ) allocate(psih_p(ims:ime,jms:jme) ) + if(.not.allocated(psim_p) ) allocate(psim_p(ims:ime,jms:jme) ) + if(.not.allocated(q2_p) ) allocate(q2_p(ims:ime,jms:jme) ) + if(.not.allocated(qfx_p) ) allocate(qfx_p(ims:ime,jms:jme) ) + if(.not.allocated(qgh_p) ) allocate(qgh_p(ims:ime,jms:jme) ) + if(.not.allocated(qsfc_p) ) allocate(qsfc_p(ims:ime,jms:jme) ) + if(.not.allocated(regime_p)) allocate(regime_p(ims:ime,jms:jme)) + if(.not.allocated(rmol_p) ) allocate(rmol_p(ims:ime,jms:jme) ) + if(.not.allocated(t2m_p) ) allocate(t2m_p(ims:ime,jms:jme) ) + if(.not.allocated(tsk_p) ) allocate(tsk_p(ims:ime,jms:jme) ) + if(.not.allocated(th2m_p) ) allocate(th2m_p(ims:ime,jms:jme) ) + if(.not.allocated(u10_p) ) allocate(u10_p(ims:ime,jms:jme) ) + if(.not.allocated(ust_p) ) allocate(ust_p(ims:ime,jms:jme) ) + if(.not.allocated(ustm_p) ) allocate(ustm_p(ims:ime,jms:jme) ) + if(.not.allocated(v10_p) ) allocate(v10_p(ims:ime,jms:jme) ) + if(.not.allocated(wspd_p) ) allocate(wspd_p(ims:ime,jms:jme) ) + if(.not.allocated(xland_p) ) allocate(xland_p(ims:ime,jms:jme) ) + if(.not.allocated(zol_p) ) allocate(zol_p(ims:ime,jms:jme) ) + if(.not.allocated(znt_p) ) allocate(znt_p(ims:ime,jms:jme) ) + + if(config_frac_seaice) then + if(.not.allocated(sst_p) ) allocate(sst_p(ims:ime,jms:jme) ) + if(.not.allocated(xice_p) ) allocate(xice_p(ims:ime,jms:jme) ) + + if(.not.allocated(br_sea) ) allocate(br_sea(ims:ime,jms:jme) ) + if(.not.allocated(chs_sea) ) allocate(chs_sea(ims:ime,jms:jme) ) + if(.not.allocated(chs2_sea) ) allocate(chs2_sea(ims:ime,jms:jme) ) + if(.not.allocated(cqs2_sea) ) 
allocate(cqs2_sea(ims:ime,jms:jme) ) + if(.not.allocated(cpm_sea) ) allocate(cpm_sea(ims:ime,jms:jme) ) + if(.not.allocated(flhc_sea) ) allocate(flhc_sea(ims:ime,jms:jme) ) + if(.not.allocated(flqc_sea) ) allocate(flqc_sea(ims:ime,jms:jme) ) + if(.not.allocated(gz1oz0_sea) ) allocate(gz1oz0_sea(ims:ime,jms:jme) ) + if(.not.allocated(hfx_sea) ) allocate(hfx_sea(ims:ime,jms:jme) ) + if(.not.allocated(qfx_sea) ) allocate(qfx_sea(ims:ime,jms:jme) ) + if(.not.allocated(mavail_sea) ) allocate(mavail_sea(ims:ime,jms:jme) ) + if(.not.allocated(mol_sea) ) allocate(mol_sea(ims:ime,jms:jme) ) + if(.not.allocated(lh_sea) ) allocate(lh_sea(ims:ime,jms:jme) ) + if(.not.allocated(psih_sea) ) allocate(psih_sea(ims:ime,jms:jme) ) + if(.not.allocated(psim_sea) ) allocate(psim_sea(ims:ime,jms:jme) ) + if(.not.allocated(qgh_sea) ) allocate(qgh_sea(ims:ime,jms:jme) ) + if(.not.allocated(qsfc_sea) ) allocate(qsfc_sea(ims:ime,jms:jme) ) + if(.not.allocated(regime_sea) ) allocate(regime_sea(ims:ime,jms:jme) ) + if(.not.allocated(rmol_sea) ) allocate(rmol_sea(ims:ime,jms:jme) ) + if(.not.allocated(tsk_sea) ) allocate(tsk_sea(ims:ime,jms:jme) ) + if(.not.allocated(ust_sea) ) allocate(ust_sea(ims:ime,jms:jme) ) + if(.not.allocated(ustm_sea) ) allocate(ustm_sea(ims:ime,jms:jme) ) + if(.not.allocated(wspd_sea) ) allocate(wspd_sea(ims:ime,jms:jme) ) + if(.not.allocated(xland_sea) ) allocate(xland_sea(ims:ime,jms:jme) ) + if(.not.allocated(zol_sea) ) allocate(zol_sea(ims:ime,jms:jme) ) + if(.not.allocated(znt_sea) ) allocate(znt_sea(ims:ime,jms:jme) ) + + if(.not.allocated(cd_sea) ) allocate(cd_sea(ims:ime,jms:jme) ) + if(.not.allocated(cda_sea) ) allocate(cda_sea(ims:ime,jms:jme) ) + if(.not.allocated(ck_sea) ) allocate(ck_sea(ims:ime,jms:jme) ) + if(.not.allocated(cka_sea) ) allocate(cka_sea(ims:ime,jms:jme) ) + if(.not.allocated(t2m_sea) ) allocate(t2m_sea(ims:ime,jms:jme) ) + if(.not.allocated(th2m_sea) ) allocate(th2m_sea(ims:ime,jms:jme) ) + if(.not.allocated(q2_sea) ) 
allocate(q2_sea(ims:ime,jms:jme) ) + if(.not.allocated(u10_sea) ) allocate(u10_sea(ims:ime,jms:jme) ) + if(.not.allocated(v10_sea) ) allocate(v10_sea(ims:ime,jms:jme) ) + + if(.not.allocated(regime_hold)) allocate(regime_hold(ims:ime,jms:jme)) + endif + + sfclayer_select: select case (trim(sfclayer_scheme)) + + case("sf_monin_obukhov") + if(.not.allocated(fh_p)) allocate(fh_p(ims:ime,jms:jme)) + if(.not.allocated(fm_p)) allocate(fm_p(ims:ime,jms:jme)) + if(config_frac_seaice) then + if(.not.allocated(fh_sea)) allocate(fh_sea(ims:ime,jms:jme)) + if(.not.allocated(fm_sea)) allocate(fm_sea(ims:ime,jms:jme)) + endif + + case("sf_mynn") + if(.not.allocated(snowh_p)) allocate(snowh_p(ims:ime,jms:jme)) + if(.not.allocated(ch_p) ) allocate(ch_p(ims:ime,jms:jme) ) + if(.not.allocated(qcg_p) ) allocate(qcg_p(ims:ime,jms:jme) ) + if(config_frac_seaice) then + if(.not.allocated(ch_sea)) allocate(ch_sea(ims:ime,jms:jme)) + endif + + if(.not.allocated(cov_p) ) allocate(cov_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(qsq_p) ) allocate(qsq_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(tsq_p) ) allocate(tsq_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(sh3d_p) ) allocate(sh3d_p(ims:ime,kms:kme,jms:jme) ) + if(.not.allocated(elpbl_p) ) allocate(elpbl_p(ims:ime,kms:kme,jms:jme) ) + + case default + + end select sfclayer_select end subroutine allocate_sfclayer -!================================================================================================== - subroutine deallocate_sfclayer -!================================================================================================== - - if(allocated(dx_p) ) deallocate(dx_p ) - if(allocated(br_p) ) deallocate(br_p ) - if(allocated(cd_p) ) deallocate(cd_p ) - if(allocated(cda_p) ) deallocate(cda_p ) - if(allocated(chs_p) ) deallocate(chs_p ) - if(allocated(chs2_p) ) deallocate(chs2_p ) - if(allocated(ck_p) ) deallocate(ck_p ) - if(allocated(cka_p) ) deallocate(cka_p ) - if(allocated(cpm_p) ) deallocate(cpm_p ) - 
if(allocated(cqs2_p) ) deallocate(cqs2_p ) - if(allocated(gz1oz0_p) ) deallocate(gz1oz0_p ) - if(allocated(flhc_p) ) deallocate(flhc_p ) - if(allocated(flqc_p) ) deallocate(flqc_p ) - if(allocated(fh_p) ) deallocate(fh_p ) - if(allocated(fm_p) ) deallocate(fm_p ) - if(allocated(hfx_p) ) deallocate(hfx_p ) - if(allocated(hpbl_p) ) deallocate(hpbl_p ) - if(allocated(lh_p) ) deallocate(lh_p ) - if(allocated(mavail_p) ) deallocate(mavail_p ) - if(allocated(mol_p) ) deallocate(mol_p ) - if(allocated(psih_p) ) deallocate(psih_p ) - if(allocated(psim_p) ) deallocate(psim_p ) - if(allocated(q2_p) ) deallocate(q2_p ) - if(allocated(qfx_p) ) deallocate(qfx_p ) - if(allocated(qgh_p) ) deallocate(qgh_p ) - if(allocated(qsfc_p) ) deallocate(qsfc_p ) - if(allocated(regime_p) ) deallocate(regime_p ) - if(allocated(rmol_p) ) deallocate(rmol_p ) - if(allocated(t2m_p) ) deallocate(t2m_p ) - if(allocated(tsk_p) ) deallocate(tsk_p ) - if(allocated(th2m_p) ) deallocate(th2m_p ) - if(allocated(u10_p) ) deallocate(u10_p ) - if(allocated(ust_p) ) deallocate(ust_p ) - if(allocated(ustm_p) ) deallocate(ustm_p ) - if(allocated(v10_p) ) deallocate(v10_p ) - if(allocated(wspd_p) ) deallocate(wspd_p ) - if(allocated(xland_p) ) deallocate(xland_p ) - if(allocated(zol_p) ) deallocate(zol_p ) - if(allocated(znt_p) ) deallocate(znt_p ) +!================================================================================================================= + subroutine deallocate_sfclayer(config_frac_seaice) +!================================================================================================================= + + logical,intent(in):: config_frac_seaice +!----------------------------------------------------------------------------------------------------------------- + + if(allocated(dx_p) ) deallocate(dx_p ) + if(allocated(br_p) ) deallocate(br_p ) + if(allocated(cd_p) ) deallocate(cd_p ) + if(allocated(cda_p) ) deallocate(cda_p ) + if(allocated(chs_p) ) deallocate(chs_p ) + 
if(allocated(chs2_p) ) deallocate(chs2_p ) + if(allocated(ck_p) ) deallocate(ck_p ) + if(allocated(cka_p) ) deallocate(cka_p ) + if(allocated(cpm_p) ) deallocate(cpm_p ) + if(allocated(cqs2_p) ) deallocate(cqs2_p ) + if(allocated(gz1oz0_p)) deallocate(gz1oz0_p) + if(allocated(flhc_p) ) deallocate(flhc_p ) + if(allocated(flqc_p) ) deallocate(flqc_p ) + if(allocated(hfx_p) ) deallocate(hfx_p ) + if(allocated(hpbl_p) ) deallocate(hpbl_p ) + if(allocated(lh_p) ) deallocate(lh_p ) + if(allocated(mavail_p)) deallocate(mavail_p) + if(allocated(mol_p) ) deallocate(mol_p ) + if(allocated(psih_p) ) deallocate(psih_p ) + if(allocated(psim_p) ) deallocate(psim_p ) + if(allocated(q2_p) ) deallocate(q2_p ) + if(allocated(qfx_p) ) deallocate(qfx_p ) + if(allocated(qgh_p) ) deallocate(qgh_p ) + if(allocated(qsfc_p) ) deallocate(qsfc_p ) + if(allocated(regime_p)) deallocate(regime_p) + if(allocated(rmol_p) ) deallocate(rmol_p ) + if(allocated(t2m_p) ) deallocate(t2m_p ) + if(allocated(tsk_p) ) deallocate(tsk_p ) + if(allocated(th2m_p) ) deallocate(th2m_p ) + if(allocated(u10_p) ) deallocate(u10_p ) + if(allocated(ust_p) ) deallocate(ust_p ) + if(allocated(ustm_p) ) deallocate(ustm_p ) + if(allocated(v10_p) ) deallocate(v10_p ) + if(allocated(wspd_p) ) deallocate(wspd_p ) + if(allocated(xland_p) ) deallocate(xland_p ) + if(allocated(zol_p) ) deallocate(zol_p ) + if(allocated(znt_p) ) deallocate(znt_p ) + + if(config_frac_seaice) then + if(allocated(sst_p) ) deallocate(sst_p ) + if(allocated(xice_p) ) deallocate(xice_p ) + + if(allocated(br_sea) ) deallocate(br_sea ) + if(allocated(flhc_sea) ) deallocate(flhc_sea ) + if(allocated(flqc_sea) ) deallocate(flqc_sea ) + if(allocated(gz1oz0_sea) ) deallocate(gz1oz0_sea ) + if(allocated(mol_sea) ) deallocate(mol_sea ) + if(allocated(psih_sea) ) deallocate(psih_sea ) + if(allocated(psim_sea) ) deallocate(psim_sea ) + if(allocated(rmol_sea) ) deallocate(rmol_sea ) + if(allocated(ust_sea) ) deallocate(ust_sea ) + if(allocated(ustm_sea) )
deallocate(ustm_sea ) + if(allocated(wspd_sea) ) deallocate(wspd_sea ) + if(allocated(zol_sea) ) deallocate(zol_sea ) + if(allocated(cd_sea) ) deallocate(cd_sea ) + if(allocated(cda_sea) ) deallocate(cda_sea ) + if(allocated(ck_sea) ) deallocate(ck_sea ) + if(allocated(cka_sea) ) deallocate(cka_sea ) + if(allocated(t2m_sea) ) deallocate(t2m_sea ) + if(allocated(th2m_sea) ) deallocate(th2m_sea ) + if(allocated(q2_sea) ) deallocate(q2_sea ) + if(allocated(u10_sea) ) deallocate(u10_sea ) + if(allocated(v10_sea) ) deallocate(v10_sea ) + if(allocated(regime_hold)) deallocate(regime_hold) + + if(allocated(mavail_sea) ) deallocate(mavail_sea ) + if(allocated(tsk_sea) ) deallocate(tsk_sea ) + if(allocated(xland_sea) ) deallocate(xland_sea ) + if(allocated(znt_sea) ) deallocate(znt_sea ) + endif + + sfclayer_select: select case (trim(sfclayer_scheme)) + + case("sf_monin_obukhov") + if(allocated(fh_p)) deallocate(fh_p) + if(allocated(fm_p)) deallocate(fm_p) + if(config_frac_seaice) then + if(allocated(fh_sea)) deallocate(fh_sea) + if(allocated(fm_sea)) deallocate(fm_sea) + endif + + case("sf_mynn") + if(allocated(snowh_p)) deallocate(snowh_p) + if(allocated(ch_p) ) deallocate(ch_p ) + if(allocated(qcg_p) ) deallocate(qcg_p ) + if(config_frac_seaice) then + if(allocated(ch_sea)) deallocate(ch_sea) + endif + + if(allocated(cov_p) ) deallocate(cov_p ) + if(allocated(qsq_p) ) deallocate(qsq_p ) + if(allocated(tsq_p) ) deallocate(tsq_p ) + if(allocated(sh3d_p) ) deallocate(sh3d_p ) + if(allocated(elpbl_p) ) deallocate(elpbl_p ) + + case default + + end select sfclayer_select end subroutine deallocate_sfclayer -!================================================================================================== - subroutine sfclayer_from_MPAS(mesh,diag_physics,sfc_input) -!================================================================================================== 
+!================================================================================================================= + subroutine sfclayer_from_MPAS(configs,mesh,diag_physics,sfc_input,its,ite) +!================================================================================================================= !input arguments: + type(mpas_pool_type),intent(in):: configs type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(in):: sfc_input type(mpas_pool_type),intent(inout):: diag_physics + integer,intent(in):: its,ite + +!local variables: + integer:: i,j,k + !local pointers: - real(kind=RKIND),dimension(:),pointer:: skintemp,xland - real(kind=RKIND),dimension(:),pointer:: dcEdge_m,hpbl,mavail - real(kind=RKIND),dimension(:),pointer:: br,cpm,chs,chs2,cqs2,fh,fm,flhc,flqc,gz1oz0,hfx, & - qfx,qgh,qsfc,lh,mol,psim,psih,regime,rmol,ust,ustm, & + logical,pointer:: config_frac_seaice + + real(kind=RKIND),pointer:: len_disp + real(kind=RKIND),dimension(:),pointer:: meshDensity + real(kind=RKIND),dimension(:),pointer:: skintemp,sst,xice,xland + real(kind=RKIND),dimension(:),pointer:: hpbl,mavail + real(kind=RKIND),dimension(:),pointer:: br,cpm,chs,chs2,cqs2,flhc,flqc,gz1oz0,hfx,qfx, & + qgh,qsfc,lh,mol,psim,psih,regime,rmol,ust,ustm, & wspd,znt,zol -!-------------------------------------------------------------------------------------------------- +!local pointers specific to monin_obukhov: + real(kind=RKIND),dimension(:),pointer:: fh,fm + +!local pointers specific to mynn: + real(kind=RKIND),dimension(:),pointer:: ch,qcg,snowh + real(kind=RKIND),dimension(:,:),pointer:: cov,el_pbl,qsq,sh3d,tsq + +!----------------------------------------------------------------------------------------------------------------- + +!input variables: + call mpas_pool_get_config(configs,'config_len_disp' ,len_disp) + call mpas_pool_get_config(configs,'config_frac_seaice',config_frac_seaice) + call mpas_pool_get_array(mesh,'meshDensity',meshDensity) - call 
mpas_pool_get_array(sfc_input ,'skintemp',skintemp) - call mpas_pool_get_array(sfc_input ,'xland' ,xland ) - call mpas_pool_get_array(diag_physics,'dcEdge_m',dcEdge_m) call mpas_pool_get_array(diag_physics,'hpbl' ,hpbl ) call mpas_pool_get_array(diag_physics,'mavail' ,mavail ) + call mpas_pool_get_array(sfc_input ,'skintemp',skintemp) + call mpas_pool_get_array(sfc_input ,'xland' ,xland ) + +!inout variables: call mpas_pool_get_array(diag_physics,'br' ,br ) call mpas_pool_get_array(diag_physics,'cpm' ,cpm ) call mpas_pool_get_array(diag_physics,'chs' ,chs ) call mpas_pool_get_array(diag_physics,'chs2' ,chs2 ) call mpas_pool_get_array(diag_physics,'cqs2' ,cqs2 ) - call mpas_pool_get_array(diag_physics,'fh' ,fh ) - call mpas_pool_get_array(diag_physics,'fm' ,fm ) call mpas_pool_get_array(diag_physics,'flhc' ,flhc ) call mpas_pool_get_array(diag_physics,'flqc' ,flqc ) call mpas_pool_get_array(diag_physics,'gz1oz0' ,gz1oz0 ) @@ -210,8 +367,8 @@ subroutine sfclayer_from_MPAS(mesh,diag_physics,sfc_input) call mpas_pool_get_array(diag_physics,'qsfc' ,qsfc ) call mpas_pool_get_array(diag_physics,'lh' ,lh ) call mpas_pool_get_array(diag_physics,'mol' ,mol ) - call mpas_pool_get_array(diag_physics,'psim' ,psim ) call mpas_pool_get_array(diag_physics,'psih' ,psih ) + call mpas_pool_get_array(diag_physics,'psim' ,psim ) call mpas_pool_get_array(diag_physics,'regime' ,regime ) call mpas_pool_get_array(diag_physics,'rmol' ,rmol ) call mpas_pool_get_array(diag_physics,'ust' ,ust ) @@ -223,109 +380,265 @@ subroutine sfclayer_from_MPAS(mesh,diag_physics,sfc_input) do j = jts,jte do i = its,ite !input variables: - dx_p(i,j) = dcEdge_m(i) + dx_p(i,j) = len_disp / meshDensity(i)**0.25 hpbl_p(i,j) = hpbl(i) mavail_p(i,j) = mavail(i) tsk_p(i,j) = skintemp(i) - xland_p(i,j) = xland(i) + xland_p(i,j) = xland(i) + !inout variables: br_p(i,j) = br(i) cpm_p(i,j) = cpm(i) chs_p(i,j) = chs(i) chs2_p(i,j) = chs2(i) cqs2_p(i,j) = cqs2(i) - fh_p(i,j) = fh(i) - fm_p(i,j) = fm(i) flhc_p(i,j) = 
flhc(i) flqc_p(i,j) = flqc(i) gz1oz0_p(i,j) = gz1oz0(i) hfx_p(i,j) = hfx(i) qfx_p(i,j) = qfx(i) qgh_p(i,j) = qgh(i) - qsfc_p(i,j) = qsfc(i) + qsfc_p(i,j) = qsfc(i) lh_p(i,j) = lh(i) - mol_p(i,j) = mol(i) + mol_p(i,j) = mol(i) psim_p(i,j) = psim(i) psih_p(i,j) = psih(i) regime_p(i,j) = regime(i) rmol_p(i,j) = rmol(i) ust_p(i,j) = ust(i) - ustm_p(i,j) = ustm(i) wspd_p(i,j) = wspd(i) - znt_p(i,j) = znt(i) - zol_p(i,j) = zol(i) - !output variables: - cd_p(i,j) = 0._RKIND - cda_p(i,j) = 0._RKIND - ck_p(i,j) = 0._RKIND - cka_p(i,j) = 0._RKIND + znt_p(i,j) = znt(i) + zol_p(i,j) = zol(i) + !output variables: q2_p(i,j) = 0._RKIND t2m_p(i,j) = 0._RKIND th2m_p(i,j) = 0._RKIND u10_p(i,j) = 0._RKIND v10_p(i,j) = 0._RKIND + + !output variables (optional): + cd_p(i,j) = 0._RKIND + cda_p(i,j) = 0._RKIND + ck_p(i,j) = 0._RKIND + cka_p(i,j) = 0._RKIND + ustm_p(i,j) = ustm(i) enddo enddo + if(config_frac_seaice) then + call mpas_pool_get_array(sfc_input,'sst' ,sst) + call mpas_pool_get_array(sfc_input,'xice',xice) + do j = jts,jte + do i = its,ite + sst_p(i,j) = sst(i) + xice_p(i,j) = xice(i) + + !input variables: + mavail_sea(i,j) = mavail(i) + tsk_sea(i,j) = skintemp(i) + xland_sea(i,j) = xland(i) + !inout variables: + br_sea(i,j) = br(i) + cpm_sea(i,j) = cpm(i) + chs_sea(i,j) = chs(i) + chs2_sea(i,j) = chs2(i) + cqs2_sea(i,j) = cqs2(i) + flhc_sea(i,j) = flhc(i) + flqc_sea(i,j) = flqc(i) + gz1oz0_sea(i,j) = gz1oz0(i) + lh_sea(i,j) = lh(i) + hfx_sea(i,j) = hfx(i) + qfx_sea(i,j) = qfx(i) + mol_sea(i,j) = mol(i) + psim_sea(i,j) = psim(i) + psih_sea(i,j) = psih(i) + qgh_sea(i,j) = qgh(i) + rmol_sea(i,j) = rmol(i) + regime_sea(i,j) = regime(i) + ust_sea(i,j) = ust(i) + ustm_sea(i,j) = ustm(i) + wspd_sea(i,j) = wspd(i) + zol_sea(i,j) = zol(i) + znt_sea(i,j) = znt(i) + regime_hold(i,j) = regime(i) + !output variables: + cd_sea(i,j) = 0._RKIND + cda_sea(i,j) = 0._RKIND + ck_sea(i,j) = 0._RKIND + cka_sea(i,j) = 0._RKIND + qsfc_sea(i,j) = 0._RKIND + q2_sea(i,j) = 0._RKIND + t2m_sea(i,j) = 
0._RKIND + th2m_sea(i,j) = 0._RKIND + u10_sea(i,j) = 0._RKIND + v10_sea(i,j) = 0._RKIND + + !overwrite some local variables for sea-ice cells: + if(xice_p(i,j).ge.xice_threshold .and. xice_p(i,j).le.1._RKIND) then + xland_sea(i,j) = 2._RKIND + mavail_sea(i,j) = 1._RKIND + znt_sea(i,j) = 0.0001_RKIND + tsk_sea(i,j) = max(sst_p(i,j),271.4_RKIND) + else + xland_sea(i,j) = xland_p(i,j) + mavail_sea(i,j) = mavail_p(i,j) + znt_sea(i,j) = znt_p(i,j) + tsk_sea(i,j) = tsk_p(i,j) + endif + enddo + enddo + endif + + sfclayer_select: select case (trim(sfclayer_scheme)) + + case("sf_monin_obukhov") + call mpas_pool_get_array(diag_physics,'fh',fh) + call mpas_pool_get_array(diag_physics,'fm',fm) + + do j = jts,jte + do i = its,ite + fh_p(i,j) = fh(i) + fm_p(i,j) = fm(i) + if(config_frac_seaice) then + fh_sea(i,j) = fh(i) + fm_sea(i,j) = fm(i) + endif + enddo + enddo + + case("sf_mynn") + !input variables: + call mpas_pool_get_array(diag_physics,'qcg' ,qcg ) + call mpas_pool_get_array(sfc_input ,'snowh' ,snowh ) + call mpas_pool_get_array(diag_physics,'cov' ,cov ) + call mpas_pool_get_array(diag_physics,'el_pbl',el_pbl) + call mpas_pool_get_array(diag_physics,'qsq' ,qsq ) + call mpas_pool_get_array(diag_physics,'sh3d' ,sh3d ) + call mpas_pool_get_array(diag_physics,'tsq' ,tsq ) + !inout variables: + call mpas_pool_get_array(diag_physics,'ch',ch ) + + do j = jts,jte + do i = its,ite + !input variables: + snowh_p(i,j) = snowh(i) + qcg_p(i,j) = qcg(i) + !inout variables: + ch_p(i,j) = ch(i) + if(config_frac_seaice) then + ch_sea(i,j) = ch(i) + endif + enddo + enddo + + do j = jts,jte + do k = kts,kte + do i = its,ite + !input variables: + cov_p(i,k,j) = cov(k,i) + qsq_p(i,k,j) = qsq(k,i) + tsq_p(i,k,j) = tsq(k,i) + sh3d_p(i,k,j) = sh3d(k,i) + elpbl_p(i,k,j) = el_pbl(k,i) + enddo + enddo + enddo + + case default + + end select sfclayer_select + end subroutine sfclayer_from_MPAS -!================================================================================================== - 
subroutine sfclayer_to_MPAS(diag_physics) -!================================================================================================== +!================================================================================================================= + subroutine sfclayer_to_MPAS(configs,sfc_input,diag_physics,its,ite) +!================================================================================================================= + +!input arguments: + type(mpas_pool_type),intent(in):: configs + type(mpas_pool_type),intent(in):: sfc_input + integer,intent(in):: its,ite !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics +!local variables: + integer:: i,j + !local pointers: - real(kind=RKIND),dimension(:),pointer:: br,cpm,chs,chs2,cqs2,fh,fm,flhc,flqc,gz1oz0,hfx, & - qfx,qgh,qsfc,lh,mol,psim,psih,regime,rmol,ust,ustm, & - wspd,znt,zol - real(kind=RKIND),dimension(:),pointer:: q2,t2m,th2m,u10,v10 + logical,pointer:: config_frac_seaice -!-------------------------------------------------------------------------------------------------- - - call mpas_pool_get_array(diag_physics,'br' , br ) - call mpas_pool_get_array(diag_physics,'cpm' , cpm ) - call mpas_pool_get_array(diag_physics,'chs' , chs ) - call mpas_pool_get_array(diag_physics,'chs2' , chs2 ) - call mpas_pool_get_array(diag_physics,'cqs2' , cqs2 ) - call mpas_pool_get_array(diag_physics,'fh' , fh ) - call mpas_pool_get_array(diag_physics,'fm' , fm ) - call mpas_pool_get_array(diag_physics,'flhc' , flhc ) - call mpas_pool_get_array(diag_physics,'flqc' , flqc ) - call mpas_pool_get_array(diag_physics,'gz1oz0', gz1oz0) - call mpas_pool_get_array(diag_physics,'hfx' , hfx ) - call mpas_pool_get_array(diag_physics,'qfx' , qfx ) - call mpas_pool_get_array(diag_physics,'qgh' , qgh ) - call mpas_pool_get_array(diag_physics,'qsfc' , qsfc ) - call mpas_pool_get_array(diag_physics,'lh' , lh ) - call mpas_pool_get_array(diag_physics,'mol' , mol ) - call mpas_pool_get_array(diag_physics,'psim' , 
psim ) - call mpas_pool_get_array(diag_physics,'psih' , psih ) - call mpas_pool_get_array(diag_physics,'regime', regime) - call mpas_pool_get_array(diag_physics,'rmol' , rmol ) - call mpas_pool_get_array(diag_physics,'ust' , ust ) - call mpas_pool_get_array(diag_physics,'ustm' , ustm ) - call mpas_pool_get_array(diag_physics,'wspd' , wspd ) - call mpas_pool_get_array(diag_physics,'znt' , znt ) - call mpas_pool_get_array(diag_physics,'zol' , zol ) - - call mpas_pool_get_array(diag_physics,'q2' , q2 ) - call mpas_pool_get_array(diag_physics,'t2m' , t2m ) - call mpas_pool_get_array(diag_physics,'th2m' , th2m ) - call mpas_pool_get_array(diag_physics,'u10' , u10 ) - call mpas_pool_get_array(diag_physics,'v10' , v10 ) + real(kind=RKIND),dimension(:),pointer:: br,cpm,chs,chs2,cqs2,flhc,flqc,gz1oz0,hfx,qfx, & + qgh,qsfc,lh,mol,psim,psih,regime,rmol,ust,wspd, & + znt,zol + real(kind=RKIND),dimension(:),pointer:: q2,t2m,th2m,u10,v10 + real(kind=RKIND),dimension(:),pointer:: cd,cda,ck,cka,ustm + real(kind=RKIND),dimension(:),pointer:: xice + +!local pointers specific to monin_obukhov: + real(kind=RKIND),dimension(:),pointer:: fh,fm + +!local pointers specific to mynn: + real(kind=RKIND),dimension(:),pointer:: ch,qcg + real(kind=RKIND),dimension(:,:),pointer:: cov,el_pbl,qsq,sh3d,tsq + +!----------------------------------------------------------------------------------------------------------------- + + call mpas_pool_get_config(configs,'config_frac_seaice',config_frac_seaice) + +!inout variables: + call mpas_pool_get_array(diag_physics,'br' ,br ) + call mpas_pool_get_array(diag_physics,'cpm' ,cpm ) + call mpas_pool_get_array(diag_physics,'chs' ,chs ) + call mpas_pool_get_array(diag_physics,'chs2' ,chs2 ) + call mpas_pool_get_array(diag_physics,'cqs2' ,cqs2 ) + call mpas_pool_get_array(diag_physics,'flhc' ,flhc ) + call mpas_pool_get_array(diag_physics,'flqc' ,flqc ) + call mpas_pool_get_array(diag_physics,'gz1oz0',gz1oz0) + call mpas_pool_get_array(diag_physics,'hfx' ,hfx ) 
+ call mpas_pool_get_array(diag_physics,'qfx' ,qfx ) + call mpas_pool_get_array(diag_physics,'qgh' ,qgh ) + call mpas_pool_get_array(diag_physics,'qsfc' ,qsfc ) + call mpas_pool_get_array(diag_physics,'lh' ,lh ) + call mpas_pool_get_array(diag_physics,'mol' ,mol ) + call mpas_pool_get_array(diag_physics,'psih' ,psih ) + call mpas_pool_get_array(diag_physics,'psim' ,psim ) + call mpas_pool_get_array(diag_physics,'regime',regime) + call mpas_pool_get_array(diag_physics,'rmol' ,rmol ) + call mpas_pool_get_array(diag_physics,'ust' ,ust ) + call mpas_pool_get_array(diag_physics,'wspd' ,wspd ) + call mpas_pool_get_array(diag_physics,'znt' ,znt ) + call mpas_pool_get_array(diag_physics,'zol' ,zol ) + +!output variables: + call mpas_pool_get_array(diag_physics,'q2' ,q2 ) + call mpas_pool_get_array(diag_physics,'t2m' ,t2m ) + call mpas_pool_get_array(diag_physics,'th2m' ,th2m ) + call mpas_pool_get_array(diag_physics,'u10' ,u10 ) + call mpas_pool_get_array(diag_physics,'v10' ,v10 ) + +!output variables (optional): + call mpas_pool_get_array(diag_physics,'cd' ,cd ) + call mpas_pool_get_array(diag_physics,'cda' ,cda ) + call mpas_pool_get_array(diag_physics,'ck' ,ck ) + call mpas_pool_get_array(diag_physics,'cka' ,cka ) + call mpas_pool_get_array(diag_physics,'ustm' ,ustm ) + +!output variables (optional): + call mpas_pool_get_array(diag_physics,'cd' ,cd ) + call mpas_pool_get_array(diag_physics,'cda' ,cda ) + call mpas_pool_get_array(diag_physics,'ck' ,ck ) + call mpas_pool_get_array(diag_physics,'cka' ,cka ) do j = jts,jte do i = its,ite + !inout variables: br(i) = br_p(i,j) cpm(i) = cpm_p(i,j) chs(i) = chs_p(i,j) chs2(i) = chs2_p(i,j) cqs2(i) = cqs2_p(i,j) - fh(i) = fh_p(i,j) - fm(i) = fm_p(i,j) flhc(i) = flhc_p(i,j) flqc(i) = flqc_p(i,j) gz1oz0(i) = gz1oz0_p(i,j) @@ -340,73 +653,174 @@ subroutine sfclayer_to_MPAS(diag_physics) regime(i) = regime_p(i,j) rmol(i) = rmol_p(i,j) ust(i) = ust_p(i,j) - ustm(i) = ustm_p(i,j) wspd(i) = wspd_p(i,j) zol(i) = zol_p(i,j) znt(i) = 
znt_p(i,j) - !diagnostics: + !output variables: q2(i) = q2_p(i,j) t2m(i) = t2m_p(i,j) th2m(i) = th2m_p(i,j) u10(i) = u10_p(i,j) v10(i) = v10_p(i,j) + !output variables (optional): + cd(i) = cd_p(i,j) + cda(i) = cda_p(i,j) + ck(i) = ck_p(i,j) + cka(i) = cka_p(i,j) + ustm(i) = ustm_p(i,j) enddo enddo + if(config_frac_seaice) then + call mpas_pool_get_array(sfc_input,'xice',xice) + do j = jts,jte + do i = its,ite + if(xice(i).ge.xice_threshold .and. xice(i).le.1._RKIND) then + br(i) = br_p(i,j)*xice(i) + (1._RKIND-xice(i))*br_sea(i,j) + flhc(i) = flhc_p(i,j)*xice(i) + (1._RKIND-xice(i))*flhc_sea(i,j) + flqc(i) = flqc_p(i,j)*xice(i) + (1._RKIND-xice(i))*flqc_sea(i,j) + gz1oz0(i) = gz1oz0_p(i,j)*xice(i) + (1._RKIND-xice(i))*gz1oz0_sea(i,j) + mol(i) = mol_p(i,j)*xice(i) + (1._RKIND-xice(i))*mol_sea(i,j) + psih(i) = psih_p(i,j)*xice(i) + (1._RKIND-xice(i))*psih_sea(i,j) + psim(i) = psim_p(i,j)*xice(i) + (1._RKIND-xice(i))*psim_sea(i,j) + rmol(i) = rmol_p(i,j)*xice(i) + (1._RKIND-xice(i))*rmol_sea(i,j) + ust(i) = ust_p(i,j)*xice(i) + (1._RKIND-xice(i))*ust_sea(i,j) + wspd(i) = wspd_p(i,j)*xice(i) + (1._RKIND-xice(i))*wspd_sea(i,j) + zol(i) = zol_p(i,j)*xice(i) + (1._RKIND-xice(i))*zol_sea(i,j) + if(xice(i) .ge. 
0.5_RKIND) regime(i) = regime_hold(i,j) + !output variables: + q2(i) = q2_p(i,j)*xice(i) + (1._RKIND-xice(i))*q2_sea(i,j) + t2m(i) = t2m_p(i,j)*xice(i) + (1._RKIND-xice(i))*t2m_sea(i,j) + th2m(i) = th2m_p(i,j)*xice(i) + (1._RKIND-xice(i))*th2m_sea(i,j) + u10(i) = u10_p(i,j)*xice(i) + (1._RKIND-xice(i))*u10_sea(i,j) + v10(i) = v10_p(i,j)*xice(i) + (1._RKIND-xice(i))*v10_sea(i,j) + !output variables (optional): + cd(i) = cd_p(i,j)*xice(i) + (1._RKIND-xice(i))*cd_sea(i,j) + cda(i) = cda_p(i,j)*xice(i) + (1._RKIND-xice(i))*cda_sea(i,j) + ck(i) = ck_p(i,j)*xice(i) + (1._RKIND-xice(i))*ck_sea(i,j) + cka(i) = cka_p(i,j)*xice(i) + (1._RKIND-xice(i))*cka_sea(i,j) + ustm(i) = ustm_p(i,j)*xice(i) + (1._RKIND-xice(i))*ustm_sea(i,j) + endif + enddo + enddo + endif + + sfclayer_select: select case (trim(sfclayer_scheme)) + + case("sf_monin_obukhov") + call mpas_pool_get_array(diag_physics,'fh',fh) + call mpas_pool_get_array(diag_physics,'fm',fm) + + do j = jts,jte + do i = its,ite + fh(i) = fh_p(i,j) + fm(i) = fm_p(i,j) + enddo + enddo + if(config_frac_seaice) then + call mpas_pool_get_array(sfc_input,'xice',xice) + do j = jts,jte + do i = its,ite + if(xice(i).ge.xice_threshold .and. xice(i).le.1._RKIND) then + fh(i) = fh_p(i,j)*xice(i) + (1._RKIND-xice(i))*fh_sea(i,j) + fm(i) = fm_p(i,j)*xice(i) + (1._RKIND-xice(i))*fm_sea(i,j) + endif + enddo + enddo + endif + + case("sf_mynn") + call mpas_pool_get_array(diag_physics,'ch',ch) + + do j = jts,jte + do i = its,ite + ch(i) = ch_p(i,j) + enddo + enddo + if(config_frac_seaice) then + call mpas_pool_get_array(sfc_input,'xice',xice) + do j = jts,jte + do i = its,ite + if(xice(i).ge.xice_threshold .and. 
xice(i).le.1._RKIND) then + ch(i) = ch_p(i,j)*xice(i) + (1._RKIND-xice(i))*ch_sea(i,j) + endif + enddo + enddo + endif + + case default + + end select sfclayer_select + end subroutine sfclayer_to_MPAS -!================================================================================================== +!================================================================================================================= subroutine init_sfclayer -!================================================================================================== +!================================================================================================================= !local variables: logical, parameter:: allowed_to_read = .false. !actually not used in subroutine sfclayinit. -!-------------------------------------------------------------------------------------------------- -! write(0,*) -! write(0,*) '--- enter sfclayer_initialization:' +!----------------------------------------------------------------------------------------------------------------- + sfclayer_select: select case (trim(sfclayer_scheme)) - case("monin_obukhov") -! write(0,*) ' enter monin_obukhov initialization:' + case("sf_monin_obukhov") call sfclayinit(allowed_to_read) -! write(0,*) ' end monin_obukhov initialization' + + case("sf_mynn") + call mynn_sf_init_driver(allowed_to_read) case default end select sfclayer_select -! 
write(0,*) '--- end sfclayer_initialization' + end subroutine init_sfclayer -!================================================================================================== - subroutine driver_sfclayer(mesh,diag_physics,sfc_input) -!================================================================================================== +!================================================================================================================= + subroutine driver_sfclayer(itimestep,configs,mesh,diag_physics,sfc_input,its,ite) +!================================================================================================================= !input and inout arguments: type(mpas_pool_type),intent(in):: mesh + type(mpas_pool_type),intent(in):: configs type(mpas_pool_type),intent(in):: sfc_input + + integer,intent(in):: its,ite + integer,intent(in):: itimestep + +!inout arguments: type(mpas_pool_type),intent(inout):: diag_physics !local pointers: + logical,pointer:: config_do_restart,config_frac_seaice real(kind=RKIND),dimension(:),pointer:: areaCell !local variables: + integer:: initflag real(kind=RKIND):: dx -!-------------------------------------------------------------------------------------------------- -! write(0,*) -! write(0,*) '--- enter subroutine driver_sfclayer:' +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine driver_sfclayer:' + + call mpas_pool_get_config(configs,'config_do_restart' ,config_do_restart ) + call mpas_pool_get_config(configs,'config_frac_seaice',config_frac_seaice) call mpas_pool_get_array(mesh,'areaCell',areaCell) !copy all MPAS arrays to rectanguler grid: - call sfclayer_from_MPAS(mesh,diag_physics,sfc_input) + call sfclayer_from_MPAS(configs,mesh,diag_physics,sfc_input,its,ite) + + dx = sqrt(maxval(areaCell)) + + initflag = 1 + if(config_do_restart .or. 
itimestep > 1) initflag = 0 sfclayer_select: select case (trim(sfclayer_scheme)) - case("monin_obukhov") - dx = sqrt(maxval(areaCell)) + case("sf_monin_obukhov") call sfclay( & p3d = pres_hyd_p , psfc = psfc_p , t3d = t_p , & u3d = u_p , v3d = v_p , qv3d = qv_p , & @@ -435,18 +849,114 @@ subroutine driver_sfclayer(mesh,diag_physics,sfc_input) ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & ) - + + if(config_frac_seaice) then + call sfclay( & + p3d = pres_hyd_p , psfc = psfc_p , t3d = t_p , & + u3d = u_p , v3d = v_p , qv3d = qv_p , & + dz8w = dz_p , cp = cp , g = gravity , & + rovcp = rcp , R = R_d , xlv = xlv , & + chs = chs_sea , chs2 = chs2_sea , cqs2 = cqs2_sea , & + cpm = cpm_sea , znt = znt_sea , ust = ust_sea , & + pblh = hpbl_p , mavail = mavail_sea , zol = zol_sea , & + mol = mol_sea , regime = regime_sea , psim = psim_sea , & + psih = psih_sea , fm = fm_sea , fh = fh_sea , & + xland = xland_sea , hfx = hfx_sea , qfx = qfx_sea , & + lh = lh_sea , tsk = tsk_sea , flhc = flhc_sea , & + flqc = flqc_sea , qgh = qgh_sea , qsfc = qsfc_sea , & + rmol = rmol_sea , u10 = u10_sea , v10 = v10_sea , & + th2 = th2m_sea , t2 = t2m_sea , q2 = q2_sea , & + gz1oz0 = gz1oz0_sea , wspd = wspd_sea , br = br_sea , & + isfflx = isfflx , dx = dx , svp1 = svp1 , & + svp2 = svp2 , svp3 = svp3 , svpt0 = svpt0 , & + ep1 = ep_1 , ep2 = ep_2 , karman = karman , & + eomeg = eomeg , stbolt = stbolt , P1000mb = P0 , & + dxCell = dx_p , ustm = ustm_sea , ck = ck_sea , & + cka = cka_sea , cd = cd_sea , cda = cda_sea , & + isftcflx = isftcflx , iz0tlnd = iz0tlnd , & + scm_force_flux = scm_force_flux , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + ) + endif + + case("sf_mynn") + call sfclay_mynn( & + p3d = 
pres_hyd_p , pi3d = pi_p , psfcpa = psfc_p , & + th3d = th_p , t3d = t_p , u3d = u_p , & + v3d = v_p , qv3d = qv_p , qc3d = qc_p , & + rho3d = rho_p , dz8w = dz_p , cp = cp , & + g = gravity , rovcp = rcp , R = R_d , & + xlv = xlv , chs = chs_p , chs2 = chs2_p , & + cqs2 = cqs2_p , cpm = cpm_p , znt = znt_p , & + ust = ust_p , pblh = hpbl_p , mavail = mavail_p , & + zol = zol_p , mol = mol_p , regime = regime_p , & + psim = psim_p , psih = psih_p , xland = xland_p , & + hfx = hfx_p , qfx = qfx_p , lh = lh_p , & + tsk = tsk_p , flhc = flhc_p , flqc = flqc_p , & + qgh = qgh_p , qsfc = qsfc_p , rmol = rmol_p , & + u10 = u10_p , v10 = v10_p , th2 = th2m_p , & + t2 = t2m_p , q2 = q2_p , snowh = snowh_p , & + gz1oz0 = gz1oz0_p , wspd = wspd_p , br = br_p , & + isfflx = isfflx , dx = dx , svp1 = svp1 , & + svp2 = svp2 , svp3 = svp3 , svpt0 = svpt0 , & + ep1 = ep_1 , ep2 = ep_2 , karman = karman , & + dxCell = dx_p , ustm = ustm_p , ck = ck_p , & + cka = cka_p , cd = cd_p , cda = cda_p , & + isftcflx = isftcflx , iz0tlnd = iz0tlnd , itimestep = initflag , & + ch = ch_p , cov = cov_p , tsq = tsq_p , & + qsq = qsq_p , sh3d = sh3d_p , el_pbl = elpbl_p , & + qcg = qcg_p , bl_mynn_cloudpdf = bl_mynn_cloudpdf , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + ) + + if(config_frac_seaice) then + call sfclay_mynn( & + p3d = pres_hyd_p , pi3d = pi_p , psfcpa = psfc_p , & + th3d = th_p , t3d = t_p , u3d = u_p , & + v3d = v_p , qv3d = qv_p , qc3d = qc_p , & + rho3d = rho_p , dz8w = dz_p , cp = cp , & + g = gravity , rovcp = rcp , R = R_d , & + xlv = xlv , chs = chs_sea , chs2 = chs2_sea , & + cqs2 = cqs2_sea , cpm = cpm_sea , znt = znt_sea , & + ust = ust_sea , pblh = hpbl_p , mavail = mavail_sea , & + zol = zol_sea , mol = mol_sea , regime = regime_sea , & + psim = psim_sea , psih = psih_sea , xland = 
xland_sea , & + hfx = hfx_sea , qfx = qfx_sea , lh = lh_sea , & + tsk = tsk_sea , flhc = flhc_sea , flqc = flqc_sea , & + qgh = qgh_sea , qsfc = qsfc_sea , rmol = rmol_sea , & + u10 = u10_sea , v10 = v10_sea , th2 = th2m_sea , & + t2 = t2m_sea , q2 = q2_sea , snowh = snowh_p , & + gz1oz0 = gz1oz0_sea , wspd = wspd_sea , br = br_sea , & + isfflx = isfflx , dx = dx , svp1 = svp1 , & + svp2 = svp2 , svp3 = svp3 , svpt0 = svpt0 , & + ep1 = ep_1 , ep2 = ep_2 , karman = karman , & + dxCell = dx_p , ustm = ustm_sea , ck = ck_sea , & + cka = cka_sea , cd = cd_sea , cda = cda_sea , & + isftcflx = isftcflx , iz0tlnd = iz0tlnd , itimestep = initflag , & + ch = ch_sea , cov = cov_p , tsq = tsq_p , & + qsq = qsq_p , sh3d = sh3d_p , el_pbl = elpbl_p , & + qcg = qcg_p , bl_mynn_cloudpdf = bl_mynn_cloudpdf , & + ids = ids , ide = ide , jds = jds , jde = jde , kds = kds , kde = kde , & + ims = ims , ime = ime , jms = jms , jme = jme , kms = kms , kme = kme , & + its = its , ite = ite , jts = jts , jte = jte , kts = kts , kte = kte & + ) + endif + case default end select sfclayer_select !copy local arrays to MPAS grid: - call sfclayer_to_MPAS(diag_physics) + call sfclayer_to_MPAS(configs,sfc_input,diag_physics,its,ite) -! write(0,*) '--- end subroutine driver_sfclayer' +!write(0,*) '--- end subroutine driver_sfclayer.' 
end subroutine driver_sfclayer -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_driver_sfclayer -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_finalize.F b/src/core_atmosphere/physics/mpas_atmphys_finalize.F new file mode 100644 index 0000000000..81d3832911 --- /dev/null +++ b/src/core_atmosphere/physics/mpas_atmphys_finalize.F @@ -0,0 +1,98 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!================================================================================================================= + module mpas_atmphys_finalize + use mpas_derived_types + use mpas_pool_routines + + use module_mp_thompson + + implicit none + private + public:: atmphys_finalize + +!mpas_atmphys_finalize contains subroutines that deallocate arrays used in physics parameterizations, i.e. arrays +!that need to remain allocated as long as MPAS runs and are not properly deallocated in the sourcecode itself. +!Laura D. Fowler (laura@ucar.edu) / 2016-03-22. + +!add-ons and modifications to sourcecode: +!---------------------------------------- +! * added subroutine mp_thompson_deallocate which deallocate arrays used in the initialization of the Thompson +! cloud microphysics scheme. these arrays contain coefficients for collection,collision,freezing, ... +! 
Laura D. Fowler (laura@ucar.edu) / 2016-03-22. + + + contains + + +!================================================================================================================= + subroutine atmphys_finalize(configs) +!================================================================================================================= + +!input arguments: + type(mpas_pool_type),intent(in):: configs + +!local variables and pointers: + character(len=StrKIND),pointer:: config_microp_scheme + +!----------------------------------------------------------------------------------------------------------------- + + call mpas_pool_get_config(configs,'config_microp_scheme',config_microp_scheme) + + if(trim(config_microp_scheme) == 'mp_thompson') & + call mp_thompson_deallocate + + end subroutine atmphys_finalize + +!================================================================================================================= + subroutine mp_thompson_deallocate +!================================================================================================================= +!write(0,*) +!write(0,*) '--- enter subroutine mp_thompson_deallocate:' + + if(allocated(tcg_racg) ) deallocate(tcg_racg ) + if(allocated(tmr_racg) ) deallocate(tmr_racg ) + if(allocated(tcr_gacr) ) deallocate(tcr_gacr ) + if(allocated(tmg_gacr) ) deallocate(tmg_gacr ) + if(allocated(tnr_racg) ) deallocate(tnr_racg ) + if(allocated(tnr_gacr) ) deallocate(tnr_gacr ) + if(allocated(tcs_racs1)) deallocate(tcs_racs1) + if(allocated(tmr_racs1)) deallocate(tmr_racs1) + if(allocated(tcs_racs2)) deallocate(tcs_racs2) + if(allocated(tmr_racs2)) deallocate(tmr_racs2) + if(allocated(tcr_sacr1)) deallocate(tcr_sacr1) + if(allocated(tms_sacr1)) deallocate(tms_sacr1) + if(allocated(tcr_sacr2)) deallocate(tcr_sacr2) + if(allocated(tms_sacr2)) deallocate(tms_sacr2) + if(allocated(tnr_racs1)) deallocate(tnr_racs1) + if(allocated(tnr_racs2)) deallocate(tnr_racs2) + if(allocated(tnr_sacr1)) deallocate(tnr_sacr1) + 
if(allocated(tnr_sacr2)) deallocate(tnr_sacr2) + if(allocated(tpi_qcfz) ) deallocate(tpi_qcfz ) + if(allocated(tni_qcfz) ) deallocate(tni_qcfz ) + if(allocated(tpi_qrfz) ) deallocate(tpi_qrfz ) + if(allocated(tpg_qrfz) ) deallocate(tpg_qrfz ) + if(allocated(tni_qrfz) ) deallocate(tni_qrfz ) + if(allocated(tnr_qrfz) ) deallocate(tnr_qrfz ) + if(allocated(tps_iaus) ) deallocate(tps_iaus ) + if(allocated(tni_iaus) ) deallocate(tni_iaus ) + if(allocated(tpi_ide) ) deallocate(tpi_ide ) + if(allocated(t_efrw) ) deallocate(t_efrw ) + if(allocated(t_efsw) ) deallocate(t_efsw ) + if(allocated(tnr_rev) ) deallocate(tnr_rev ) + if(allocated(tpc_wev) ) deallocate(tpc_wev ) + if(allocated(tnc_wev) ) deallocate(tnc_wev ) + if(allocated(tnccn_act)) deallocate(tnccn_act) + +!write(0,*) '--- end subroutine mp_thompson_deallocate' + + end subroutine mp_thompson_deallocate + +!================================================================================================================= + end module mpas_atmphys_finalize +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_functions.F b/src/core_atmosphere/physics/mpas_atmphys_functions.F new file mode 100644 index 0000000000..f63a7f6002 --- /dev/null +++ b/src/core_atmosphere/physics/mpas_atmphys_functions.F @@ -0,0 +1,217 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! 
+!================================================================================================================= + module mpas_atmphys_functions + + implicit none + private + public:: gammln,gammp,wgamma,rslf,rsif + + + contains + + +!================================================================================================================= +!NOTE: functions rslf and rsif are taken from module_mp_thompson temporarily for computing +! the diagnostic relative humidity. These two functions will be removed from this module +! when the Thompson cloud microphysics scheme will be restored to MPAS-Dev. +! Laura D. Fowler (birch.mmm.ucar.edu) / 2013-07-11. + +!+---+-----------------------------------------------------------------+ + SUBROUTINE GCF(GAMMCF,A,X,GLN) +! --- RETURNS THE INCOMPLETE GAMMA FUNCTION Q(A,X) EVALUATED BY ITS +! --- CONTINUED FRACTION REPRESENTATION AS GAMMCF. ALSO RETURNS +! --- LN(GAMMA(A)) AS GLN. THE CONTINUED FRACTION IS EVALUATED BY +! --- A MODIFIED LENTZ METHOD. +! --- USES GAMMLN + IMPLICIT NONE + INTEGER, PARAMETER:: ITMAX=100 + REAL, PARAMETER:: gEPS=3.E-7 + REAL, PARAMETER:: FPMIN=1.E-30 + REAL, INTENT(IN):: A, X + REAL:: GAMMCF,GLN + INTEGER:: I + REAL:: AN,B,C,D,DEL,H + GLN=GAMMLN(A) + B=X+1.-A + C=1./FPMIN + D=1./B + H=D + DO 11 I=1,ITMAX + AN=-I*(I-A) + B=B+2. + D=AN*D+B + IF(ABS(D).LT.FPMIN)D=FPMIN + C=B+AN/C + IF(ABS(C).LT.FPMIN)C=FPMIN + D=1./D + DEL=D*C + H=H*DEL + IF(ABS(DEL-1.).LT.gEPS)GOTO 1 + 11 CONTINUE + PRINT *, 'A TOO LARGE, ITMAX TOO SMALL IN GCF' + 1 GAMMCF=EXP(-X+A*LOG(X)-GLN)*H + END SUBROUTINE GCF +! (C) Copr. 1986-92 Numerical Recipes Software 2.02 +!+---+-----------------------------------------------------------------+ + SUBROUTINE GSER(GAMSER,A,X,GLN) +! --- RETURNS THE INCOMPLETE GAMMA FUNCTION P(A,X) EVALUATED BY ITS +! --- ITS SERIES REPRESENTATION AS GAMSER. ALSO RETURNS LN(GAMMA(A)) +! --- AS GLN. +! 
--- USES GAMMLN + IMPLICIT NONE + INTEGER, PARAMETER:: ITMAX=100 + REAL, PARAMETER:: gEPS=3.E-7 + REAL, INTENT(IN):: A, X + REAL:: GAMSER,GLN + INTEGER:: N + REAL:: AP,DEL,SUM + GLN=GAMMLN(A) + IF(X.LE.0.)THEN + IF(X.LT.0.) PRINT *, 'X < 0 IN GSER' + GAMSER=0. + RETURN + ENDIF + AP=A + SUM=1./A + DEL=SUM + DO 11 N=1,ITMAX + AP=AP+1. + DEL=DEL*X/AP + SUM=SUM+DEL + IF(ABS(DEL).LT.ABS(SUM)*gEPS)GOTO 1 + 11 CONTINUE + PRINT *,'A TOO LARGE, ITMAX TOO SMALL IN GSER' + 1 GAMSER=SUM*EXP(-X+A*LOG(X)-GLN) + END SUBROUTINE GSER +! (C) Copr. 1986-92 Numerical Recipes Software 2.02 +!+---+-----------------------------------------------------------------+ + REAL FUNCTION GAMMLN(XX) +! --- RETURNS THE VALUE LN(GAMMA(XX)) FOR XX > 0. + IMPLICIT NONE + REAL, INTENT(IN):: XX + DOUBLE PRECISION, PARAMETER:: STP = 2.5066282746310005D0 + DOUBLE PRECISION, DIMENSION(6), PARAMETER:: & + COF = (/76.18009172947146D0, -86.50532032941677D0, & + 24.01409824083091D0, -1.231739572450155D0, & + .1208650973866179D-2, -.5395239384953D-5/) + DOUBLE PRECISION:: SER,TMP,X,Y + INTEGER:: J + + X=XX + Y=X + TMP=X+5.5D0 + TMP=(X+0.5D0)*LOG(TMP)-TMP + SER=1.000000000190015D0 + DO 11 J=1,6 + Y=Y+1.D0 + SER=SER+COF(J)/Y +11 CONTINUE + GAMMLN=TMP+LOG(STP*SER/X) + END FUNCTION GAMMLN +! (C) Copr. 1986-92 Numerical Recipes Software 2.02 +!+---+-----------------------------------------------------------------+ + REAL FUNCTION GAMMP(A,X) +! --- COMPUTES THE INCOMPLETE GAMMA FUNCTION P(A,X) +! --- SEE ABRAMOWITZ AND STEGUN 6.5.1 +! --- USES GCF,GSER + IMPLICIT NONE + REAL, INTENT(IN):: A,X + REAL:: GAMMCF,GAMSER,GLN + GAMMP = 0. + IF((X.LT.0.) .OR. (A.LE.0.)) THEN + PRINT *, 'BAD ARGUMENTS IN GAMMP' + RETURN + ELSEIF(X.LT.A+1.)THEN + CALL GSER(GAMSER,A,X,GLN) + GAMMP=GAMSER + ELSE + CALL GCF(GAMMCF,A,X,GLN) + GAMMP=1.-GAMMCF + ENDIF + END FUNCTION GAMMP +! (C) Copr. 
1986-92 Numerical Recipes Software 2.02 +!+---+-----------------------------------------------------------------+ + REAL FUNCTION WGAMMA(y) + + IMPLICIT NONE + REAL, INTENT(IN):: y + + WGAMMA = EXP(GAMMLN(y)) + + END FUNCTION WGAMMA +!+---+-----------------------------------------------------------------+ +! THIS FUNCTION CALCULATES THE LIQUID SATURATION VAPOR MIXING RATIO AS +! A FUNCTION OF TEMPERATURE AND PRESSURE +! + REAL FUNCTION RSLF(P,T) + + IMPLICIT NONE + REAL, INTENT(IN):: P, T + REAL:: ESL,X + REAL, PARAMETER:: C0= .611583699E03 + REAL, PARAMETER:: C1= .444606896E02 + REAL, PARAMETER:: C2= .143177157E01 + REAL, PARAMETER:: C3= .264224321E-1 + REAL, PARAMETER:: C4= .299291081E-3 + REAL, PARAMETER:: C5= .203154182E-5 + REAL, PARAMETER:: C6= .702620698E-8 + REAL, PARAMETER:: C7= .379534310E-11 + REAL, PARAMETER:: C8=-.321582393E-13 + + X=MAX(-80.,T-273.16) + +! ESL=612.2*EXP(17.67*X/(T-29.65)) + ESL=C0+X*(C1+X*(C2+X*(C3+X*(C4+X*(C5+X*(C6+X*(C7+X*C8))))))) + ESL=MIN(ESL, P*0.15) ! Even with P=1050mb and T=55C, the sat. vap. pres only contributes to + RSLF=.622*ESL/(P-ESL) ! ~15% of total pres. + +! ALTERNATIVE +! ; Source: Murphy and Koop, Review of the vapour pressure of ice and +! supercooled water for atmospheric applications, Q. J. R. +! Meteorol. Soc (2005), 131, pp. 1539-1565. +! ESL = EXP(54.842763 - 6763.22 / T - 4.210 * ALOG(T) + 0.000367 * T +! + TANH(0.0415 * (T - 218.8)) * (53.878 - 1331.22 +! / T - 9.44523 * ALOG(T) + 0.014025 * T)) + + END FUNCTION RSLF +!+---+-----------------------------------------------------------------+ +! THIS FUNCTION CALCULATES THE ICE SATURATION VAPOR MIXING RATIO AS A +! FUNCTION OF TEMPERATURE AND PRESSURE +! 
+ REAL FUNCTION RSIF(P,T) + + IMPLICIT NONE + REAL, INTENT(IN):: P, T + REAL:: ESI,X + REAL, PARAMETER:: C0= .609868993E03 + REAL, PARAMETER:: C1= .499320233E02 + REAL, PARAMETER:: C2= .184672631E01 + REAL, PARAMETER:: C3= .402737184E-1 + REAL, PARAMETER:: C4= .565392987E-3 + REAL, PARAMETER:: C5= .521693933E-5 + REAL, PARAMETER:: C6= .307839583E-7 + REAL, PARAMETER:: C7= .105785160E-9 + REAL, PARAMETER:: C8= .161444444E-12 + + X=MAX(-80.,T-273.16) + ESI=C0+X*(C1+X*(C2+X*(C3+X*(C4+X*(C5+X*(C6+X*(C7+X*C8))))))) + ESI=MIN(ESI, P*0.15) + RSIF=.622*ESI/(P-ESI) + +! ALTERNATIVE +! ; Source: Murphy and Koop, Review of the vapour pressure of ice and +! supercooled water for atmospheric applications, Q. J. R. +! Meteorol. Soc (2005), 131, pp. 1539-1565. +! ESI = EXP(9.550426 - 5723.265/T + 3.53068*ALOG(T) - 0.00728332*T) + + END FUNCTION RSIF + +!================================================================================================================= + end module mpas_atmphys_functions +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_init.F b/src/core_atmosphere/physics/mpas_atmphys_init.F index eda19b3856..aefcbd4b79 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_init.F +++ b/src/core_atmosphere/physics/mpas_atmphys_init.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_init use mpas_kind_types use mpas_derived_types @@ -26,47 +26,54 @@ module mpas_atmphys_init private public:: physics_init -!>\brief MPAS main initialization subroutine for all physics parameterizations. 
-!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_init: -!> --------------------------------- -!> physics_init : call initialization of individual physics parameterizations. -!> init_dir_forphys: needed for initialization of "reconstruct" subroutines. -!> r3_normalize : needed for initialization of "reconstruct" subroutines. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * added structure diag in calls to subroutine init_radiation_lw and init_radiation_sw. -!> Laura D. Fowler (laura@ucar.edu) / 2013-07-01. -!> * added call to subroutine init_o3climatology. reads monthly-mean climatological ozone data -!> and interpolates ozone data to the MPAS grid. -!> Laura D. Fowler (laura@ucar.edu) / 2013-07-03. -!> * added the calculation of the mean distance between cell centers. -!> Laura D. Fowler (laura@ucar.edu) / 2013-08-22. -!> * added initialization of variable xicem. -!> Laura D. Fowler (laura@ucar.edu) / 2013-08-24. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * added initialization of the accumulated surface pressure. Added initialization of the -!> tendency and accumulated tendency of the surface pressure. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * renamed config_conv_deep_scheme to config_convection_scheme. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +!MPAS main initialization subroutine for all physics parameterizations. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_init: +! --------------------------------- +! physics_init : call initialization of individual physics parameterizations. +! init_dir_forphys: needed for initialization of "reconstruct" subroutines. +! 
r3_normalize : needed for initialization of "reconstruct" subroutines. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * added structure diag in calls to subroutine init_radiation_lw and init_radiation_sw. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-01. +! * added call to subroutine init_o3climatology. reads monthly-mean climatological ozone data and interpolates +! ozone data to the MPAS grid. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-03. +! * added the calculation of the mean distance between cell centers. +! Laura D. Fowler (laura@ucar.edu) / 2013-08-22. +! * added initialization of variable xicem. +! Laura D. Fowler (laura@ucar.edu) / 2013-08-24. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * added initialization of the accumulated surface pressure. Added initialization of the tendency and the +! accumulated tendency of the surface pressure. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * renamed config_conv_deep_scheme to config_convection_scheme. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +! * changed the argument list in the call to subroutine microphysics_init, needed to include the Thompson +! parameterization of cloud microphysics. +! Laura D. Fowler (laura@ucar.edu) / 2015-03-28. +! * modified the initialization of i_rainc and i_rainnc, now that the convection and cloud microphysics +! parameterizations are in "packages." +! Laura D. Fowler (laura@ucar.edu) / 2016-04-13. +! * removed the calculation of the variable dcEdge_m which is no longer needed in the different physics +! parameterizations. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-18. 
contains -!================================================================================================== +!================================================================================================================= subroutine physics_init(dminfo,clock,configs,mesh,diag,tend,state,time_lev,diag_physics, & atm_input,sfc_input) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info),intent(in):: dminfo @@ -97,16 +104,12 @@ subroutine physics_init(dminfo,clock,configs,mesh,diag,tend,state,time_lev,diag_ config_radt_sw_scheme integer,pointer:: nCellsSolve,nLags - integer,dimension(:),pointer :: nEdgesOnCell integer,dimension(:),pointer :: i_rainc,i_rainnc integer,dimension(:),pointer :: i_acswdnb,i_acswdnbc,i_acswdnt,i_acswdntc, & i_acswupb,i_acswupbc,i_acswupt,i_acswuptc, & i_aclwdnb,i_aclwdnbc,i_aclwdnt,i_aclwdntc, & i_aclwupb,i_aclwupbc,i_aclwupt,i_aclwuptc - integer,dimension(:,:),pointer:: edgesOnCell - - real(kind=RKIND),dimension(:),pointer :: dcEdge,dcEdge_m real(kind=RKIND),dimension(:),pointer :: acswdnb,acswdnbc,acswdnt,acswdntc, & acswupb,acswupbc,acswupt,acswuptc, & aclwdnb,aclwdnbc,aclwdnt,aclwdntc, & @@ -116,6 +119,14 @@ subroutine physics_init(dminfo,clock,configs,mesh,diag,tend,state,time_lev,diag_ real(kind=RKIND),dimension(:),pointer :: sst,sstsk,tmn,xice,xicem real(kind=RKIND),dimension(:,:),pointer:: tlag + real(kind=RKIND),dimension(:),pointer :: t_oml, t_oml_initial, t_oml_200m_initial + real(kind=RKIND),dimension(:),pointer :: h_oml, h_oml_initial, hu_oml, hv_oml + real(kind=RKIND), pointer :: config_oml_hml0 + integer,pointer:: nCells + logical,pointer:: config_oml1d + + + !local variables and arrays: type(MPAS_Time_Type):: currTime @@ -123,7 +134,7 @@ subroutine physics_init(dminfo,clock,configs,mesh,diag,tend,state,time_lev,diag_ integer:: 
ierr,julday integer:: iCell,iLag,iEdge,nEdges_m -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- ! write(0,*) ! write(0,*) '--- enter subroutine physics_init:' @@ -139,14 +150,6 @@ subroutine physics_init(dminfo,clock,configs,mesh,diag,tend,state,time_lev,diag_ call mpas_pool_get_dimension(mesh,'nCellsSolve',nCellsSolve) call mpas_pool_get_dimension(mesh,'nLags' ,nLags ) - call mpas_pool_get_array(mesh,'nEdgesOnCell',nEdgesOnCell) - call mpas_pool_get_array(mesh,'edgesOnCell' ,edgesOnCell ) - call mpas_pool_get_array(mesh,'dcEdge' ,dcEdge ) - - call mpas_pool_get_array(diag_physics,'dcEdge_m' ,dcEdge_m ) - call mpas_pool_get_array(diag_physics,'i_rainc' ,i_rainc ) - call mpas_pool_get_array(diag_physics,'i_rainnc' ,i_rainnc ) - call mpas_pool_get_array(diag_physics,'i_acswdnb' ,i_acswdnb ) call mpas_pool_get_array(diag_physics,'i_acswdnbc' ,i_acswdnbc ) call mpas_pool_get_array(diag_physics,'i_acswdnt' ,i_acswdnt ) @@ -194,24 +197,20 @@ subroutine physics_init(dminfo,clock,configs,mesh,diag,tend,state,time_lev,diag_ call mpas_pool_get_array(sfc_input,'tmn' ,tmn ) call mpas_pool_get_array(sfc_input,'xice',xice) + call mpas_pool_get_array(diag_physics,'t_oml' ,t_oml) + call mpas_pool_get_array(diag_physics,'t_oml_initial' ,t_oml_initial) + call mpas_pool_get_array(diag_physics,'t_oml_200m_initial',t_oml_200m_initial) + call mpas_pool_get_array(diag_physics,'h_oml' ,h_oml) + call mpas_pool_get_array(diag_physics,'h_oml_initial' ,h_oml_initial) + call mpas_pool_get_array(diag_physics,'hu_oml' ,hu_oml) + call mpas_pool_get_array(diag_physics,'hv_oml' ,hv_oml) + call mpas_pool_get_config(configs,'config_oml1d' ,config_oml1d ) + call mpas_pool_get_config(configs,'config_oml_hml0' ,config_oml_hml0 ) + call mpas_pool_get_dimension(mesh,'nCells',nCells) + currTime = mpas_get_clock_time(clock,MPAS_NOW,ierr) 
call mpas_get_time(curr_time=currTime,DoY=julday,ierr=ierr) -!calculation of the mean distance between cell centers: - if(.not. config_do_restart) then - do iCell = 1, nCellsSolve - dcEdge_m(iCell) = 0._RKIND - nEdges_m = nEdgesOnCell(iCell) - do iEdge = 1, nEdges_m - dcEdge_m(iCell) = dcEdge_m(iCell) + dcEdge(edgesOnCell(iEdge,iCell)) - enddo - dcEdge_m(iCell) = dcEdge_m(iCell) / nEdges_m -! write(0,102) iCell,nEdges_m,(dcEdge(edgesOnCell(iEdge,iCell)),iEdge=1,nEdges_m),dcEdge_m(iCell) - enddo - endif - 101 format(8i9,10(1x,e15.8)) - 102 format(2i9,10(1x,e15.8)) - !initialization of east-north directions to convert u-tendencies from cell centers to cell !edges: call init_dirs_forphys(mesh) @@ -219,9 +218,15 @@ subroutine physics_init(dminfo,clock,configs,mesh,diag,tend,state,time_lev,diag_ !initialization of counters i_rainc and i_rainnc. i_rainc and i_rainnc track the number of !times the accumulated convective (rainc) and grid-scale (rainnc) rain exceed the prescribed !threshold value: - if(.not. config_do_restart) then + if(.not. config_do_restart .and. config_convection_scheme.ne.'off') then + call mpas_pool_get_array(diag_physics,'i_rainc',i_rainc) do iCell = 1, nCellsSolve i_rainc(iCell) = 0 + enddo + endif + if(.not. config_do_restart .and. config_microp_scheme.ne.'off') then + call mpas_pool_get_array(diag_physics,'i_rainnc',i_rainnc) + do iCell = 1, nCellsSolve i_rainnc(iCell) = 0 enddo endif @@ -286,6 +291,42 @@ subroutine physics_init(dminfo,clock,configs,mesh,diag,tend,state,time_lev,diag_ enddo endif +! initialized the 1D ocean mixed-layer model (code from wrf module_sf_oml) + if (config_oml1d) then + if (.not. config_do_restart) then + write(0,*) '--- initialization of 1D ocean mixed layer model ' + do iCell = 1, nCellsSolve + t_oml(iCell) = sst(iCell) + t_oml_initial(iCell) = sst(iCell) + end do + if (config_oml_hml0 .gt. 
0) then + do iCell = 1, nCellsSolve + h_oml(iCell) = config_oml_hml0 + h_oml_initial(iCell) = config_oml_hml0 + hu_oml(iCell) = 0. + hv_oml(iCell) = 0. + t_oml_200m_initial(iCell) = sst(iCell) - 5. + end do + else if (config_oml_hml0 .eq. 0) then +! initializing with climatological mixed layer depth only + do iCell = 1, nCellsSolve + h_oml(iCell) = h_oml_initial(iCell) + hu_oml(iCell) = 0. + hv_oml(iCell) = 0. + t_oml_200m_initial(iCell) = sst(iCell) - 5. + end do + else + do iCell = 1, nCellsSolve + h_oml(iCell) = h_oml_initial(iCell) + ! WRF COMMENT: + ! fill in near coast area with SST: 200 K was set as missing value in ocean pre-processing code + if( (t_oml_200m_initial(iCell) > 200.) .and. (t_oml_200m_initial(iCell) <= 201.) ) & + t_oml_200m_initial(iCell) = sst(iCell) + end do + end if + end if + end if + !initialization of temperatures needed for updating the deep soil temperature: + if(.not. config_do_restart) then + do iCell = 1, nCellsSolve @@ -313,7 +354,8 @@ subroutine physics_init(dminfo,clock,configs,mesh,diag,tend,state,time_lev,diag_ call init_convection(mesh,configs,diag_physics) !initialization of cloud microphysics processes: - if(config_microp_scheme .ne. 'off') call microphysics_init + if(config_microp_scheme .ne. 'off') & + call microphysics_init(dminfo,mesh,sfc_input,diag_physics) !initialization of surface layer processes: if(config_sfclayer_scheme .ne. 
'off') call init_sfclayer @@ -357,9 +399,9 @@ subroutine physics_init(dminfo,clock,configs,mesh,diag,tend,state,time_lev,diag_ end subroutine physics_init -!================================================================================================== +!================================================================================================================= subroutine init_dirs_forphys(mesh) -!================================================================================================== +!================================================================================================================= !inout arguments: !---------------- @@ -373,7 +415,7 @@ subroutine init_dirs_forphys(mesh) !local variables: integer:: iCell -!--------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_dimension(mesh,'nCells',nCells) @@ -399,15 +441,15 @@ subroutine init_dirs_forphys(mesh) end subroutine init_dirs_forphys -!================================================================================================== +!================================================================================================================= subroutine r3_normalize(ax, ay, az) -!================================================================================================== +!================================================================================================================= !normalizes the vector (ax, ay, az) real (kind=RKIND), intent(inout) :: ax, ay, az real (kind=RKIND) :: mi -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- mi = 1.0 / sqrt(ax**2 + ay**2 + az**2) ax = ax * mi @@ -416,6 +458,6 @@ subroutine r3_normalize(ax, ay, 
az) end subroutine r3_normalize -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_init -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_init_microphysics.F b/src/core_atmosphere/physics/mpas_atmphys_init_microphysics.F new file mode 100644 index 0000000000..e2a93876a1 --- /dev/null +++ b/src/core_atmosphere/physics/mpas_atmphys_init_microphysics.F @@ -0,0 +1,88 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! + +!================================================================================================================= + module mpas_atmphys_init_microphysics + use mpas_dmpar + use mpas_kind_types + use mpas_derived_types + use mpas_pool_routines + + use mpas_atmphys_utilities +!use module_mp_thompson, only: is_aerosol_aware,naCCN0,naCCN1,naIN0,naIN1,ntb_arc,ntb_arw,ntb_art,ntb_arr, & +! ntb_ark,tnccn_act + + implicit none + private + public:: init_thompson_clouddroplets_forMPAS + +!MPAS main initialization of the Thompson parameterization of cloud microphysics with nucleation of cloud +!droplets based on distributions of CCNs and INs (aerosol-aware parameterization). +!Laura D. Fowler (send comments to laura@ucar.edu). +!2016-03-28. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! 
* added "use mpas_dmpar" at the top of the module. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-04. + + + contains + + +!================================================================================================================= + subroutine init_thompson_clouddroplets_forMPAS(mesh,sfc_input,diag_physics) +!================================================================================================================= + +!input variables: + type(mpas_pool_type),intent(in):: mesh + type(mpas_pool_type),intent(in):: sfc_input + +!inout variables: + type(mpas_pool_type),intent(inout):: diag_physics + +!local variables and pointers: + integer,pointer:: nCellsSolve + integer,dimension(:),pointer:: landmask + + real(kind=RKIND),dimension(:),pointer:: nt_c,mu_c + + integer:: iCell + +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine init_thompson_clouddroplets_forMPAS:' + + call mpas_pool_get_dimension(mesh,'nCellsSolve',nCellsSolve) + + call mpas_pool_get_array(sfc_input,'landmask',landmask) + call mpas_pool_get_array(diag_physics,'nt_c',nt_c) + call mpas_pool_get_array(diag_physics,'mu_c',mu_c) + +!... initialize the prescribed number of cloud droplets, and mu_c (parameter in the exponential of the generalized +!gamma distribution) as a function of the land-ocean mask. as set in the thompson cloud microphysics scheme, nt_c +!is set to 100 per cc (100.E6 m^-3) for maritime cases and 300 per cc (300.E6 m^-3) for continental cases. + do iCell = 1, nCellsSolve + if(landmask(iCell) .eq. 1) then + nt_c(iCell) = 300.e6 + elseif(landmask(iCell) .eq. 0) then + nt_c(iCell) = 100.e6 + endif + mu_c(iCell) = MIN(15., (1000.e6/nt_c(iCell) + 2.)) + enddo + +!write(0,*) '--- end subroutine init_thompson_clouddroplets_forMPAS.' 
+ + end subroutine init_thompson_clouddroplets_forMPAS + +!================================================================================================================= + end module mpas_atmphys_init_microphysics +!================================================================================================================= + + + diff --git a/src/core_atmosphere/physics/mpas_atmphys_initialize_real.F b/src/core_atmosphere/physics/mpas_atmphys_initialize_real.F index d34ac4b5d2..ba983bf451 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_initialize_real.F +++ b/src/core_atmosphere/physics/mpas_atmphys_initialize_real.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_initialize_real use mpas_kind_types use mpas_dmpar @@ -21,43 +21,43 @@ module mpas_atmphys_initialize_real public:: physics_initialize_real -!>\brief MPAS initialization of surface properties for real case initialization. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_initialize_real: -!> -------------------------------------------- -!> physics_initialize_real : main subroutine (called from subroutine init_atm_setup_test_case in -!> ./src/core_init_atmosphere/mpas_init_atm_test_cases.F). -!> init_soil_layers : main subroutine for initialization of soil properties. -!> init_soil_layers_depth : initialize height and depth of soil layers needed in NOAH scheme. -!> init_soil_layers_properties: initialize soil temperature, soil moisture, etc. -!> adjust_input_soiltemp : adjust the deep soil temperature to sea-level values. 
-!> physics_init_sst : initialize the skin temperature to the SSTs over oceans. -!> physics_init_seaice : correct vegetation and soil typs as function of fractional sea ice. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * revised entire module: -!> -> changed nCells to nCellsSolve in every subroutine. -!> -> removed modifying snoalb (surface albedo over snow) over sea-ice points. -!> -> revised subroutine physics_init_sst. -!> -> revised subroutine physics_init_seaice. -!> Laura D. Fowler (laura@ucar.edu) / 2013-08-02. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * In subroutine physics_init_seaice, assign the sea-ice land use category as a function of -!> the land use category input file (MODIS OR USGS). -!> Dominikus Heinzeller (IMK) / 2014-07-24. +!MPAS initialization of surface properties for real case initialization. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_initialize_real: +! -------------------------------------------- +! physics_initialize_real : main subroutine (called from subroutine init_atm_setup_test_case in +! ./src/core_init_atmosphere/mpas_init_atm_test_cases.F). +! init_soil_layers : main subroutine for initialization of soil properties. +! init_soil_layers_depth : initialize height and depth of soil layers needed in NOAH scheme. +! init_soil_layers_properties: initialize soil temperature, soil moisture, etc. +! adjust_input_soiltemp : adjust the deep soil temperature to sea-level values. +! physics_init_sst : initialize the skin temperature to the SSTs over oceans. +! physics_init_seaice : correct vegetation and soil typs as function of fractional sea ice. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * revised entire module: +! -> changed nCells to nCellsSolve in every subroutine. +! 
-> removed modifying snoalb (surface albedo over snow) over sea-ice points. +! -> revised subroutine physics_init_sst. +! -> revised subroutine physics_init_seaice. +! Laura D. Fowler (laura@ucar.edu) / 2013-08-02. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * In subroutine physics_init_seaice, assign the sea-ice land use category as a function of +! the land use category input file (MODIS OR USGS). +! Dominikus Heinzeller (IMK) / 2014-07-24. contains -!================================================================================================== + +!================================================================================================================= subroutine physics_initialize_real(mesh, fg, dminfo, dims, configs) -!================================================================================================== +!================================================================================================================= + !input arguments: type (mpas_pool_type), intent(in) :: mesh type (dm_info), intent(in) :: dminfo @@ -91,10 +91,9 @@ subroutine physics_initialize_real(mesh, fg, dminfo, dims, configs) !temporary: integer:: iSoil,nSoilLevels -!-------------------------------------------------------------------------------------------------- - -! write(0,*) -! 
write(0,*) '--- enter physics_initialize_real:' +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter physics_initialize_real:' call mpas_pool_get_config(configs, 'config_sfc_prefix', config_sfc_prefix) call mpas_pool_get_config(configs, 'config_start_time', config_start_time) @@ -178,9 +177,9 @@ subroutine physics_initialize_real(mesh, fg, dminfo, dims, configs) end subroutine physics_initialize_real -!================================================================================================== +!================================================================================================================= subroutine init_soil_layers(mesh,fg,dminfo,dims,configs) -!================================================================================================== +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: mesh @@ -191,7 +190,7 @@ subroutine init_soil_layers(mesh,fg,dminfo,dims,configs) !inout arguments: type(mpas_pool_type),intent(inout):: fg -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- !adjust the annual mean deep soil temperature: call adjust_input_soiltemps(mesh,fg,dims) @@ -204,9 +203,9 @@ subroutine init_soil_layers(mesh,fg,dminfo,dims,configs) end subroutine init_soil_layers -!================================================================================================== +!================================================================================================================= subroutine adjust_input_soiltemps(mesh, fg, dims) -!================================================================================================== 
+!================================================================================================================= !input arguments: type (mpas_pool_type), intent(in) :: mesh @@ -224,8 +223,7 @@ subroutine adjust_input_soiltemps(mesh, fg, dims) real(kind=RKIND),dimension(:),pointer :: skintemp,soiltemp,tmn real(kind=RKIND),dimension(:,:),pointer:: st_fg -!-------------------------------------------------------------------------------------------------- - +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_dimension(dims, 'nCellsSolve', nCellsSolve) call mpas_pool_get_dimension(dims, 'nFGSoilLevels', nFGSoilLevels) @@ -261,9 +259,9 @@ subroutine adjust_input_soiltemps(mesh, fg, dims) end subroutine adjust_input_soiltemps -!================================================================================================== +!================================================================================================================= subroutine init_soil_layers_depth(mesh, fg, dims, configs) -!================================================================================================== +!================================================================================================================= !input arguments: type (mpas_pool_type), intent(in) :: mesh @@ -281,10 +279,9 @@ subroutine init_soil_layers_depth(mesh, fg, dims, configs) real(kind=RKIND),dimension(:,:),pointer:: dzs_fg,zs_fg real(kind=RKIND),dimension(:,:),pointer:: dzs,zs -!-------------------------------------------------------------------------------------------------- - -! write(0,*) -! 
write(0,*) '--- enter subroutine init_soil_layers_depth:' +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine init_soil_layers_depth:' call mpas_pool_get_dimension(dims, 'nCellsSolve', nCellsSolve) call mpas_pool_get_dimension(dims, 'nSoilLevels', nSoilLevels) @@ -328,9 +325,11 @@ subroutine init_soil_layers_depth(mesh, fg, dims, configs) end subroutine init_soil_layers_depth -!================================================================================================== +!================================================================================================================= subroutine init_soil_layers_properties(mesh, fg, dminfo, dims, configs) -!================================================================================================== +!================================================================================================================= + + use mpas_abort, only : mpas_dmpar_global_abort !input arguments: type (mpas_pool_type), intent(in) :: mesh @@ -355,10 +354,11 @@ subroutine init_soil_layers_properties(mesh, fg, dminfo, dims, configs) integer, pointer :: config_nsoillevels -!-------------------------------------------------------------------------------------------------- + character(len=StrKIND) :: errstring +!----------------------------------------------------------------------------------------------------------------- !write(0,*) -! write(0,*) '--- enter subroutine init_soil_layers_properties:' +!write(0,*) '--- enter subroutine init_soil_layers_properties:' call mpas_pool_get_dimension(dims, 'nCellsSolve', nCellsSolve) call mpas_pool_get_dimension(dims, 'nSoilLevels', nSoilLevels) @@ -394,11 +394,11 @@ subroutine init_soil_layers_properties(mesh, fg, dminfo, dims, configs) enddo enddo if(num_st .gt. 
0) then - write(0,*) 'Error in interpolation of st_fg to MPAS grid: num_st =', num_st - call mpas_dmpar_abort(dminfo) + write(errstring,*) 'Error in interpolation of st_fg to MPAS grid: num_st =', num_st + call mpas_dmpar_global_abort(trim(errstring)) elseif(num_sm .gt. 0) then - write(0,*) 'Error in interpolation of sm_fg to MPAS grid: num_sm =', num_sm - call mpas_dmpar_abort(dminfo) + write(errstring,*) 'Error in interpolation of sm_fg to MPAS grid: num_sm =', num_sm + call mpas_dmpar_global_abort(trim(errstring)) endif if(config_nsoillevels .ne. 4) & @@ -506,9 +506,9 @@ subroutine init_soil_layers_properties(mesh, fg, dminfo, dims, configs) end subroutine init_soil_layers_properties -!================================================================================================== +!================================================================================================================= subroutine physics_init_sst(mesh, input, dims, configs) -!================================================================================================== +!================================================================================================================= !input arguments: type (mpas_pool_type), intent(in) :: mesh @@ -529,9 +529,9 @@ subroutine physics_init_sst(mesh, input, dims, configs) logical, pointer :: config_frac_seaice -!-------------------------------------------------------------------------------------------------- -! write(0,*) -! write(0,*) '--- enter subroutine physics_init_sst:' +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine physics_init_sst:' call mpas_pool_get_config(configs, 'config_frac_seaice', config_frac_seaice) @@ -572,13 +572,13 @@ subroutine physics_init_sst(mesh, input, dims, configs) num_seaice_changes call physics_message(mess) -! 
write(0,*) '--- end subroutine physics_init_sst:' +!write(0,*) '--- end subroutine physics_init_sst:' end subroutine physics_init_sst -!================================================================================================== +!================================================================================================================= subroutine physics_init_seaice(mesh, input, dims, configs) -!================================================================================================== +!================================================================================================================= !input arguments: type (mpas_pool_type), intent(in) :: mesh @@ -612,9 +612,9 @@ subroutine physics_init_seaice(mesh, input, dims, configs) real(kind=RKIND),parameter:: xice_tsk_threshold = 271. real(kind=RKIND),parameter:: total_depth = 3. ! 3-meter soil depth. -!-------------------------------------------------------------------------------------------------- -! write(0,*) -! write(0,*) '--- enter physics_init_seaice:' +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter physics_init_seaice:' call mpas_pool_get_config(configs, 'config_frac_seaice', config_frac_seaice) call mpas_pool_get_config(configs, 'config_landuse_data', config_landuse_data) @@ -715,11 +715,11 @@ subroutine physics_init_seaice(mesh, input, dims, configs) enddo 101 format(i9,5(1x,e15.8)) -! 
write(0,*) '--- end physics_init_seaice:' +!write(0,*) '--- end physics_init_seaice:' end subroutine physics_init_seaice -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_initialize_real -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_interface.F b/src/core_atmosphere/physics/mpas_atmphys_interface.F index 2a87e457b5..06b0e972ef 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_interface.F +++ b/src/core_atmosphere/physics/mpas_atmphys_interface.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_interface use mpas_kind_types use mpas_derived_types @@ -22,58 +22,57 @@ module mpas_atmphys_interface microphysics_from_MPAS, & microphysics_to_MPAS - integer:: i,j,k - -!>\brief interface for conversion between variables used in the MPAS dynamical core and variables -!> needed in the physics parameterizations. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_interface_nhyd: -!> ------------------------------------------- -!> allocate_forall_physics : allocation of all meteorological variables needed in the physics. -!> deallocate_forall_physics: deallocation of all meteorological variables needed in the physics. 
-!> MPAS_to_physics : conversion of input "MPAS" variables to "physics" variables. -!> microphysics_from_MPAS : initialize local arrays needed in cloud microphysics schemes. -!> microphysics_to_MPAS : copy local arrays needed in cloud microphysics schemesto MPAS arrays. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * In subroutine MPAS_to_physics, moved the calculation of the local arrays qv_p,qc_p, and -!> qr_p above the calculation of th_p so that we do not need to use the pointer qv. -!> * In subroutine microphysics_from_MPAS, moved the calculation of the local arrays qv_p,qc_p, -!> and qr_p above the calculation of th_p so that we do not need to use the pointer qv. -!> * In subroutine microphysics_to_MPAS, since microphysics schemes update the temperature and -!> water vapor mixing ratio, we first update the total pressure and exner functions. Then, we -!> update the modified potential temperature and calculate the diabatic tendency due to cloud -!> microphysics processes. -!> Laura D. Fowler (laura@ucar.edu) / 2013-11-07. -!> * Replaced the variable g (that originally pointed to gravity) with gravity, for simplicity. -!> Laura D. Fowler (laura@ucar.edu) / 2014-03-21. -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * Modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * Added calculation of the surface pressure tendency. Moved the calculation of znu_p below -!> the calculation of the surface pressure to avoid dividing by zero if the surface pressure -!> is not output in the init file. -!> Laura D. Fowler (birch.mmm.ucar.ecu) / 2014-06-23. -!> * Renamed module mpas_atmphys_interface_nhyd to mpas_atmphys_interface. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-19. 
-!> * In subroutine microphysics_to_MPAS, reverted the calculation of cloud microphysics tendency -!> rt_diabatic_tend, and update of the state variables to git hash identifier 7a66f273e182f4. -!> This change reflects the fact that we want to compute rt_diabatic_tend at constant volume. -!> Laura D. Fowler (laura@ucar.edu) / 2014-1-015. +!Interface for conversion between variables used in the MPAS dynamical core and variables needed in the +!physics parameterizations. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_interface_nhyd: +! ------------------------------------------- +! allocate_forall_physics : allocation of all meteorological variables needed in the physics. +! deallocate_forall_physics: deallocation of all meteorological variables needed in the physics. +! MPAS_to_physics : conversion of input "MPAS" variables to "physics" variables. +! microphysics_from_MPAS : initialize local arrays needed in cloud microphysics schemes. +! microphysics_to_MPAS : copy local arrays needed in cloud microphysics schemesto MPAS arrays. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * in subroutine MPAS_to_physics, moved the calculation of the local arrays qv_p,qc_p, and qr_p above the +! calculation of th_p so that we do not need to use the pointer qv. +! * in subroutine microphysics_from_MPAS, moved the calculation of the local arrays qv_p,qc_p, and qr_p above +! the calculation of th_p so that we do not need to use the pointer qv. +! * in subroutine microphysics_to_MPAS, since microphysics schemes update the temperature and water vapor +! mixing ratio, we first update the total pressure and exner functions. Then, we update the modified +! potential temperature and calculate the diabatic tendency due to cloud microphysics processes. +! Laura D. Fowler (laura@ucar.edu) / 2013-11-07. +! 
* replaced the variable g (that originally pointed to gravity) with gravity, for simplicity. +! Laura D. Fowler (laura@ucar.edu) / 2014-03-21. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * added calculation of the surface pressure tendency. Moved the calculation of znu_p below the calculation +! of the surface pressure to avoid dividing by zero if the surface pressure is not output in the init file. +! Laura D. Fowler (birch.mmm.ucar.ecu) / 2014-06-23. +! * renamed module mpas_atmphys_interface_nhyd to mpas_atmphys_interface. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-19. +! * in subroutine microphysics_to_MPAS, reverted the calculation of cloud microphysics tendency rt_diabatic_tend, +! and update of the state variables to git hash identifier 7a66f273e182f4. This change reflects the fact that +! we want to compute rt_diabatic_tend at constant volume. +! Laura D. Fowler (laura@ucar.edu) / 2014-01-015. +! * added the initialization of local variables needed in the parameterization of the MYNN surface layer scheme +! and planetary boundary layer scheme from WRF 3.6.1. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-11. +! * corrected the calculation of the surface pressure, mainly extrapolation of the air density to the surface. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-25. 
contains -!================================================================================================== +!================================================================================================================= subroutine allocate_forall_physics -!================================================================================================== +!================================================================================================================= if(.not.allocated(psfc_p) ) allocate(psfc_p(ims:ime,jms:jme) ) if(.not.allocated(ptop_p) ) allocate(ptop_p(ims:ime,jms:jme) ) @@ -106,6 +105,14 @@ subroutine allocate_forall_physics if(.not.allocated(qs_p) ) allocate(qs_p(ims:ime,kms:kme,jms:jme) ) if(.not.allocated(qg_p) ) allocate(qg_p(ims:ime,kms:kme,jms:jme) ) + pbl_select: select case (trim(pbl_scheme)) + case("bl_mynn") + if(.not.allocated(ni_p)) allocate(ni_p(ims:ime,kms:kme,jms:jme)) + + case default + + end select pbl_select + !... arrays used for calculating the hydrostatic pressure and exner function: if(.not.allocated(psfc_hyd_p) ) allocate(psfc_hyd_p(ims:ime,jms:jme) ) if(.not.allocated(psfc_hydd_p) ) allocate(psfc_hydd_p(ims:ime,jms:jme) ) @@ -117,9 +124,9 @@ subroutine allocate_forall_physics end subroutine allocate_forall_physics -!================================================================================================== +!================================================================================================================= subroutine deallocate_forall_physics -!================================================================================================== +!================================================================================================================= if(allocated(psfc_p) ) deallocate(psfc_p ) if(allocated(ptop_p) ) deallocate(ptop_p ) @@ -152,6 +159,14 @@ subroutine deallocate_forall_physics if(allocated(qs_p) ) deallocate(qs_p ) if(allocated(qg_p) ) deallocate(qg_p ) + pbl_select: select 
case (trim(pbl_scheme)) + case("bl_mynn") + if(allocated(ni_p)) deallocate(ni_p) + + case default + + end select pbl_select + if(allocated(psfc_hyd_p) ) deallocate(psfc_hyd_p ) if(allocated(psfc_hydd_p) ) deallocate(psfc_hydd_p ) if(allocated(pres_hyd_p) ) deallocate(pres_hyd_p ) @@ -162,15 +177,16 @@ subroutine deallocate_forall_physics end subroutine deallocate_forall_physics -!================================================================================================== - subroutine MPAS_to_physics(mesh,state,time_lev,diag,diag_physics) -!================================================================================================== +!================================================================================================================= + subroutine MPAS_to_physics(mesh,state,time_lev,diag,diag_physics,its,ite) +!================================================================================================================= !input variables: type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(in):: state type(mpas_pool_type),intent(in):: diag + integer,intent(in):: its,ite integer,intent(in):: time_lev !inout variables: @@ -178,6 +194,8 @@ subroutine MPAS_to_physics(mesh,state,time_lev,diag,diag_physics) !local pointers: integer,pointer:: index_qv,index_qc,index_qr,index_qi,index_qs,index_qg + integer,pointer:: index_ni + real(kind=RKIND),dimension(:),pointer :: latCell,lonCell real(kind=RKIND),dimension(:),pointer :: fzm,fzp,rdzw real(kind=RKIND),dimension(:),pointer :: surface_pressure,plrad @@ -185,6 +203,7 @@ subroutine MPAS_to_physics(mesh,state,time_lev,diag,diag_physics) real(kind=RKIND),dimension(:,:),pointer :: zz,exner,pressure_b,rtheta_p,rtheta_b real(kind=RKIND),dimension(:,:),pointer :: rho_zz,theta_m,pressure_p,u,v,w real(kind=RKIND),dimension(:,:),pointer :: qv,qc,qr,qi,qs,qg + real(kind=RKIND),dimension(:,:),pointer :: ni real(kind=RKIND),dimension(:,:,:),pointer:: scalars !local variables: @@ -193,17 +212,16 @@ 
subroutine MPAS_to_physics(mesh,state,time_lev,diag,diag_physics) real(kind=RKIND):: z0,z1,z2,w1,w2 real(kind=RKIND):: rho_a,rho1,rho2,tem1,tem2 -!-------------------------------------------------------------------------------------------------- - -! write(0,*) -! write(0,*) '--- enter subroutine MPAS_to_phys:' -! write(0,*) 'ims=',ims,' ime=',ime -! write(0,*) 'jms=',jms,' jme=',jme -! write(0,*) 'kms=',kms,' kme=',kme -! write(0,*) -! write(0,*) 'its=',its,' ite=',ite -! write(0,*) 'jts=',jts,' jte=',jte -! write(0,*) 'kts=',kts,' kte=',kte +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine MPAS_to_phys:' +!write(0,*) 'ims=',ims,' ime=',ime +!write(0,*) 'jms=',jms,' jme=',jme +!write(0,*) 'kms=',kms,' kme=',kme +!write(0,*) +!write(0,*) 'its=',its,' ite=',ite +!write(0,*) 'jts=',jts,' jte=',jte +!write(0,*) 'kts=',kts,' kte=',kte !initialization: call mpas_pool_get_array(mesh,'latCell',latCell) @@ -255,7 +273,7 @@ subroutine MPAS_to_physics(mesh,state,time_lev,diag,diag_physics) qi_p(i,k,j) = max(0.,qi(k,i)) qs_p(i,k,j) = max(0.,qs(k,i)) qg_p(i,k,j) = max(0.,qg(k,i)) - + !arrays located at theta points: u_p(i,k,j) = u(k,i) v_p(i,k,j) = v(k,i) @@ -276,6 +294,23 @@ subroutine MPAS_to_physics(mesh,state,time_lev,diag,diag_physics) enddo enddo + pbl_select: select case (trim(pbl_scheme)) + case("bl_mynn") + call mpas_pool_get_dimension(state,'index_ni',index_ni) + ni => scalars(index_ni,:,:) + + do j = jts,jte + do k = kts,kte + do i = its,ite + ni_p(i,k,j) = max(0.,ni(k,i)) + enddo + enddo + enddo + + case default + + end select pbl_select + !calculation of the surface pressure using hydrostatic assumption down to the surface:: do j = jts,jte do i = its,ite @@ -283,8 +318,10 @@ subroutine MPAS_to_physics(mesh,state,time_lev,diag,diag_physics) tem2 = zgrid(3,i)-zgrid(2,i) rho1 = rho_zz(1,i) * zz(1,i) * (1. + qv_p(i,1,j)) rho2 = rho_zz(2,i) * zz(2,i) * (1. 
+ qv_p(i,2,j)) +! surface_pressure(i) = 0.5*gravity*(zgrid(2,i)-zgrid(1,i)) & +! * (rho1 + 0.5*(rho2-rho1)*tem1/(tem1+tem2)) surface_pressure(i) = 0.5*gravity*(zgrid(2,i)-zgrid(1,i)) & - * (rho1 + 0.5*(rho2-rho1)*tem1/(tem1+tem2)) + * (rho1 - 0.5*(rho2-rho1)*tem1/(tem1+tem2)) surface_pressure(i) = surface_pressure(i) + pressure_p(1,i) + pressure_b(1,i) enddo @@ -307,6 +344,7 @@ subroutine MPAS_to_physics(mesh,state,time_lev,diag,diag_physics) !check that the pressure in the layer above the surface is greater than that in the layer !above it: + 201 format(3i8,10(1x,e15.8)) do j = jts,jte do i = its,ite if(pres_p(i,1,j) .le. pres_p(i,2,j)) then @@ -366,7 +404,8 @@ subroutine MPAS_to_physics(mesh,state,time_lev,diag,diag_physics) w2 = 1.-w1 t2_p(i,k,j) = w1*t_p(i,k,j)+w2*t_p(i,k+1,j) pres2_p(i,k,j) = w1*pres_p(i,k,j)+w2*pres_p(i,k+1,j) - psfc_p(i,j) = pres2_p(i,k,j) +! psfc_p(i,j) = pres2_p(i,k,j) + psfc_p(i,j) = surface_pressure(i) enddo enddo @@ -404,40 +443,38 @@ subroutine MPAS_to_physics(mesh,state,time_lev,diag,diag_physics) enddo enddo -!formats: - 201 format(3i8,10(1x,e15.8)) - 202 format(2i6,10(1x,e15.8)) - 203 format(i6,10(1x,e15.8)) - end subroutine MPAS_to_physics -!================================================================================================== - subroutine microphysics_from_MPAS(mesh,state,time_lev,diag) -!================================================================================================== +!================================================================================================================= + subroutine microphysics_from_MPAS(mesh,state,time_lev,diag,diag_physics,its,ite) +!================================================================================================================= !input variables: + type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(in):: state type(mpas_pool_type),intent(in):: diag - type(mpas_pool_type),intent(in):: mesh + type(mpas_pool_type),intent(in):: diag_physics 
+ integer,intent(in):: its,ite integer:: time_lev !local pointers: integer,pointer:: index_qv,index_qc,index_qr,index_qi,index_qs,index_qg - real(kind=RKIND),dimension(:,:),pointer :: zgrid - real(kind=RKIND),dimension(:,:),pointer :: zz,exner,pressure_b,rtheta_p,rtheta_b + integer,pointer:: index_ni,index_nr + real(kind=RKIND),dimension(:),pointer :: nt_c,mu_c + real(kind=RKIND),dimension(:,:),pointer :: zgrid,w + real(kind=RKIND),dimension(:,:),pointer :: zz,exner,pressure_b real(kind=RKIND),dimension(:,:),pointer :: rho_zz,theta_m,pressure_p real(kind=RKIND),dimension(:,:),pointer :: qv,qc,qr,qi,qs,qg + real(kind=RKIND),dimension(:,:),pointer :: ni,nr + real(kind=RKIND),dimension(:,:),pointer :: rainprod,evapprod + real(kind=RKIND),dimension(:,:),pointer :: re_cloud,re_ice,re_snow real(kind=RKIND),dimension(:,:,:),pointer:: scalars !local variables: integer:: i,k,j -!-------------------------------------------------------------------------------------------------- - -!initialization: -! write(0,*) -! 
write(0,*) '--- enter subroutine microphysics_from_MPAS:' +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_array(mesh,'zgrid',zgrid) call mpas_pool_get_array(mesh,'zz' ,zz ) @@ -445,26 +482,37 @@ subroutine microphysics_from_MPAS(mesh,state,time_lev,diag) call mpas_pool_get_array(diag,'exner' ,exner ) call mpas_pool_get_array(diag,'pressure_base',pressure_b) call mpas_pool_get_array(diag,'pressure_p' ,pressure_p) - call mpas_pool_get_array(diag,'rtheta_base' ,rtheta_b ) - call mpas_pool_get_array(diag,'rtheta_p' ,rtheta_p ) + + call mpas_pool_get_array(diag_physics,'nt_c' ,nt_c ) + call mpas_pool_get_array(diag_physics,'mu_c' ,mu_c ) + call mpas_pool_get_array(diag_physics,'rainprod',rainprod) + call mpas_pool_get_array(diag_physics,'evapprod',evapprod) + call mpas_pool_get_array(diag_physics,'re_cloud',re_cloud) + call mpas_pool_get_array(diag_physics,'re_ice' ,re_ice ) + call mpas_pool_get_array(diag_physics,'re_snow' ,re_snow ) call mpas_pool_get_array(state,'rho_zz' ,rho_zz ,time_lev) call mpas_pool_get_array(state,'theta_m',theta_m,time_lev) + call mpas_pool_get_array(state,'w' ,w ,time_lev) - call mpas_pool_get_dimension(state,'index_qv',index_qv) - call mpas_pool_get_dimension(state,'index_qc',index_qc) - call mpas_pool_get_dimension(state,'index_qr',index_qr) - call mpas_pool_get_dimension(state,'index_qi',index_qi) - call mpas_pool_get_dimension(state,'index_qs',index_qs) - call mpas_pool_get_dimension(state,'index_qg',index_qg) + call mpas_pool_get_dimension(state,'index_qv' ,index_qv ) + call mpas_pool_get_dimension(state,'index_qc' ,index_qc ) + call mpas_pool_get_dimension(state,'index_qr' ,index_qr ) + call mpas_pool_get_dimension(state,'index_qi' ,index_qi ) + call mpas_pool_get_dimension(state,'index_qs' ,index_qs ) + call mpas_pool_get_dimension(state,'index_qg' ,index_qg ) + call mpas_pool_get_dimension(state,'index_ni' ,index_ni ) + call 
mpas_pool_get_dimension(state,'index_nr' ,index_nr ) call mpas_pool_get_array(state,'scalars',scalars,time_lev) - qv => scalars(index_qv,:,:) - qc => scalars(index_qc,:,:) - qr => scalars(index_qr,:,:) - qi => scalars(index_qi,:,:) - qs => scalars(index_qs,:,:) - qg => scalars(index_qg,:,:) + qv => scalars(index_qv,:,:) + qc => scalars(index_qc,:,:) + qr => scalars(index_qr,:,:) + qi => scalars(index_qi,:,:) + qs => scalars(index_qs,:,:) + qg => scalars(index_qg,:,:) + ni => scalars(index_ni,:,:) + nr => scalars(index_nr,:,:) !initialize variables needed in the cloud microphysics schemes: do j = jts, jte @@ -482,6 +530,7 @@ subroutine microphysics_from_MPAS(mesh,state,time_lev,diag) z_p(i,k,j) = zgrid(k,i) dz_p(i,k,j) = zgrid(k+1,i) - zgrid(k,i) + w_p(i,k,j) = w(k,i) enddo enddo enddo @@ -489,7 +538,7 @@ subroutine microphysics_from_MPAS(mesh,state,time_lev,diag) !additional initialization as function of cloud microphysics scheme: microp_select_init: select case(microp_scheme) - case ("wsm6") + case ("mp_thompson","mp_wsm6") do j = jts, jte do k = kts, kte do i = its, ite @@ -500,40 +549,67 @@ subroutine microphysics_from_MPAS(mesh,state,time_lev,diag) enddo enddo + microp2_select: select case(microp_scheme) + + case("mp_thompson") + do j = jts,jte + do i = its,ite + muc_p(i,j) = mu_c(i) + ntc_p(i,j) = nt_c(i) + enddo + enddo + do j = jts, jte + do k = kts, kte + do i = its, ite + ni_p(i,k,j) = ni(k,i) + nr_p(i,k,j) = nr(k,i) + rainprod_p(i,k,j) = rainprod(k,i) + evapprod_p(i,k,j) = evapprod(k,i) + recloud_p(i,k,j) = re_cloud(k,i) + reice_p(i,k,j) = re_ice(k,i) + resnow_p(i,k,j) = re_snow(k,i) + enddo + enddo + enddo + + case default + + end select microp2_select + case default end select microp_select_init -! write(0,*) '--- end subroutine microphysics_from_MPAS.' 
- -!formats: - 201 format(2i6,10(1x,e15.8)) - end subroutine microphysics_from_MPAS -!================================================================================================== - subroutine microphysics_to_MPAS(mesh,state,time_lev,diag,tend,itimestep) -!================================================================================================== +!================================================================================================================= + subroutine microphysics_to_MPAS(mesh,state,time_lev,diag,diag_physics,tend,itimestep,its,ite) +!================================================================================================================= !input variables: - integer,intent(in):: itimestep - integer,intent(in):: time_lev type(mpas_pool_type),intent(in):: mesh + integer,intent(in):: itimestep,time_lev + integer,intent(in):: its,ite + !output variables: type(mpas_pool_type),intent(inout):: state type(mpas_pool_type),intent(inout):: diag type(mpas_pool_type),intent(inout):: tend + type(mpas_pool_type),intent(inout):: diag_physics !local pointers: integer,pointer:: index_qv,index_qc,index_qr,index_qi,index_qs,index_qg - real(kind=RKIND),dimension(:),pointer :: rdzw + integer,pointer:: index_ni,index_nr real(kind=RKIND),dimension(:),pointer :: surface_pressure,tend_sfc_pressure real(kind=RKIND),dimension(:,:),pointer :: zgrid real(kind=RKIND),dimension(:,:),pointer :: zz,exner,exner_b,pressure_b,rtheta_p,rtheta_b real(kind=RKIND),dimension(:,:),pointer :: rho_zz,theta_m,pressure_p real(kind=RKIND),dimension(:,:),pointer :: rt_diabatic_tend real(kind=RKIND),dimension(:,:),pointer :: qv,qc,qr,qi,qs,qg + real(kind=RKIND),dimension(:,:),pointer :: ni,nr + real(kind=RKIND),dimension(:,:),pointer :: rainprod,evapprod + real(kind=RKIND),dimension(:,:),pointer :: re_cloud,re_ice,re_snow real(kind=RKIND),dimension(:,:,:),pointer:: scalars !local variables: @@ -541,13 +617,8 @@ subroutine 
microphysics_to_MPAS(mesh,state,time_lev,diag,tend,itimestep) integer:: i,k,j real(kind=RKIND):: rho1,rho2,tem1,tem2 -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- -! write(0,*) -! write(0,*) '--- enter subroutine microphysics_to_MPAS:' - -!initialization: - call mpas_pool_get_array(mesh,'rdzw' ,rdzw ) call mpas_pool_get_array(mesh,'zz' ,zz ) call mpas_pool_get_array(mesh,'zgrid',zgrid) @@ -559,25 +630,35 @@ subroutine microphysics_to_MPAS(mesh,state,time_lev,diag,tend,itimestep) call mpas_pool_get_array(diag,'rtheta_p' ,rtheta_p ) call mpas_pool_get_array(diag,'surface_pressure',surface_pressure) + call mpas_pool_get_array(diag_physics,'rainprod',rainprod) + call mpas_pool_get_array(diag_physics,'evapprod',evapprod) + call mpas_pool_get_array(diag_physics,'re_cloud',re_cloud) + call mpas_pool_get_array(diag_physics,'re_ice' ,re_ice ) + call mpas_pool_get_array(diag_physics,'re_snow' ,re_snow ) + call mpas_pool_get_array(tend,'tend_sfc_pressure',tend_sfc_pressure) call mpas_pool_get_array(state,'rho_zz' ,rho_zz ,time_lev) call mpas_pool_get_array(state,'theta_m',theta_m,time_lev) - call mpas_pool_get_dimension(state,'index_qv',index_qv) - call mpas_pool_get_dimension(state,'index_qc',index_qc) - call mpas_pool_get_dimension(state,'index_qr',index_qr) - call mpas_pool_get_dimension(state,'index_qi',index_qi) - call mpas_pool_get_dimension(state,'index_qs',index_qs) - call mpas_pool_get_dimension(state,'index_qg',index_qg) + call mpas_pool_get_dimension(state,'index_qv' ,index_qv ) + call mpas_pool_get_dimension(state,'index_qc' ,index_qc ) + call mpas_pool_get_dimension(state,'index_qr' ,index_qr ) + call mpas_pool_get_dimension(state,'index_qi' ,index_qi ) + call mpas_pool_get_dimension(state,'index_qs' ,index_qs ) + call mpas_pool_get_dimension(state,'index_qg' ,index_qg ) + call 
mpas_pool_get_dimension(state,'index_ni' ,index_ni ) + call mpas_pool_get_dimension(state,'index_nr' ,index_nr ) call mpas_pool_get_array(state,'scalars',scalars,time_lev) - qv => scalars(index_qv,:,:) - qc => scalars(index_qc,:,:) - qr => scalars(index_qr,:,:) - qi => scalars(index_qi,:,:) - qs => scalars(index_qs,:,:) - qg => scalars(index_qg,:,:) + qv => scalars(index_qv,:,:) + qc => scalars(index_qc,:,:) + qr => scalars(index_qr,:,:) + qi => scalars(index_qi,:,:) + qs => scalars(index_qs,:,:) + qg => scalars(index_qg,:,:) + ni => scalars(index_ni,:,:) + nr => scalars(index_nr,:,:) call mpas_pool_get_array(tend,'rt_diabatic_tend',rt_diabatic_tend) @@ -617,8 +698,10 @@ subroutine microphysics_to_MPAS(mesh,state,time_lev,diag,tend,itimestep) rho2 = rho_zz(2,i) * zz(2,i) * (1. + qv_p(i,2,j)) tend_sfc_pressure(i) = surface_pressure(i) +! surface_pressure(i) = 0.5*gravity*(zgrid(2,i)-zgrid(1,i)) & +! * (rho1 + 0.5*(rho2-rho1)*tem1/(tem1+tem2)) surface_pressure(i) = 0.5*gravity*(zgrid(2,i)-zgrid(1,i)) & - * (rho1 + 0.5*(rho2-rho1)*tem1/(tem1+tem2)) + * (rho1 - 0.5*(rho2-rho1)*tem1/(tem1+tem2)) surface_pressure(i) = surface_pressure(i) + pressure_p(1,i) + pressure_b(1,i) tend_sfc_pressure(i) = (surface_pressure(i)-tend_sfc_pressure(i)) / dt_dyn enddo @@ -627,12 +710,10 @@ subroutine microphysics_to_MPAS(mesh,state,time_lev,diag,tend,itimestep) !variables specific to different cloud microphysics schemes: microp_select_init: select case(microp_scheme) - case ("wsm6") - + case ("mp_thompson","mp_wsm6") do j = jts, jte do k = kts, kte do i = its, ite - !mass mixing ratios: qi(k,i) = qi_p(i,k,j) qs(k,i) = qs_p(i,k,j) qg(k,i) = qg_p(i,k,j) @@ -640,17 +721,33 @@ subroutine microphysics_to_MPAS(mesh,state,time_lev,diag,tend,itimestep) enddo enddo - case default + microp2_select: select case(microp_scheme) - end select microp_select_init + case("mp_thompson") + do j = jts, jte + do k = kts, kte + do i = its, ite + ni(k,i) = ni_p(i,k,j) + nr(k,i) = nr_p(i,k,j) + rainprod(k,i) = 
rainprod_p(i,k,j) + evapprod(k,i) = evapprod_p(i,k,j) + re_cloud(k,i) = recloud_p(i,k,j) + re_ice(k,i) = reice_p(i,k,j) + re_snow(k,i) = resnow_p(i,k,j) + enddo + enddo + enddo -!formats: - 201 format(2i6,10(1x,e15.8)) + case default -! write(0,*) '--- end subroutine microphysics_to_MPAS' + end select microp2_select + + case default + + end select microp_select_init end subroutine microphysics_to_MPAS -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_interface -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_landuse.F b/src/core_atmosphere/physics/mpas_atmphys_landuse.F index 4f37f0a8ab..6d6f7f3d4e 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_landuse.F +++ b/src/core_atmosphere/physics/mpas_atmphys_landuse.F @@ -9,7 +9,7 @@ #define DM_BCAST_MACRO(A) call mpas_dmpar_bcast_reals(dminfo,size(A),A) #define DM_BCAST_INTEGER(A) call mpas_dmpar_bcast_int(dminfo,A) -!================================================================================================== +!================================================================================================================= module mpas_atmphys_landuse use mpas_dmpar use mpas_kind_types @@ -29,58 +29,58 @@ module mpas_atmphys_landuse integer,parameter:: frac_seaice = 0 ! = 1: treats seaice as fractional field. ! = 0: ice/no-ice flag. -!>\brief -!> This module reads the file LANDUSE.TBL which defines the land type of each cell, depending on -!> the origin of the input data, as defined by the value of the variable "sfc_input_data". -!>\author Laura D. Fowler (send comments to laura@ucar.edu). 
-!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_landuse: -!> ------------------------------------ -!> landuse_init_forMPAS: main initialization for land use types. - -!> The allowed values for sfc_input_data are: -!> input_sfc_data = OLD. (13 land types / summer and winter). -!> input_sfc_data = USGS. (33 land types / summer and winter). -!> input_sfc_data = MODIFIED_IGBP_MODIS_NOAH (33 land types / summer and winter). -!> input_sfc_data = SiB (16 land types / summer and winter). -!> input_sfc_data = LW12 ( 3 land types / all seasons). -!> -!> Given the value of the input index lu_index, and the julian day julday, landuse_init_forMPAS -!> initializes the variables: -!> .. background roughness length (z0). -!> .. background surface albedo (sfc_albbck). -!> .. background surface emissivity (sfc_emibck). -!> .. roughness length (znt). -!> .. surface albedo (sfc_albedo). -!> .. surface emissivity (sfc_emiss). -!> .. land mask (xland). -!> .. thermal inertia (thc). -!> .. surface moisture availability (mavail). -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * deleted initialization of xicem and xland. -!> - xicem is now initialized in physics_init. -!> - xland is now initialized in physics_initialize_real and updated in physics_update_sst if -!> needed. -!> Laura D. Fowler (laura@ucar.edu) / 2013-08-24. -!> * added initialization of the background surface albedo over snow. -!> Laura D. Fowler (laura@ucar.edu) / 2013-10-19. -!> * Modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * In subroutine landuse_init_forMPAS, added the definition of isurban as a function of the -!> input landuse data file. -!> Dominikus Heinzeller (IMK) / 2014-07-24. +!This module reads the file LANDUSE.TBL which defines the land type of each cell, depending on +!the origin of the input data, as defined by the value of the variable "sfc_input_data". +!Laura D. 
Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_landuse: +! ------------------------------------ +! landuse_init_forMPAS: main initialization for land use types. + +! The allowed values for sfc_input_data are: +! input_sfc_data = OLD. (13 land types / summer and winter). +! input_sfc_data = USGS. (33 land types / summer and winter). +! input_sfc_data = MODIFIED_IGBP_MODIS_NOAH (33 land types / summer and winter). +! input_sfc_data = SiB (16 land types / summer and winter). +! input_sfc_data = LW12 ( 3 land types / all seasons). +! +! Given the value of the input index lu_index, and the julian day julday, landuse_init_forMPAS +! initializes the variables: +! .. background roughness length (z0). +! .. background surface albedo (sfc_albbck). +! .. background surface emissivity (sfc_emibck). +! .. roughness length (znt). +! .. surface albedo (sfc_albedo). +! .. surface emissivity (sfc_emiss). +! .. land mask (xland). +! .. thermal inertia (thc). +! .. surface moisture availability (mavail). +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * deleted initialization of xicem and xland. +! - xicem is now initialized in physics_init. +! - xland is now initialized in physics_initialize_real and updated in physics_update_sst if needed. +! Laura D. Fowler (laura@ucar.edu) / 2013-08-24. +! * added initialization of the background surface albedo over snow. +! Laura D. Fowler (laura@ucar.edu) / 2013-10-19. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * in subroutine landuse_init_forMPAS, added the definition of isurban as a function of the +! input landuse data file. +! Dominikus Heinzeller (IMK) / 2014-07-24. +! * in subroutine landuse_init_forMPAS, added the initialization of variable ust to a very small value. this was +! needed when the surface layer scheme was updated to that used in WRF version 3.8.1 +! Laura D.
Fowler (laura@ucar.edu) / 2016-10-27. + contains -!================================================================================================== +!================================================================================================================= subroutine landuse_init_forMPAS(dminfo,julday,mesh,configs,diag_physics,sfc_input) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info),intent(in):: dminfo @@ -105,7 +105,7 @@ subroutine landuse_init_forMPAS(dminfo,julday,mesh,configs,diag_physics,sfc_inpu real(kind=RKIND),dimension(:),pointer:: latCell real(kind=RKIND),dimension(:),pointer:: snoalb,snowc,xice real(kind=RKIND),dimension(:),pointer:: albbck,embck,xicem,xland,z0 - real(kind=RKIND),dimension(:),pointer:: mavail,sfc_albedo,sfc_emiss,thc,znt + real(kind=RKIND),dimension(:),pointer:: mavail,sfc_albedo,sfc_emiss,thc,ust,znt !local variables: character(len=StrKIND) :: lutype @@ -124,7 +124,9 @@ subroutine landuse_init_forMPAS(dminfo,julday,mesh,configs,diag_physics,sfc_inpu real(kind=RKIND):: li real(kind=RKIND),dimension(max_cats,max_seas):: albd,slmo,sfem,sfz0,therin,scfx,sfhc -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine landuse_init_forMPAS:' call mpas_pool_get_config(configs,'config_do_restart' ,config_do_restart ) call mpas_pool_get_config(configs,'config_frac_seaice',config_frac_seaice) @@ -143,19 +145,22 @@ subroutine landuse_init_forMPAS(dminfo,julday,mesh,configs,diag_physics,sfc_inpu call mpas_pool_get_array(sfc_input,'xland' , xland ) call mpas_pool_get_array(sfc_input,'sfc_albbck', albbck ) + nullify(mavail) + nullify(ust) 
call mpas_pool_get_array(diag_physics,'sfc_emibck', embck ) call mpas_pool_get_array(diag_physics,'mavail' , mavail ) call mpas_pool_get_array(diag_physics,'sfc_albedo', sfc_albedo) call mpas_pool_get_array(diag_physics,'sfc_emiss' , sfc_emiss ) call mpas_pool_get_array(diag_physics,'thc' , thc ) + call mpas_pool_get_array(diag_physics,'ust' , ust ) call mpas_pool_get_array(diag_physics,'xicem' , xicem ) call mpas_pool_get_array(diag_physics,'z0' , z0 ) call mpas_pool_get_array(diag_physics,'znt' , znt ) - write(0,*) -! write(0,*) '--- enter subroutine landuse_init_forMPAS: julian day=', julday - write(0,*) '--- config_frac_seaice = ',config_frac_seaice - write(0,*) '--- xice_threshold = ',xice_threshold +!write(0,*) +!write(0,*) '--- enter subroutine landuse_init_forMPAS: julian day=', julday +!write(0,*) '--- config_frac_seaice = ',config_frac_seaice +!write(0,*) '--- xice_threshold = ',xice_threshold !reads in the landuse properties from landuse.tbl: if(dminfo % my_proc_id == IO_NODE) then @@ -238,9 +243,9 @@ subroutine landuse_init_forMPAS(dminfo,julday,mesh,configs,diag_physics,sfc_inpu DM_BCAST_MACRO(therin) DM_BCAST_MACRO(sfhc) DM_BCAST_MACRO(scfx) - write(0,*) '--- isice =',isice - write(0,*) '--- iswater =',iswater - write(0,*) '--- isurban =',isurban +!write(0,*) '--- isice =',isice +!write(0,*) '--- iswater =',iswater +!write(0,*) '--- isurban =',isurban if(config_do_restart) then write(0,*) '--- config_do_restart =', config_do_restart write(0,*) '--- skip the end of landuse_init_forMPAS' @@ -274,11 +279,13 @@ subroutine landuse_init_forMPAS(dminfo,julday,mesh,configs,diag_physics,sfc_inpu endif thc(iCell) = therin(is,isn) / 100. z0(iCell) = sfz0(is,isn) / 100. 
- znt(iCell) = z0(iCell) - mavail(iCell) = slmo(is,isn) + znt(iCell) = z0(icell) embck(iCell) = sfem(is,isn) sfc_emiss(iCell) = embck(iCell) + if(associated(mavail)) mavail(iCell) = slmo(is,isn) + if(associated(ust)) ust(iCell) = 0.0001 + !set sea-ice points to land with ice/snow surface properties: if(xice(iCell) .ge. xice_threshold) then albbck(iCell) = albd(isice,isn) / 100. @@ -295,12 +302,13 @@ subroutine landuse_init_forMPAS(dminfo,julday,mesh,configs,diag_physics,sfc_inpu thc(iCell) = therin(isice,isn) / 100. z0(icell) = sfz0(isice,isn) / 100. znt(iCell) = z0(iCell) - mavail(iCell) = slmo(isice,isn) + + if(associated(mavail)) mavail(iCell) = slmo(isice,isn) endif enddo -! write(0,*) '--- end subroutine landuse_init_forMPAS' +!write(0,*) '--- end subroutine landuse_init_forMPAS' !formats: 101 format(i6,8(1x,e15.8)) @@ -308,6 +316,6 @@ subroutine landuse_init_forMPAS(dminfo,julday,mesh,configs,diag_physics,sfc_inpu end subroutine landuse_init_forMPAS -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_landuse -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_lsm_noahinit.F b/src/core_atmosphere/physics/mpas_atmphys_lsm_noahinit.F index b08e852ea1..e2e25090b5 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_lsm_noahinit.F +++ b/src/core_atmosphere/physics/mpas_atmphys_lsm_noahinit.F @@ -11,7 +11,7 @@ #define DM_BCAST_REAL(A) call mpas_dmpar_bcast_real(dminfo,A) #define DM_BCAST_REALS(A) call mpas_dmpar_bcast_reals(dminfo,size(A),A) -!================================================================================================== 
+!================================================================================================================= module mpas_atmphys_lsm_noahinit use mpas_dmpar use mpas_kind_types @@ -28,31 +28,29 @@ module mpas_atmphys_lsm_noahinit public:: noah_init_forMPAS -!>\brief initialization of the NOAH 4-layer land surface scheme. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_lsm_noahinit: -!> ----------------------------------------- -!> noah_init_forMPAS: call lsminit from subroutine init_lsm (module mpas_atmphyse_driver_lsm.F). -!> lsminit : main initialization subroutine for the NOAH 4-layer land surface scheme. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * replaced the variable grav with gravity, for simplicity. -!> Laura D. Fowler (laura@ucar.edu) / 2014-03-21. -!> * added "use mpas_kind_types" at the top of the module. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +!Initialization of the NOAH 4-layer land surface scheme. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_lsm_noahinit: +! ----------------------------------------- +! noah_init_forMPAS: call lsminit from subroutine init_lsm (module mpas_atmphyse_driver_lsm.F). +! lsminit : main initialization subroutine for the NOAH 4-layer land surface scheme. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * replaced the variable grav with gravity, for simplicity. +! Laura D. Fowler (laura@ucar.edu) / 2014-03-21. +! * added "use mpas_kind_types" at the top of the module. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-18. 
contains -!================================================================================================== +!================================================================================================================= subroutine noah_init_forMPAS(dminfo,mesh,configs,diag_physics,sfc_input) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info):: dminfo @@ -63,16 +61,16 @@ subroutine noah_init_forMPAS(dminfo,mesh,configs,diag_physics,sfc_input) type(mpas_pool_type),intent(inout):: diag_physics type(mpas_pool_type),intent(inout):: sfc_input -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- !read formatted files needed for land-surface model: call lsminit(dminfo,mesh,configs,diag_physics,sfc_input) end subroutine noah_init_forMPAS -!================================================================================================== +!================================================================================================================= subroutine lsminit(dminfo,mesh,configs,diag_physics,sfc_input) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info),intent(in):: dminfo @@ -105,7 +103,7 @@ subroutine lsminit(dminfo,mesh,configs,diag_physics,sfc_input) real(kind=RKIND),parameter:: hlice = 3.335e5 real(kind=RKIND),parameter:: t0 = 273.15 -!-------------------------------------------------------------------------------------------------- 
+!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_array(sfc_input,'mminlu' ,mminlu ) call mpas_pool_get_config(configs,'input_soil_data' ,mminsl ) @@ -125,9 +123,11 @@ subroutine lsminit(dminfo,mesh,configs,diag_physics,sfc_input) call mpas_pool_get_array(sfc_input,'snowh' , snowh ) !reads the NOAH LSM tables: - call physics_message( ' initialize NOAH LSM tables' ) + write(0,*) + call physics_message('--- initialize NOAH LSM tables' ) call soil_veg_gen_parm(dminfo,mminlu,mminsl) - call physics_message( ' end initialize NOAH LSM tables' ) + call physics_message('--- end initialize NOAH LSM tables' ) + write(0,*) if(.not.restart) then @@ -202,9 +202,9 @@ subroutine lsminit(dminfo,mesh,configs,diag_physics,sfc_input) end subroutine lsminit -!================================================================================================== +!================================================================================================================= subroutine soil_veg_gen_parm(dminfo,mminlu,mminsl) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info),intent(in):: dminfo @@ -239,8 +239,8 @@ subroutine soil_veg_gen_parm(dminfo,mminlu,mminsl) ! 
!-----READ IN VEGETATION PROPERTIES FROM VEGPARM.TBL -!-------------------------------------------------------------------------------------------------- - +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) !write(0,*) ' enter subroutine soil_veg_gen_parm:' !read in the vegetation properties from vegparm.tbl: @@ -521,6 +521,6 @@ subroutine soil_veg_gen_parm(dminfo,mminlu,mminsl) end subroutine soil_veg_gen_parm -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_lsm_noahinit -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_manager.F b/src/core_atmosphere/physics/mpas_atmphys_manager.F index 3d95091275..f553da6a56 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_manager.F +++ b/src/core_atmosphere/physics/mpas_atmphys_manager.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_manager use mpas_kind_types use mpas_derived_types @@ -28,8 +28,6 @@ module mpas_atmphys_manager real(kind=RKIND), public:: curr_julday !Current Julian day (= 0.0 at 0Z on January 1st). 
real(kind=RKIND), public:: gmt !Greenwich mean time hour of model start (hr) - integer, private:: i,k,j,n - !defines alarms for calling the long- and short-wave radiation codes, for calling the convec- !tion scheme: character(len=*), parameter:: radtlwAlarmID = 'radtlw' @@ -71,42 +69,43 @@ module mpas_atmphys_manager real(kind=RKIND) :: utc_h -!>\brief main time manager for physics parameterizations. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_manager: -!> ------------------------------------ -!> physics_timetracker: check alarms and update boundary conditions if needed. -!> physics_run_init : create and initialize alarms used for physics parameterizations. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * added initialization of variable sf_surface_physics in subroutine physics_run_init. see -!> definition of sf_surface_physics in mpas_atmphys_vars.F -!> Laura D. Fowler (laura@ucar.edu) / 2013-03-11. -!> * removed the namelist option config_conv_shallow_scheme and associated sourcecode. -!> Laura D. Fowler (laura@ucar.edu) / 2013-05-29. -!> * added call to subroutine o3climatology_from_MPAS to interpolate the climatological ozone -!> mixing ratios to the current julian day. -!> Laura D. Fowler (laura@ucar.edu) / 2013-07-03. -!> * added domain%dminfo in call to subroutine physics_update_sst to print local and global -!> min and max values of the updated sea-surface temperatures and sea-ice fractions. -!> Laura D. Fowler (laura@ucar.edu) / 2013-07-24. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * renamed config_conv_deep_scheme to config_convection_scheme. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +!Main time manager for physics parameterizations. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_manager: +! 
------------------------------------ +! physics_timetracker: check alarms and update boundary conditions if needed. +! physics_run_init : create and initialize alarms used for physics parameterizations. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * added initialization of variable sf_surface_physics in subroutine physics_run_init. see +! definition of sf_surface_physics in mpas_atmphys_vars.F +! Laura D. Fowler (laura@ucar.edu) / 2013-03-11. +! * removed the namelist option config_conv_shallow_scheme and associated sourcecode. +! Laura D. Fowler (laura@ucar.edu) / 2013-05-29. +! * added call to subroutine o3climatology_from_MPAS to interpolate the climatological ozone +! mixing ratios to the current julian day. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-03. +! * added domain%dminfo in call to subroutine physics_update_sst to print local and global +! min and max values of the updated sea-surface temperatures and sea-ice fractions. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-24. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * renamed config_conv_deep_scheme to config_convection_scheme. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +! * added initialization of variables has_reqc,has_reqi,and has_reqs needed in the calls to radiation codes +! rrtmg_lwrad and rrtmg_swrad. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-07.
contains -!================================================================================================== +!================================================================================================================= subroutine physics_timetracker(domain,dt,clock,itimestep,xtime_s) -!================================================================================================== +!================================================================================================================= !input arguments: integer,intent(in):: itimestep @@ -151,9 +150,9 @@ subroutine physics_timetracker(domain,dt,clock,itimestep,xtime_s) real(kind=RKIND):: utc_s real(kind=RKIND):: xtime_m -!================================================================================================== -! write(0,*) -! write(0,*) '--- enter subroutine physics_timetracker: itimestep = ', itimestep +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine physics_timetracker: itimestep = ', itimestep call mpas_pool_get_config(domain%blocklist%configs,'config_convection_scheme',config_convection_scheme) call mpas_pool_get_config(domain%blocklist%configs,'config_radt_lw_scheme' ,config_radt_lw_scheme ) @@ -346,9 +345,9 @@ subroutine physics_timetracker(domain,dt,clock,itimestep,xtime_s) end subroutine physics_timetracker -!================================================================================================== +!================================================================================================================= subroutine physics_run_init(configs,mesh,state,clock,stream_manager) -!================================================================================================== +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: mesh @@ -376,9 
+375,11 @@ subroutine physics_run_init(configs,mesh,state,clock,stream_manager) config_camrad_abs_update, & config_greeness_update + logical,pointer:: config_sst_update logical,pointer:: config_frac_seaice + logical,pointer:: config_microp_re + - integer,pointer:: config_n_microp integer,pointer:: cam_dim1 integer,pointer:: nMonths integer,pointer:: nAerosols,nAerLevels,nOznLevels @@ -392,9 +393,9 @@ subroutine physics_run_init(configs,mesh,state,clock,stream_manager) character(len=StrKIND) :: stream_interval integer:: ierr -!================================================================================================== -! write(0,*) -! write(0,*) '--- enter subroutine physics_run_init:' +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine physics_run_init:' call mpas_pool_get_config(configs,'config_convection_scheme',config_convection_scheme) call mpas_pool_get_config(configs,'config_gwdo_scheme' ,config_gwdo_scheme ) @@ -406,15 +407,16 @@ subroutine physics_run_init(configs,mesh,state,clock,stream_manager) call mpas_pool_get_config(configs,'config_radt_sw_scheme' ,config_radt_sw_scheme ) call mpas_pool_get_config(configs,'config_sfclayer_scheme' ,config_sfclayer_scheme ) - call mpas_pool_get_config(configs,'config_conv_interval' ,config_conv_interval ) - call mpas_pool_get_config(configs,'config_pbl_interval' ,config_pbl_interval ) - call mpas_pool_get_config(configs,'config_radtlw_interval' ,config_radtlw_interval ) - call mpas_pool_get_config(configs,'config_radtsw_interval' ,config_radtsw_interval ) - call mpas_pool_get_config(configs,'config_bucket_update' ,config_bucket_update ) - call mpas_pool_get_config(configs,'config_camrad_abs_update' ,config_camrad_abs_update ) - call mpas_pool_get_config(configs,'config_greeness_update' ,config_greeness_update ) - call mpas_pool_get_config(configs,'config_frac_seaice' ,config_frac_seaice ) - call 
mpas_pool_get_config(configs,'config_n_microp' ,config_n_microp ) + call mpas_pool_get_config(configs,'config_conv_interval' ,config_conv_interval ) + call mpas_pool_get_config(configs,'config_pbl_interval' ,config_pbl_interval ) + call mpas_pool_get_config(configs,'config_radtlw_interval' ,config_radtlw_interval ) + call mpas_pool_get_config(configs,'config_radtsw_interval' ,config_radtsw_interval ) + call mpas_pool_get_config(configs,'config_bucket_update' ,config_bucket_update ) + call mpas_pool_get_config(configs,'config_camrad_abs_update',config_camrad_abs_update) + call mpas_pool_get_config(configs,'config_greeness_update' ,config_greeness_update ) + call mpas_pool_get_config(configs,'config_sst_update' ,config_sst_update ) + call mpas_pool_get_config(configs,'config_frac_seaice' ,config_frac_seaice ) + call mpas_pool_get_config(configs,'config_microp_re' ,config_microp_re ) call mpas_pool_get_config(configs,'config_dt',config_dt) @@ -450,7 +452,7 @@ subroutine physics_run_init(configs,mesh,state,clock,stream_manager) ' KDS =', i4,3x,'KDE =', i8,//, & ' ITS =', i4,3x,'ITE =', i8,/, & ' JTS =', i4,3x,'JTE =', i8,/, & - ' KTS =', i4,3x,'KTE =', i8) + ' KTS =', i4,3x,'KTE =', i8,/) startTime = mpas_get_clock_time(clock, MPAS_START_TIME, ierr) call mpas_get_time(curr_time=startTime,YYYY=yr,H=h,M=m,S=s,S_n=s_n,S_d=s_d,DoY=DoY,ierr=ierr) @@ -548,9 +550,9 @@ subroutine physics_run_init(configs,mesh,state,clock,stream_manager) call physics_error_fatal('subroutine physics_init: error creating alarm greeness') !set alarm for updating the surface boundary conditions: - call MPAS_stream_mgr_get_property(stream_manager, 'surface', MPAS_STREAM_PROPERTY_RECORD_INTV, stream_interval, & - direction=MPAS_STREAM_INPUT, ierr=ierr) - if(trim(stream_interval) /= 'none') then + if (config_sst_update) then + call MPAS_stream_mgr_get_property(stream_manager, 'surface', MPAS_STREAM_PROPERTY_RECORD_INTV, stream_interval, & + direction=MPAS_STREAM_INPUT, ierr=ierr) call 
mpas_set_timeInterval(alarmTimeStep,timeString=stream_interval,ierr=ierr) alarmStartTime = startTime call mpas_add_clock_alarm(clock,sfcbdyAlarmID,alarmStartTime,alarmTimeStep,ierr=ierr) @@ -644,8 +646,8 @@ subroutine physics_run_init(configs,mesh,state,clock,stream_manager) jms=1 ; jme=1 kms=1 ; kme = nVertLevels+1 - ids=ims ; ide=ime - jds=jms ; jde=jme + ids=ims ; ide=ime + 1 + jds=jms ; jde=jme + 1 kds=kms ; kde=kme its=ims ; ite = ime @@ -674,10 +676,19 @@ subroutine physics_run_init(configs,mesh,state,clock,stream_manager) !initialization of local physics time-steps: !... dynamics: - dt_dyn = config_dt + dt_dyn = config_dt !... cloud microphysics: - n_microp = config_n_microp - dt_microp = dt_dyn/n_microp !for now. + dt_microp = dt_dyn + n_microp = 1 + if(trim(microp_scheme)=='mp_thompson') then + dt_microp = 90._RKIND + n_microp = max(nint(dt_dyn/dt_microp),1) + dt_microp = dt_dyn / n_microp + if(dt_dyn <= dt_microp) dt_microp = dt_dyn + endif + write(0,*) '--- specifics on cloud microphysics option microp_scheme = ',trim(microp_scheme) + write(0,*) '--- dt_microp = ', dt_microp + write(0,*) '--- n_microp = ', n_microp !... convection: l_conv = .false. n_cu = nint(dt_cu/dt_dyn) @@ -725,9 +736,27 @@ subroutine physics_run_init(configs,mesh,state,clock,stream_manager) xice_threshold = 0.02 endif +!initialization for the calculation of the cloud effective radii of cloud water, cloud ice, and snow: + has_reqc = 0 + has_reqi = 0 + has_reqs = 0 + if(config_microp_re) then + if(trim(microp_scheme)=='mp_thompson' .or. & + trim(microp_scheme)=='mp_wsm6') then + if(trim(radt_lw_scheme)=='rrtmg_lw' .and. 
trim(radt_sw_scheme)=='rrtmg_sw') then + has_reqc = 1 + has_reqi = 1 + has_reqs = 1 + endif + endif + endif + write(0,*) '--- has_reqc = ', has_reqc + write(0,*) '--- has_reqi = ', has_reqi + write(0,*) '--- has_reqs = ', has_reqs + end subroutine physics_run_init -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_manager -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_o3climatology.F b/src/core_atmosphere/physics/mpas_atmphys_o3climatology.F index 5d445f4e8a..bcad245ae6 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_o3climatology.F +++ b/src/core_atmosphere/physics/mpas_atmphys_o3climatology.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_o3climatology use mpas_kind_types use mpas_derived_types @@ -27,40 +27,38 @@ module mpas_atmphys_o3climatology integer,parameter:: latsiz = 64 integer,parameter:: lonsiz = 1 -!>\brief -!> mpas_atmphys_o3climatology contains the subroutines needed to initialize,interpolate,and update -!> the climatological monthly-mean ozone volume mixing ratios o3clim to the MPAS grid. Input data -!> files are the same as the ones used in the CAM long- and short-wave radiation codes. 
-!> when namelist parameter config_o3climatology is set to true, o3clim is used in the RRTMG long- -!> wave and short-wave radiation codes,and replaces the annual-mean ozone sounding used by default. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-07-03. - -!>\details -!> subroutines in mpas_atmphys_o3climatology: -!> ------------------------------------------ -!> init_o3climatology : read the CAM ozone data files. -!> update_o3climatology : interpolates the ozone volume mixing ratio to the current Julian day -!> as done for the greeness fraction in the MPAS time manager. -!> o3climatology_from_MPAS: interpolates the ozone volume mixing ratio to the current Julian day -!> as in the CAM radiation codes. -!> vinterp_ozn : vertical interpolation of the ozone volume mixing ratios from fixed -!> ozone pressure levels to the MPAS pressure levels. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +!mpas_atmphys_o3climatology contains the subroutines needed to initialize,interpolate,and update +!the climatological monthly-mean ozone volume mixing ratios o3clim to the MPAS grid. Input data +!files are the same as the ones used in the CAM long- and short-wave radiation codes. +!when namelist parameter config_o3climatology is set to true, o3clim is used in the RRTMG long- +!wave and short-wave radiation codes,and replaces the annual-mean ozone sounding used by default. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-07-03. +! +! subroutines in mpas_atmphys_o3climatology: +! ------------------------------------------ +! init_o3climatology : read the CAM ozone data files. +! 
update_o3climatology : interpolates the ozone volume mixing ratio to the current Julian day +! as done for the greeness fraction in the MPAS time manager. +! o3climatology_from_MPAS: interpolates the ozone volume mixing ratio to the current Julian day +! as in the CAM radiation codes. +! vinterp_ozn : vertical interpolation of the ozone volume mixing ratios from fixed +! ozone pressure levels to the MPAS pressure levels. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. contains -!================================================================================================== +!================================================================================================================= subroutine init_o3climatology(mesh,atm_input) -!================================================================================================== +!================================================================================================================= !This subroutine assumes a uniform distribution of ozone concentration. It should be replaced !with monthly climatology varying ozone distribution. @@ -90,9 +88,9 @@ subroutine init_o3climatology(mesh,atm_input) real(kind=RKIND),dimension(latsiz):: lat_ozone real(kind=RKIND),dimension(:,:,:,:),allocatable:: ozmixin -!-------------------------------------------------------------------------------------------------- -! write(0,*) -! 
write(0,*) '--- enter subroutine physics_init_o3:' +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine physics_init_o3:' call mpas_pool_get_dimension(mesh,'nCells',nCells) call mpas_pool_get_dimension(mesh,'nMonths',num_months) @@ -196,9 +194,9 @@ subroutine init_o3climatology(mesh,atm_input) end subroutine init_o3climatology -!================================================================================================== +!================================================================================================================= subroutine update_o3climatology(current_date,mesh,atm_input,diag_physics) -!================================================================================================== +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in) :: mesh @@ -216,7 +214,7 @@ subroutine update_o3climatology(current_date,mesh,atm_input,diag_physics) !local variables: integer:: iCell,iLev -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- ! write(0,*) ! 
write(0,*) '--- enter subroutine physics_update_o3:' @@ -233,9 +231,9 @@ subroutine update_o3climatology(current_date,mesh,atm_input,diag_physics) end subroutine update_o3climatology -!================================================================================================== +!================================================================================================================= subroutine o3climatology_from_MPAS(julian,mesh,atm_input,diag_physics) -!================================================================================================== +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: mesh @@ -264,8 +262,7 @@ subroutine o3climatology_from_MPAS(julian,mesh,atm_input,diag_physics) real(kind=r8):: fact1_r8, fact2_r8 real(kind=RKIND):: fact1,fact2 -!-------------------------------------------------------------------------------------------------- - +!----------------------------------------------------------------------------------------------------------------- !write(0,*) !write(0,*) '--- enter subroutine o3climatology_from_MPAS:' @@ -331,7 +328,7 @@ subroutine o3climatology_from_MPAS(julian,mesh,atm_input,diag_physics) end subroutine o3climatology_from_MPAS -!================================================================================================== +!================================================================================================================= subroutine vinterp_ozn (lchnk, ncol, pcols, pver, pmid_in, pin_in, levsiz, ozmix_in, o3vmr_out) !-------------------------------------------------------------------------------------------------- ! @@ -341,15 +338,15 @@ subroutine vinterp_ozn (lchnk, ncol, pcols, pver, pmid_in, pin_in, levsiz, ozmix ! ! Author: Bruce Briegleb ! 
-!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- ! use shr_kind_mod, only: r8 => shr_kind_r8 ! use ppgrid ! use phys_grid, only: get_lat_all_p, get_lon_all_p ! use comozp ! use abortutils, only: endrun -!-------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- implicit none -!-------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- ! ! Arguments ! @@ -489,6 +486,6 @@ subroutine vinterp_ozn (lchnk, ncol, pcols, pver, pmid_in, pin_in, levsiz, ozmix return end subroutine vinterp_ozn -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_o3climatology -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_packages.F b/src/core_atmosphere/physics/mpas_atmphys_packages.F new file mode 100644 index 0000000000..43f8977640 --- /dev/null +++ b/src/core_atmosphere/physics/mpas_atmphys_packages.F @@ -0,0 +1,172 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! 
distributed with this code, or at http://mpas-dev.github.com/license.html +! +!================================================================================================================= + module mpas_atmphys_packages + use mpas_kind_types + use mpas_derived_types,only : mpas_pool_type,mpas_io_context_type + use mpas_pool_routines,only : mpas_pool_get_config,mpas_pool_get_package + use mpas_io_units,only : stderrUnit + + implicit none + private + public:: atmphys_setup_packages + +!mpas_atmphys_packages contains the definitions of all physics packages. +!Laura D. Fowler (laura@ucar.edu) / 2016-03-10. + + + contains + + +!================================================================================================================= + function atmphys_setup_packages(configs,packages,iocontext) result(ierr) +!================================================================================================================= + +!inout arguments: + type (mpas_pool_type), intent(inout) :: configs + type (mpas_pool_type), intent(inout) :: packages + type (mpas_io_context_type), intent(inout) :: iocontext + +!local variables: + character(len=StrKIND),pointer:: config_microp_scheme + character(len=StrKIND),pointer:: config_convection_scheme + character(len=StrKIND),pointer:: config_pbl_scheme + logical,pointer:: mp_kessler_in,mp_thompson_in,mp_wsm6_in + logical,pointer:: cu_grell_freitas_in,cu_kain_fritsch_in,cu_tiedtke_in + logical,pointer:: bl_mynn_in,bl_ysu_in + + integer :: ierr + +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- enter subroutine atmphys_setup_packages:' + + ierr = 0 + + write(0,*) '----- Setting up package variables -----' + write(0,*) + +!--- initialization of all packages for parameterizations of cloud microphysics: + + call mpas_pool_get_config(configs,'config_microp_scheme',config_microp_scheme) + + nullify(mp_kessler_in) + call 
mpas_pool_get_package(packages,'mp_kessler_inActive',mp_kessler_in) + + nullify(mp_thompson_in) + call mpas_pool_get_package(packages,'mp_thompson_inActive',mp_thompson_in) + + nullify(mp_wsm6_in) + call mpas_pool_get_package(packages,'mp_wsm6_inActive',mp_wsm6_in) + + if(.not.associated(mp_kessler_in) .or. & + .not.associated(mp_thompson_in) .or. & + .not.associated(mp_wsm6_in)) then + write(stderrUnit,*) '====================================================================================' + write(stderrUnit,*) '* Error while setting up packages for cloud microphysics options in atmosphere core.' + write(stderrUnit,*) '====================================================================================' + ierr = 1 + return + endif + + mp_kessler_in = .false. + mp_thompson_in = .false. + mp_wsm6_in = .false. + + if(config_microp_scheme == 'mp_kessler') then + mp_kessler_in = .true. + elseif(config_microp_scheme == 'mp_thompson') then + mp_thompson_in = .true. + elseif(config_microp_scheme == 'mp_wsm6') then + mp_wsm6_in = .true. + endif + + write(0,*) ' mp_kessler_in = ', mp_kessler_in + write(0,*) ' mp_thompson_in = ', mp_thompson_in + write(0,*) ' mp_wsm6_in = ', mp_wsm6_in + +!--- initialization of all packages for parameterizations of convection: + + call mpas_pool_get_config(configs,'config_convection_scheme',config_convection_scheme) + + nullify(cu_grell_freitas_in) + call mpas_pool_get_package(packages,'cu_grell_freitas_inActive',cu_grell_freitas_in) + + nullify(cu_kain_fritsch_in) + call mpas_pool_get_package(packages,'cu_kain_fritsch_inActive',cu_kain_fritsch_in) + + nullify(cu_tiedtke_in) + call mpas_pool_get_package(packages,'cu_tiedtke_inActive',cu_tiedtke_in) + + if(.not.associated(cu_grell_freitas_in) .or. & + .not.associated(cu_kain_fritsch_in) .or. 
& + .not.associated(cu_tiedtke_in) ) then + write(stderrUnit,*) '====================================================================================' + write(stderrUnit,*) '* Error while setting up packages for convection options in atmosphere core.' + write(stderrUnit,*) '====================================================================================' + ierr = 1 + return + endif + + cu_grell_freitas_in = .false. + cu_kain_fritsch_in = .false. + cu_tiedtke_in = .false. + + if(config_convection_scheme=='cu_grell_freitas') then + cu_grell_freitas_in = .true. + elseif(config_convection_scheme == 'cu_kain_fritsch') then + cu_kain_fritsch_in = .true. + elseif(config_convection_scheme == 'cu_tiedtke' .or. & + config_convection_scheme == 'cu_ntiedtke') then + cu_tiedtke_in = .true. + endif + + write(0,*) ' cu_grell_freitas_in = ', cu_grell_freitas_in + write(0,*) ' cu_kain_fritsch_in = ', cu_kain_fritsch_in + write(0,*) ' cu_tiedtke_in = ', cu_tiedtke_in + +!--- initialization of all packages for parameterizations of surface layer and planetary boundary layer: + + call mpas_pool_get_config(configs,'config_pbl_scheme',config_pbl_scheme) + + nullify(bl_mynn_in) + call mpas_pool_get_package(packages,'bl_mynn_inActive',bl_mynn_in) + + nullify(bl_ysu_in) + call mpas_pool_get_package(packages,'bl_ysu_inActive',bl_ysu_in) + + if(.not.associated(bl_mynn_in) .or. & + .not.associated(bl_ysu_in)) then + write(stderrUnit,*) '====================================================================================' + write(stderrUnit,*) '* Error while setting up packages for planetary layer options in atmosphere core.' + write(stderrUnit,*) '====================================================================================' + ierr = 1 + return + endif + + bl_mynn_in = .false. + bl_ysu_in = .false. + + if(config_pbl_scheme=='bl_mynn') then + bl_mynn_in = .true. + elseif(config_pbl_scheme == 'bl_ysu') then + bl_ysu_in = .true. 
+ endif + + write(0,*) ' bl_mynn_in = ', bl_mynn_in + write(0,*) ' bl_ysu_in = ', bl_ysu_in + write(0,*) + + end function atmphys_setup_packages + +!================================================================================================================= + end module mpas_atmphys_packages +!================================================================================================================= + + + diff --git a/src/core_atmosphere/physics/mpas_atmphys_rrtmg_lwinit.F b/src/core_atmosphere/physics/mpas_atmphys_rrtmg_lwinit.F index c6a5b95d95..5cf275ea4f 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_rrtmg_lwinit.F +++ b/src/core_atmosphere/physics/mpas_atmphys_rrtmg_lwinit.F @@ -7,7 +7,7 @@ ! #define DM_BCAST_MACRO(A) call mpas_dmpar_bcast_reals(dminfo,size(A),A) -!================================================================================================== +!================================================================================================================= module mpas_atmphys_rrtmg_lwinit use mpas_dmpar use mpas_kind_types @@ -23,30 +23,28 @@ module mpas_atmphys_rrtmg_lwinit public:: rrtmg_initlw_forMPAS -!>\brief main initialization module for the RRTMG long wave radiation code, based on the MPI -!> decomposition used in MPAS. It replaces the initialization in module_ra_rrtgm_lw.F. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_rrtmg_lwinit: -!> ----------------------------------------- -!> rrtmg_lwinit_forMPAS: call rrtmg_lwlookuptable from subroutine init_radiation_lw. -!> rrtmg_lwlookuptable : read and broadcast all input data on MPAS nodes using MPI decomposition. -!> +!Main initialization module for the RRTMG long wave radiation code, based on the MPI decomposition +!used in MPAS. It replaces the initialization in module_ra_rrtgm_lw.F. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! 
subroutines in mpas_atmphys_rrtmg_lwinit: +! ----------------------------------------- +! rrtmg_lwinit_forMPAS: call rrtmg_lwlookuptable from subroutine init_radiation_lw. +! rrtmg_lwlookuptable : read and broadcast all input data on MPAS nodes using MPI decomposition. contains -!================================================================================================== + +!================================================================================================================= subroutine rrtmg_initlw_forMPAS(dminfo) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info):: dminfo -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- !read in absorption coefficients and other data: call rrtmg_lwlookuptable(dminfo) @@ -57,9 +55,9 @@ subroutine rrtmg_initlw_forMPAS(dminfo) end subroutine rrtmg_initlw_forMPAS -!================================================================================================== +!================================================================================================================= subroutine rrtmg_lwlookuptable(dminfo) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info),intent(in):: dminfo @@ -68,7 +66,8 @@ subroutine rrtmg_lwlookuptable(dminfo) integer:: i,istat,rrtmg_unit logical:: opened character(len=StrKIND):: errmess -!-------------------------------------------------------------------------------------------------- + 
+!----------------------------------------------------------------------------------------------------------------- !get a unit to open init file: istat = -999 @@ -125,7 +124,7 @@ subroutine rrtmg_lwlookuptable(dminfo) end subroutine rrtmg_lwlookuptable -!================================================================================================== +!================================================================================================================= ! ************************************************************************** ! RRTMG Longwave Radiative Transfer Model @@ -1591,6 +1590,6 @@ subroutine lw_kgb16(rrtmg_unit,dminfo) end subroutine lw_kgb16 -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_rrtmg_lwinit -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_rrtmg_swinit.F b/src/core_atmosphere/physics/mpas_atmphys_rrtmg_swinit.F index c07c44e88d..7b93e1b1b6 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_rrtmg_swinit.F +++ b/src/core_atmosphere/physics/mpas_atmphys_rrtmg_swinit.F @@ -9,7 +9,7 @@ #define DM_BCAST_MACRO(A) call mpas_dmpar_bcast_reals(dminfo,size(A),A) #define DM_BCAST_REAL(A) call mpas_dmpar_bcast_real(dminfo,A) -!================================================================================================== +!================================================================================================================= module mpas_atmphys_rrtmg_swinit use mpas_dmpar use mpas_kind_types @@ -25,29 +25,28 @@ module mpas_atmphys_rrtmg_swinit public:: rrtmg_initsw_forMPAS -!>\brief main initialization module for the 
RRTMG short wave radiation code, based on the MPI -!> decomposition used in MPAS. It replaces the initialization in module_ra_rrtgm_sw.F. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_rrtmg_swinit: -!> ----------------------------------------- -!> rrtmg_swinit_forMPAS: call rrtmg_swlookuptable from subroutine init_radiation_sw. -!> rrtmg_swlookuptable : read and broadcast all input data on MPAS nodes using MPI decomposition. -!> rrtmg_swinit : added initialization specific to rrtmg_sw. - +!Main initialization module for the RRTMG short wave radiation code, based on the MPI decomposition +!used in MPAS. It replaces the initialization in module_ra_rrtgm_sw.F. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_rrtmg_swinit: +! ----------------------------------------- +! rrtmg_swinit_forMPAS: call rrtmg_swlookuptable from subroutine init_radiation_sw. +! rrtmg_swlookuptable : read and broadcast all input data on MPAS nodes using MPI decomposition. +! rrtmg_swinit : added initialization specific to rrtmg_sw. 
+ contains -!================================================================================================== + +!================================================================================================================= subroutine rrtmg_initsw_forMPAS(dminfo) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info):: dminfo -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- !read in absorption coefficients and other data: call rrtmg_swlookuptable(dminfo) @@ -58,9 +57,9 @@ subroutine rrtmg_initsw_forMPAS(dminfo) end subroutine rrtmg_initsw_forMPAS -!================================================================================================== +!================================================================================================================= subroutine rrtmg_swlookuptable(dminfo) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info),intent(in):: dminfo @@ -69,7 +68,7 @@ subroutine rrtmg_swlookuptable(dminfo) integer:: i,istat,rrtmg_unit logical:: opened character(len=StrKIND):: errmess -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- !get a unit to open init file: if(dminfo % my_proc_id == IO_NODE) then @@ -124,7 +123,7 @@ subroutine rrtmg_swlookuptable(dminfo) end subroutine rrtmg_swlookuptable 
-!================================================================================================== +!================================================================================================================= ! ************************************************************************** ! RRTMG Shortwave Radiative Transfer Model @@ -1207,6 +1206,6 @@ subroutine sw_kgb29(rrtmg_unit,dminfo) end subroutine sw_kgb29 -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_rrtmg_swinit -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_todynamics.F b/src/core_atmosphere/physics/mpas_atmphys_todynamics.F index 2b07c74736..1627e686c0 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_todynamics.F +++ b/src/core_atmosphere/physics/mpas_atmphys_todynamics.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_todynamics use mpas_kind_types use mpas_derived_types @@ -13,91 +13,107 @@ module mpas_atmphys_todynamics use mpas_dmpar use mpas_atmphys_constants, only: R_d,R_v,degrad + use mpas_atmphys_vars, only: pbl_scheme,convection_scheme implicit none private - public:: physics_addtend - - -!>\brief interface between the physics parameterizations and the non-hydrostatic dynamical core. -!>\author Laura D. 
Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_todynamics: -!> --------------------------------------- -!> physics_addtend: add and mass-weigh tendencies before being added to dynamics tendencies. -!> tend_toEdges : interpolate wind-tendencies from centers to edges of grid-cells. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * renamed config_conv_deep_scheme to config_convection_scheme. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-18. + public:: physics_addtend, physics_get_tend + + +!Interface between the physics parameterizations and the non-hydrostatic dynamical core. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! +! subroutines in mpas_atmphys_todynamics: +! --------------------------------------- +! physics_addtend: add and mass-weigh tendencies before being added to dynamics tendencies. +! tend_toEdges : interpolate wind-tendencies from centers to edges of grid-cells. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * added calculation of the advective tendency of the potential temperature due to horizontal +! and vertical advection, and horizontal mixing (diffusion). +! Laura D. Fowler (birch.mmm.ucar.edu) / 2013-11-19. +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * renamed config_conv_deep_scheme to config_convection_scheme. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +! * renamed "tiedtke" with "cu_tiedtke". +! Laura D. 
Fowler (laura@ucar.edu) / 2016-03-22. +! * modified the sourcecode to accommodate the packages "cu_kain_fritsch_in" and "cu_tiedtke_in". +! Laura D. Fowler (laura@ucar.edu) / 2016-03-24. +! * added the calculation of rthdynten which is the tendency of potential temperature due to horizontal and +! vertical advections needed in the Grell-Freitas scheme. +! Laura D. Fowler (laura@ucar.edu) / 2016-03-30. +! * added the option bl_mynn for the calculation of the tendency for the cloud ice number concentration. +! Laura D. Fowler (laura@ucar.edu) / 2016-04-11. +! * in subroutine physics_get_tend_work, added the option cu_ntiedtke in the calculation of rucuten_Edge. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-28. contains -!================================================================================================== - subroutine physics_addtend(block, mesh, state, diag, tend, tend_physics, configs, rk_step) -!================================================================================================== +!================================================================================================================= + subroutine physics_addtend(block, mesh, state, diag, tend, tend_physics, configs, rk_step, dynamics_substep) +!================================================================================================================= + + use mpas_atm_dimensions !input variables: -!---------------- type(block_type),intent(in),target:: block type(mpas_pool_type),intent(in):: mesh type(mpas_pool_type),intent(in):: state - type(mpas_pool_type),intent(in):: diag - type(mpas_pool_type),intent(inout):: tend_physics type(mpas_pool_type),intent(in):: configs integer, intent(in):: rk_step + integer, intent(in):: dynamics_substep !inout variables: -!---------------- + type(mpas_pool_type),intent(inout):: diag type(mpas_pool_type),intent(inout):: tend + type(mpas_pool_type),intent(inout):: tend_physics !local variables: -!---------------- + character(len=StrKIND), 
pointer :: config_pbl_scheme, config_convection_scheme, & + config_radt_lw_scheme, config_radt_sw_scheme + integer:: i,iCell,k,n integer,pointer:: index_qv, index_qc, index_qr, index_qi, index_qs, index_qg - integer,pointer:: nCells,nCellsSolve,nEdges,nEdgesSolve,nVertLevels + integer,pointer:: index_ni + integer,pointer:: nCells,nCellsSolve,nEdges,nEdgesSolve real(kind=RKIND),dimension(:,:),pointer:: mass ! time level 2 rho_zz real(kind=RKIND),dimension(:,:),pointer:: mass_edge ! diag rho_edge - real(kind=RKIND),dimension(:,:),pointer:: theta_m,qv ! time level 1 + real(kind=RKIND),dimension(:,:),pointer:: theta_m ! time level 1 real(kind=RKIND),dimension(:,:,:),pointer:: scalars real(kind=RKIND),dimension(:,:),pointer:: rthblten,rqvblten,rqcblten, & rqiblten,rublten,rvblten + real(kind=RKIND),dimension(:,:),pointer:: rniblten real(kind=RKIND),dimension(:,:),pointer:: rthcuten,rqvcuten,rqccuten, & rqrcuten,rqicuten,rqscuten, & rucuten,rvcuten real(kind=RKIND),dimension(:,:),pointer:: rthratenlw,rthratensw + real(kind=RKIND),dimension(:,:),pointer:: rthdynten - real(kind=RKIND),dimension(:,:),pointer:: tend_theta,tend_u + real(kind=RKIND),dimension(:,:),pointer:: tend_rtheta_adv + real(kind=RKIND),dimension(:,:),pointer:: tend_u_phys !nick + real(kind=RKIND),dimension(:,:),pointer :: tend_theta,tend_theta_euler,tend_diabatic,tend_u real(kind=RKIND),dimension(:,:,:),pointer:: tend_scalars - real(kind=RKIND),dimension(:,:),pointer:: tend_qv,tend_qc,tend_qr,tend_qi,tend_qs,tend_qg + real(kind=RKIND):: coeff real(kind=RKIND):: tem - real(kind=RKIND),dimension(:,:),allocatable:: rublten_Edge,rucuten_Edge + real(kind=RKIND),dimension(:,:),pointer:: rublten_Edge,rucuten_Edge - character(len=StrKIND), pointer :: config_pbl_scheme, config_convection_scheme, & - config_radt_lw_scheme, config_radt_sw_scheme - -!ldf (2011-12-16): real(kind=RKIND),dimension(:,:),allocatable:: theta,tend_th -!ldf end. 
-!================================================================================================== +!================================================================================================================= call mpas_pool_get_dimension(mesh, 'nCells', nCells) call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) call mpas_pool_get_dimension(mesh, 'nEdgesSolve', nEdgesSolve) - call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) call mpas_pool_get_config(configs, 'config_pbl_scheme', config_pbl_scheme) call mpas_pool_get_config(configs, 'config_convection_scheme', config_convection_scheme) @@ -109,136 +125,285 @@ subroutine physics_addtend(block, mesh, state, diag, tend, tend_physics, configs call mpas_pool_get_array(state, 'rho_zz', mass, 2) call mpas_pool_get_array(diag , 'rho_edge', mass_edge) + call mpas_pool_get_array(diag , 'tend_rtheta_adv', tend_rtheta_adv) + + call mpas_pool_get_array(diag , 'tend_u_phys', tend_u_phys) !nick + call mpas_pool_get_dimension(state, 'index_qv', index_qv) call mpas_pool_get_dimension(state, 'index_qc', index_qc) call mpas_pool_get_dimension(state, 'index_qr', index_qr) call mpas_pool_get_dimension(state, 'index_qi', index_qi) call mpas_pool_get_dimension(state, 'index_qs', index_qs) call mpas_pool_get_dimension(state, 'index_qg', index_qg) - qv => scalars(index_qv,:,:) + call mpas_pool_get_dimension(state, 'index_ni', index_ni) call mpas_pool_get_array(tend_physics, 'rublten', rublten) call mpas_pool_get_array(tend_physics, 'rvblten', rvblten) + call mpas_pool_get_array(tend_physics, 'rublten_Edge', rublten_Edge) call mpas_pool_get_array(tend_physics, 'rthblten', rthblten) call mpas_pool_get_array(tend_physics, 'rqvblten', rqvblten) call mpas_pool_get_array(tend_physics, 'rqcblten', rqcblten) call mpas_pool_get_array(tend_physics, 'rqiblten', rqiblten) + call mpas_pool_get_array(tend_physics, 'rniblten', rniblten) call 
mpas_pool_get_array(tend_physics, 'rucuten', rucuten) call mpas_pool_get_array(tend_physics, 'rvcuten', rvcuten) + call mpas_pool_get_array(tend_physics, 'rucuten_Edge', rucuten_Edge) call mpas_pool_get_array(tend_physics, 'rthcuten', rthcuten) call mpas_pool_get_array(tend_physics, 'rqvcuten', rqvcuten) call mpas_pool_get_array(tend_physics, 'rqccuten', rqccuten) call mpas_pool_get_array(tend_physics, 'rqrcuten', rqrcuten) call mpas_pool_get_array(tend_physics, 'rqicuten', rqicuten) call mpas_pool_get_array(tend_physics, 'rqscuten', rqscuten) + call mpas_pool_get_array(tend_physics, 'rthdynten', rthdynten) + + call mpas_pool_get_array(tend,'rt_diabatic_tend',tend_diabatic) call mpas_pool_get_array(tend_physics, 'rthratenlw', rthratenlw) call mpas_pool_get_array(tend_physics, 'rthratensw', rthratensw) - call mpas_pool_get_array(tend, 'u', tend_u) - call mpas_pool_get_array(tend, 'theta_m', tend_theta) - call mpas_pool_get_array(tend, 'scalars_tend', tend_scalars) - tend_qv => tend_scalars(index_qv,:,:) - tend_qc => tend_scalars(index_qc,:,:) - tend_qr => tend_scalars(index_qr,:,:) - tend_qi => tend_scalars(index_qi,:,:) - tend_qs => tend_scalars(index_qs,:,:) - tend_qg => tend_scalars(index_qg,:,:) + call mpas_pool_get_array(tend,'u' , tend_u ) + call mpas_pool_get_array(tend,'theta_m' , tend_theta ) + call mpas_pool_get_array(tend,'theta_euler' ,tend_theta_euler) + call mpas_pool_get_array(tend,'scalars_tend',tend_scalars ) !initialize the tendency for the potential temperature and all scalars due to PBL, convection, !and longwave and shortwave radiation: allocate(theta(nVertLevels,nCellsSolve) ) allocate(tend_th(nVertLevels,nCellsSolve)) tend_th = 0._RKIND - tend_qv = 0._RKIND - tend_qc = 0._RKIND - tend_qr = 0._RKIND - tend_qi = 0._RKIND - tend_qs = 0._RKIND - tend_qg = 0._RKIND - -!add coupled tendencies due to PBL processes: - if(config_pbl_scheme .ne. 'off') then - allocate(rublten_Edge(nVertLevels,nEdges)) - rublten_Edge(:,:) = 0. 
- call tend_toEdges(block,mesh,rublten,rvblten,rublten_Edge) - do i = 1, nEdgesSolve - do k = 1, nVertLevels - tend_u(k,i)=tend_u(k,i)+rublten_Edge(k,i)*mass_edge(k,i) - enddo - enddo - deallocate(rublten_Edge) - do i = 1, nCellsSolve - do k = 1, nVertLevels - tend_th(k,i) = tend_th(k,i) + rthblten(k,i)*mass(k,i) - tend_qv(k,i) = tend_qv(k,i) + rqvblten(k,i)*mass(k,i) - tend_qc(k,i) = tend_qc(k,i) + rqcblten(k,i)*mass(k,i) - tend_qi(k,i) = tend_qi(k,i) + rqiblten(k,i)*mass(k,i) - enddo - enddo - endif - -!add coupled tendencies due to convection: - if(config_convection_scheme .ne. 'off') then + tend_scalars(:,:,:) = 0._RKIND - if(config_convection_scheme .eq. 'tiedtke') then - allocate(rucuten_Edge(nVertLevels,nEdges)) - rucuten_Edge(:,:) = 0. - call tend_toEdges(block,mesh,rucuten,rvcuten,rucuten_Edge) - do i = 1, nEdgesSolve - do k = 1, nVertLevels - tend_u(k,i)=tend_u(k,i)+rucuten_Edge(k,i)*mass_edge(k,i) - enddo - enddo - deallocate(rucuten_Edge) - endif + call physics_addtend_work(block, mesh, nCells, nEdges, nCellsSolve, nEdgesSolve, & + rk_step, dynamics_substep, & + config_pbl_scheme, config_convection_scheme, config_radt_lw_scheme, config_radt_sw_scheme, & + index_qv, index_qc, index_qr, index_qi, index_qs, index_ni, & + rublten, rvblten, mass_edge, rublten_Edge, tend_u, & + rucuten, rvcuten, rucuten_Edge, & + tend_th, tend_scalars, mass, rthblten, rqvblten, rqcblten, rqiblten, rniblten, & + rthcuten, rqvcuten, rqccuten, rqrcuten, rqicuten, rqscuten, & + rthratenlw, rthratensw, rthdynten, & + tend_u_phys, tend_rtheta_adv, tend_diabatic, & + theta, theta_m, scalars, tend_theta, tend_theta_euler & + ) - do i = 1, nCellsSolve - do k = 1, nVertLevels - tend_th(k,i) = tend_th(k,i) + rthcuten(k,i)*mass(k,i) - tend_qv(k,i) = tend_qv(k,i) + rqvcuten(k,i)*mass(k,i) - tend_qc(k,i) = tend_qc(k,i) + rqccuten(k,i)*mass(k,i) - tend_qr(k,i) = tend_qr(k,i) + rqrcuten(k,i)*mass(k,i) - tend_qi(k,i) = tend_qi(k,i) + rqicuten(k,i)*mass(k,i) - tend_qs(k,i) = tend_qs(k,i) + 
rqscuten(k,i)*mass(k,i) - enddo - enddo - endif -!add coupled tendencies due to longwave radiation: - if(config_radt_lw_scheme .ne. 'off') then - do i = 1, nCellsSolve - do k = 1, nVertLevels - tend_th(k,i) = tend_th(k,i) + rthratenlw(k,i)*mass(k,i) - enddo - enddo - endif - -!add coupled tendencies due to shortwave radiation: - if(config_radt_sw_scheme .ne. 'off') then - do i = 1, nCellsSolve - do k = 1, nVertLevels - tend_th(k,i) = tend_th(k,i) + rthratensw(k,i)*mass(k,i) - enddo - enddo - endif - -!if non-hydrostatic core, convert the tendency for the potential temperature to a -!tendency for the modified potential temperature: - do i = 1, nCellsSolve - do k = 1, nVertLevels - theta(k,i) = theta_m(k,i) / (1. + R_v/R_d * qv(k,i)) - tend_th(k,i) = (1. + R_v/R_d * qv(k,i)) * tend_th(k,i) & - + R_v/R_d * theta(k,i) * tend_qv(k,i) - tend_theta(k,i) = tend_theta(k,i) + tend_th(k,i) - enddo - enddo deallocate(theta) deallocate(tend_th) - if(rk_step .eq. 3) then +! if(rk_step .eq. 3) then +! write(0,*) +! write(0,*) '--- enter subroutine physics_addtend:' +! write(0,*) 'max rthblten = ',maxval(rthblten(:,1:nCellsSolve)) +! write(0,*) 'min rthblten = ',minval(rthblten(:,1:nCellsSolve)) +! write(0,*) 'max rthcuten = ',maxval(rthcuten(:,1:nCellsSolve)) +! write(0,*) 'min rthcuten = ',minval(rthcuten(:,1:nCellsSolve)) +! write(0,*) 'max rthratenlw = ',maxval(rthratenlw(:,1:nCellsSolve)) +! write(0,*) 'min rthratenlw = ',minval(rthratenlw(:,1:nCellsSolve)) +! write(0,*) 'max rthratensw = ',maxval(rthratensw(:,1:nCellsSolve)) +! write(0,*) 'min rthratensw = ',minval(rthratensw(:,1:nCellsSolve)) +! write(0,*) '--- end subroutine physics_addtend' +! write(0,*) +! 
endif + +!formats: + 201 format(2i6,10(1x,e15.8)) + 202 format(3i6,10(1x,e15.8)) + + end subroutine physics_addtend + + +!================================================================================================================= + subroutine physics_get_tend( block, mesh, state, diag, tend, tend_physics, configs, rk_step, dynamics_substep, & + tend_ru_physics, tend_rtheta_physics, tend_rho_physics ) +!================================================================================================================= + + use mpas_atm_dimensions + +!input variables: + type(block_type),intent(in),target:: block + type(mpas_pool_type),intent(in):: mesh + type(mpas_pool_type),intent(in):: state + type(mpas_pool_type),intent(in):: configs + integer, intent(in):: rk_step + integer, intent(in):: dynamics_substep + +!inout variables: + type(mpas_pool_type),intent(inout):: diag + type(mpas_pool_type),intent(inout):: tend + type(mpas_pool_type),intent(inout):: tend_physics + + real(kind=RKIND),dimension(:,:) :: tend_ru_physics, tend_rtheta_physics, tend_rho_physics + +!local variables: + character(len=StrKIND), pointer :: config_pbl_scheme, config_convection_scheme, & + config_radt_lw_scheme, config_radt_sw_scheme + + integer:: i,iCell,k,n + integer,pointer:: index_qv, index_qc, index_qr, index_qi, index_qs, index_qg + integer,pointer:: index_ni + integer,pointer:: nCells,nCellsSolve,nEdges,nEdgesSolve + + real(kind=RKIND),dimension(:,:),pointer:: mass ! time level 2 rho_zz + real(kind=RKIND),dimension(:,:),pointer:: mass_edge ! diag rho_edge + real(kind=RKIND),dimension(:,:),pointer:: theta_m ! 
time level 1 + real(kind=RKIND),dimension(:,:,:),pointer:: scalars + real(kind=RKIND),dimension(:,:),pointer:: rthblten,rqvblten,rqcblten, & + rqiblten,rublten,rvblten + real(kind=RKIND),dimension(:,:),pointer:: rniblten + real(kind=RKIND),dimension(:,:),pointer:: rthcuten,rqvcuten,rqccuten, & + rqrcuten,rqicuten,rqscuten, & + rucuten,rvcuten + real(kind=RKIND),dimension(:,:),pointer:: rthratenlw,rthratensw + real(kind=RKIND),dimension(:,:),pointer:: rthdynten + + real(kind=RKIND),dimension(:,:),pointer:: tend_rtheta_adv + real(kind=RKIND),dimension(:,:),pointer:: tend_u_phys !nick + real(kind=RKIND),dimension(:,:),pointer :: tend_theta,tend_theta_euler,tend_diabatic,tend_u + real(kind=RKIND),dimension(:,:,:),pointer:: tend_scalars + real(kind=RKIND):: coeff + + real(kind=RKIND):: tem + real(kind=RKIND),dimension(:,:),pointer:: rublten_Edge,rucuten_Edge + + real(kind=RKIND),dimension(:,:),allocatable:: theta,tend_th + + +!================================================================================================================= + call mpas_pool_get_dimension(mesh, 'nCells', nCells) + call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) + call mpas_pool_get_dimension(mesh, 'nEdgesSolve', nEdgesSolve) + + call mpas_pool_get_config(configs, 'config_pbl_scheme', config_pbl_scheme) + call mpas_pool_get_config(configs, 'config_convection_scheme', config_convection_scheme) + call mpas_pool_get_config(configs, 'config_radt_lw_scheme', config_radt_lw_scheme) + call mpas_pool_get_config(configs, 'config_radt_sw_scheme', config_radt_sw_scheme) + + call mpas_pool_get_array(state, 'theta_m', theta_m, 1) + call mpas_pool_get_array(state, 'scalars', scalars, 1) + call mpas_pool_get_array(state, 'rho_zz', mass, 2) + call mpas_pool_get_array(diag , 'rho_edge', mass_edge) + + call mpas_pool_get_array(diag , 'tend_rtheta_adv', tend_rtheta_adv) + + call mpas_pool_get_array(diag , 'tend_u_phys', tend_u_phys) !nick + 
+ call mpas_pool_get_dimension(state, 'index_qv', index_qv) + call mpas_pool_get_dimension(state, 'index_qc', index_qc) + call mpas_pool_get_dimension(state, 'index_qr', index_qr) + call mpas_pool_get_dimension(state, 'index_qi', index_qi) + call mpas_pool_get_dimension(state, 'index_qs', index_qs) + call mpas_pool_get_dimension(state, 'index_qg', index_qg) + call mpas_pool_get_dimension(state, 'index_ni', index_ni) + + call mpas_pool_get_array(tend_physics, 'rublten', rublten) + call mpas_pool_get_array(tend_physics, 'rvblten', rvblten) + call mpas_pool_get_array(tend_physics, 'rublten_Edge', rublten_Edge) + call mpas_pool_get_array(tend_physics, 'rthblten', rthblten) + call mpas_pool_get_array(tend_physics, 'rqvblten', rqvblten) + call mpas_pool_get_array(tend_physics, 'rqcblten', rqcblten) + call mpas_pool_get_array(tend_physics, 'rqiblten', rqiblten) + call mpas_pool_get_array(tend_physics, 'rniblten', rniblten) + + call mpas_pool_get_array(tend_physics, 'rucuten', rucuten) + call mpas_pool_get_array(tend_physics, 'rvcuten', rvcuten) + call mpas_pool_get_array(tend_physics, 'rucuten_Edge', rucuten_Edge) + call mpas_pool_get_array(tend_physics, 'rthcuten', rthcuten) + call mpas_pool_get_array(tend_physics, 'rqvcuten', rqvcuten) + call mpas_pool_get_array(tend_physics, 'rqccuten', rqccuten) + call mpas_pool_get_array(tend_physics, 'rqrcuten', rqrcuten) + call mpas_pool_get_array(tend_physics, 'rqicuten', rqicuten) + call mpas_pool_get_array(tend_physics, 'rqscuten', rqscuten) + call mpas_pool_get_array(tend_physics, 'rthdynten', rthdynten) + + call mpas_pool_get_array(tend,'rt_diabatic_tend',tend_diabatic) + + call mpas_pool_get_array(tend_physics, 'rthratenlw', rthratenlw) + call mpas_pool_get_array(tend_physics, 'rthratensw', rthratensw) + + call mpas_pool_get_array(tend,'u' , tend_u ) + call mpas_pool_get_array(tend,'theta_m' , tend_theta ) + call mpas_pool_get_array(tend,'theta_euler' ,tend_theta_euler) + call 
mpas_pool_get_array(tend,'scalars_tend',tend_scalars ) + +!initialize the tendency for the potential temperature and all scalars due to PBL, convection, +!and longwave and shortwave radiation: +! allocate(theta(nVertLevels,nCellsSolve) ) + allocate(tend_th(nVertLevels,nCellsSolve)) + tend_th = 0._RKIND + + tend_scalars(:,:,:) = 0._RKIND + + tend_ru_physics(:,:) = 0._RKIND + tend_rtheta_physics(:,:) = 0._RKIND + tend_rho_physics(:,:) = 0._RKIND ! NB: rho tendency is not currently supplied by physics, but this + ! field may be later filled with IAU or other tendencies + + ! + ! In case some variables are not allocated due to their associated packages, + ! we need to make their pointers associated here to avoid triggering run-time + ! checks when calling physics_get_tend_work + ! + if (.not. associated(rucuten)) allocate(rucuten(0,0)) + if (.not. associated(rvcuten)) allocate(rvcuten(0,0)) + if (.not. associated(rqrcuten)) allocate(rqrcuten(0,0)) + if (.not. associated(rqscuten)) allocate(rqscuten(0,0)) + if (.not. associated(rniblten)) allocate(rniblten(0,0)) + if (.not. associated(rthdynten)) allocate(rthdynten(0,0)) + if (.not. associated(rublten)) allocate(rublten(0,0)) + if (.not. associated(rvblten)) allocate(rvblten(0,0)) + if (.not. associated(rthblten)) allocate(rthblten(0,0)) + if (.not. associated(rqvblten)) allocate(rqvblten(0,0)) + if (.not. associated(rqcblten)) allocate(rqcblten(0,0)) + if (.not. associated(rqiblten)) allocate(rqiblten(0,0)) + if (.not. associated(rthcuten)) allocate(rthcuten(0,0)) + if (.not. associated(rqvcuten)) allocate(rqvcuten(0,0)) + if (.not. associated(rqccuten)) allocate(rqccuten(0,0)) + if (.not. 
associated(rqicuten)) allocate(rqicuten(0,0)) + + call physics_get_tend_work(block, mesh, nCells, nEdges, nCellsSolve, nEdgesSolve, & + rk_step, dynamics_substep, & + config_pbl_scheme, config_convection_scheme, config_radt_lw_scheme, config_radt_sw_scheme, & + index_qv, index_qc, index_qr, index_qi, index_qs, index_ni, & + rublten, rvblten, mass_edge, rublten_Edge, & + tend_ru_physics, & ! change for addtend for u + rucuten, rvcuten, rucuten_Edge, & + tend_th, tend_scalars, mass, rthblten, rqvblten, rqcblten, rqiblten, rniblten, & + rthcuten, rqvcuten, rqccuten, rqrcuten, rqicuten, rqscuten, & + rthratenlw, rthratensw, rthdynten, & + tend_u_phys, tend_rtheta_adv, tend_diabatic, & + theta_m, scalars, & + tend_rtheta_physics, & ! change for addtend for theta_m + tend_theta_euler & + ) + + ! + ! Clean up any pointers that were allocated with zero size before the call to + ! physics_get_tend_work + ! + if (size(rucuten) == 0) deallocate(rucuten) + if (size(rvcuten) == 0) deallocate(rvcuten) + if (size(rqrcuten) == 0) deallocate(rqrcuten) + if (size(rqscuten) == 0) deallocate(rqscuten) + if (size(rniblten) == 0) deallocate(rniblten) + if (size(rthdynten) == 0) deallocate(rthdynten) + if (size(rublten) == 0) deallocate(rublten) + if (size(rvblten) == 0) deallocate(rvblten) + if (size(rthblten) == 0) deallocate(rthblten) + if (size(rqvblten) == 0) deallocate(rqvblten) + if (size(rqcblten) == 0) deallocate(rqcblten) + if (size(rqiblten) == 0) deallocate(rqiblten) + if (size(rthcuten) == 0) deallocate(rthcuten) + if (size(rqvcuten) == 0) deallocate(rqvcuten) + if (size(rqccuten) == 0) deallocate(rqccuten) + if (size(rqicuten) == 0) deallocate(rqicuten) + +! deallocate(theta) + deallocate(tend_th) + +! if(rk_step .eq. 3) then ! write(0,*) ! write(0,*) '--- enter subroutine physics_addtend:' ! write(0,*) 'max rthblten = ',maxval(rthblten(:,1:nCellsSolve)) @@ -251,63 +416,428 @@ subroutine physics_addtend(block, mesh, state, diag, tend, tend_physics, configs ! 
write(0,*) 'min rthratensw = ',minval(rthratensw(:,1:nCellsSolve)) ! write(0,*) '--- end subroutine physics_addtend' ! write(0,*) - endif +! endif !formats: 201 format(2i6,10(1x,e15.8)) 202 format(3i6,10(1x,e15.8)) - end subroutine physics_addtend + end subroutine physics_get_tend + + !================================================================================================== + subroutine physics_addtend_work(block, mesh, nCells, nEdges, nCellsSolve, nEdgesSolve, & + rk_step, dynamics_substep, & + config_pbl_scheme, config_convection_scheme, config_radt_lw_scheme, config_radt_sw_scheme, & + index_qv, index_qc, index_qr, index_qi, index_qs, index_ni, & + rublten, rvblten, mass_edge, rublten_Edge, tend_u, & + rucuten, rvcuten, rucuten_Edge, & + tend_th, tend_scalars, mass, rthblten, rqvblten, rqcblten, rqiblten, rniblten, & + rthcuten, rqvcuten, rqccuten, rqrcuten, rqicuten, rqscuten, & + rthratenlw, rthratensw, rthdynten, & + tend_u_phys, tend_rtheta_adv, tend_diabatic, & + theta, theta_m, scalars, tend_theta, tend_theta_euler & + ) +!================================================================================================== + + use mpas_atm_dimensions + + implicit none + + type(block_type), intent(in) :: block + type(mpas_pool_type), intent(in) :: mesh + integer, intent(in) :: nCells, nEdges, nCellsSolve, nEdgesSolve + integer, intent(in) :: rk_step, dynamics_substep + character(len=StrKIND), intent(in) :: config_pbl_scheme + character(len=StrKIND), intent(in) :: config_convection_scheme + character(len=StrKIND), intent(in) :: config_radt_lw_scheme + character(len=StrKIND), intent(in) :: config_radt_sw_scheme + integer, intent(in) :: index_qv, index_qc, index_qr, index_qi, index_qs, index_ni + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rublten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rvblten + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(in) :: mass_edge + real (kind=RKIND), 
dimension(nVertLevels,nEdges+1), intent(inout) :: rublten_Edge + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(inout) :: tend_u + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rucuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rvcuten + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(inout) :: rucuten_Edge + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout) :: tend_th + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1), intent(inout) :: tend_scalars + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: mass + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rthblten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqvblten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqcblten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqiblten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rniblten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rthcuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqvcuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqccuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqrcuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqicuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqscuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rthratenlw + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rthratensw + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout) :: rthdynten + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(inout) :: tend_u_phys + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: tend_rtheta_adv + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: tend_diabatic + real (kind=RKIND), 
dimension(nVertLevels,nCells+1), intent(inout) :: theta + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: theta_m + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1), intent(in) :: scalars + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout) :: tend_theta + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: tend_theta_euler + + integer :: i, k + real (kind=RKIND) :: coeff + + + !MGD does this need to happen for GF or Tiedtke? + if (config_convection_scheme .eq. 'cu_grell_freitas') then + ! + !save the tendency of the modified potential temperature due to horizontal and vertical advections, + !and mixing before adding tendencies due to physics processes (PBL,convection,radiation) for use in + !the Grell-GF and new Tiedtke parameterization of convection: + ! + do i = 1, nCellsSolve + do k = 1, nVertLevels + coeff = 1._RKIND/(1._RKIND + R_v/R_d*scalars(index_qv,k,i)) + rthdynten(k,i) = coeff*tend_rtheta_adv(k,i)/mass(k,i) + enddo + enddo + end if + + + !add coupled tendencies due to PBL processes: + if (config_pbl_scheme .ne. 'off') then + if (rk_step == 1 .and. dynamics_substep == 1) then + call tend_toEdges(block,mesh,rublten,rvblten,rublten_Edge) + + !MGD for PV budget? should a similar line be in the cumulus section below? 
+ tend_u_phys(1:nVertLevels,1:nEdges) = rublten_Edge(1:nVertLevels,1:nEdges) + end if + + do i = 1, nEdgesSolve + do k = 1, nVertLevels + tend_u(k,i)=tend_u(k,i)+rublten_Edge(k,i)*mass_edge(k,i) + enddo + enddo + + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_th(k,i) = tend_th(k,i) + rthblten(k,i)*mass(k,i) + tend_scalars(index_qv,k,i) = tend_scalars(index_qv,k,i) + rqvblten(k,i)*mass(k,i) + tend_scalars(index_qc,k,i) = tend_scalars(index_qc,k,i) + rqcblten(k,i)*mass(k,i) + tend_scalars(index_qi,k,i) = tend_scalars(index_qi,k,i) + rqiblten(k,i)*mass(k,i) + enddo + enddo + + pbl_select: select case (trim(pbl_scheme)) + + case("bl_mynn") + + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_scalars(index_ni,k,i) = tend_scalars(index_ni,k,i) + rniblten(k,i)*mass(k,i) + enddo + enddo + + case default + + end select pbl_select + endif + + !add coupled tendencies due to convection: + if (config_convection_scheme .ne. 'off') then + + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_th(k,i) = tend_th(k,i) + rthcuten(k,i)*mass(k,i) + tend_scalars(index_qv,k,i) = tend_scalars(index_qv,k,i) + rqvcuten(k,i)*mass(k,i) + tend_scalars(index_qc,k,i) = tend_scalars(index_qc,k,i) + rqccuten(k,i)*mass(k,i) + tend_scalars(index_qi,k,i) = tend_scalars(index_qi,k,i) + rqicuten(k,i)*mass(k,i) + enddo + enddo + + convection_select: select case(convection_scheme) + + case('cu_grell_freitas') + + do i = 1, nCellsSolve + do k = 1, nVertLevels + coeff = 1._RKIND/(1._RKIND + R_v/R_d*scalars(index_qv,k,i)) + rthdynten(k,i) = coeff*(tend_theta(k,i)-tend_theta_euler(k,i) & + - mass(k,i)*tend_diabatic(k,i))/mass(k,i) + enddo + enddo + + + case('cu_kain_fritsch') + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_scalars(index_qr,k,i) = tend_scalars(index_qr,k,i) + rqrcuten(k,i)*mass(k,i) + tend_scalars(index_qs,k,i) = tend_scalars(index_qs,k,i) + rqscuten(k,i)*mass(k,i) + enddo + enddo + + case('cu_tiedtke') + if (rk_step == 1 .and. 
dynamics_substep == 1) then + call tend_toEdges(block,mesh,rucuten,rvcuten,rucuten_Edge) + + tend_u_phys(1:nVertLevels,1:nEdges) = tend_u_phys(1:nVertLevels,1:nEdges) & + + rucuten_Edge(1:nVertLevels,1:nEdges) + end if + do i = 1, nEdgesSolve + do k = 1, nVertLevels + tend_u(k,i)=tend_u(k,i)+rucuten_Edge(k,i)*mass_edge(k,i) + enddo + enddo + + case default + end select convection_select + endif + + !add coupled tendencies due to longwave radiation: + if (config_radt_lw_scheme .ne. 'off') then + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_th(k,i) = tend_th(k,i) + rthratenlw(k,i)*mass(k,i) + enddo + enddo + endif + + !add coupled tendencies due to shortwave radiation: + if (config_radt_sw_scheme .ne. 'off') then + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_th(k,i) = tend_th(k,i) + rthratensw(k,i)*mass(k,i) + enddo + enddo + endif + + !if non-hydrostatic core, convert the tendency for the potential temperature to a + !tendency for the modified potential temperature: + do i = 1, nCellsSolve + do k = 1, nVertLevels + theta(k,i) = theta_m(k,i) / (1. + R_v/R_d * scalars(index_qv,k,i)) + tend_th(k,i) = (1. 
+ R_v/R_d * scalars(index_qv,k,i)) * tend_th(k,i) & + + R_v/R_d * theta(k,i) * tend_scalars(index_qv,k,i) + tend_theta(k,i) = tend_theta(k,i) + tend_th(k,i) + enddo + enddo + end subroutine physics_addtend_work + + !================================================================================================== + subroutine physics_get_tend_work(block, mesh, nCells, nEdges, nCellsSolve, nEdgesSolve, & + rk_step, dynamics_substep, & + config_pbl_scheme, config_convection_scheme, config_radt_lw_scheme, config_radt_sw_scheme, & + index_qv, index_qc, index_qr, index_qi, index_qs, index_ni, & + rublten, rvblten, mass_edge, rublten_Edge, tend_u, & + rucuten, rvcuten, rucuten_Edge, & + tend_th, tend_scalars, mass, rthblten, rqvblten, rqcblten, rqiblten, rniblten, & + rthcuten, rqvcuten, rqccuten, rqrcuten, rqicuten, rqscuten, & + rthratenlw, rthratensw, rthdynten, & + tend_u_phys, tend_rtheta_adv, tend_diabatic, & + theta_m, scalars, tend_theta, tend_theta_euler & + ) !================================================================================================== + + use mpas_atm_dimensions + + implicit none + + type(block_type), intent(in) :: block + type(mpas_pool_type), intent(in) :: mesh + integer, intent(in) :: nCells, nEdges, nCellsSolve, nEdgesSolve + integer, intent(in) :: rk_step, dynamics_substep + character(len=StrKIND), intent(in) :: config_pbl_scheme + character(len=StrKIND), intent(in) :: config_convection_scheme + character(len=StrKIND), intent(in) :: config_radt_lw_scheme + character(len=StrKIND), intent(in) :: config_radt_sw_scheme + integer, intent(in) :: index_qv, index_qc, index_qr, index_qi, index_qs, index_ni + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rublten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rvblten + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(in) :: mass_edge + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(inout) :: rublten_Edge + real (kind=RKIND), 
dimension(nVertLevels,nEdges+1), intent(inout) :: tend_u + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rucuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rvcuten + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(inout) :: rucuten_Edge + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout) :: tend_th + real (kind=RKIND), dimension(num_scalars,nVertLevels,nCells+1), intent(inout) :: tend_scalars + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: mass + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rthblten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqvblten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqcblten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqiblten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rniblten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rthcuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqvcuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqccuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqrcuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqicuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rqscuten + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rthratenlw + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: rthratensw + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout) :: rthdynten + real (kind=RKIND), dimension(nVertLevels,nEdges+1), intent(inout) :: tend_u_phys + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: tend_rtheta_adv + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: tend_diabatic + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: theta_m + real (kind=RKIND), 
dimension(num_scalars,nVertLevels,nCells+1), intent(in) :: scalars + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(inout) :: tend_theta + real (kind=RKIND), dimension(nVertLevels,nCells+1), intent(in) :: tend_theta_euler + + integer :: i, k + real (kind=RKIND) :: coeff + + !add coupled tendencies due to PBL processes: + if (config_pbl_scheme .ne. 'off') then + if (rk_step == 1 .and. dynamics_substep == 1) then + call tend_toEdges(block,mesh,rublten,rvblten,rublten_Edge) + + !MGD for PV budget? should a similar line be in the cumulus section below? + tend_u_phys(1:nVertLevels,1:nEdges) = rublten_Edge(1:nVertLevels,1:nEdges) + end if + + do i = 1, nEdgesSolve + do k = 1, nVertLevels + tend_u(k,i)=tend_u(k,i)+rublten_Edge(k,i)*mass_edge(k,i) + enddo + enddo + + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_th(k,i) = tend_th(k,i) + rthblten(k,i)*mass(k,i) + tend_scalars(index_qv,k,i) = tend_scalars(index_qv,k,i) + rqvblten(k,i)*mass(k,i) + tend_scalars(index_qc,k,i) = tend_scalars(index_qc,k,i) + rqcblten(k,i)*mass(k,i) + tend_scalars(index_qi,k,i) = tend_scalars(index_qi,k,i) + rqiblten(k,i)*mass(k,i) + enddo + enddo + + pbl_select: select case (trim(pbl_scheme)) + + case("bl_mynn") + + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_scalars(index_ni,k,i) = tend_scalars(index_ni,k,i) + rniblten(k,i)*mass(k,i) + enddo + enddo + + case default + + end select pbl_select + endif + + !add coupled tendencies due to convection: + if (config_convection_scheme .ne. 
'off') then + + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_th(k,i) = tend_th(k,i) + rthcuten(k,i)*mass(k,i) + tend_scalars(index_qv,k,i) = tend_scalars(index_qv,k,i) + rqvcuten(k,i)*mass(k,i) + tend_scalars(index_qc,k,i) = tend_scalars(index_qc,k,i) + rqccuten(k,i)*mass(k,i) + tend_scalars(index_qi,k,i) = tend_scalars(index_qi,k,i) + rqicuten(k,i)*mass(k,i) + enddo + enddo + + convection_select: select case(convection_scheme) + + case('cu_kain_fritsch') + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_scalars(index_qr,k,i) = tend_scalars(index_qr,k,i) + rqrcuten(k,i)*mass(k,i) + tend_scalars(index_qs,k,i) = tend_scalars(index_qs,k,i) + rqscuten(k,i)*mass(k,i) + enddo + enddo + + case('cu_tiedtke','cu_ntiedtke') + if (rk_step == 1 .and. dynamics_substep == 1) then + call tend_toEdges(block,mesh,rucuten,rvcuten,rucuten_Edge) + + tend_u_phys(1:nVertLevels,1:nEdges) = tend_u_phys(1:nVertLevels,1:nEdges) & + + rucuten_Edge(1:nVertLevels,1:nEdges) + end if + do i = 1, nEdgesSolve + do k = 1, nVertLevels + tend_u(k,i)=tend_u(k,i)+rucuten_Edge(k,i)*mass_edge(k,i) + enddo + enddo + + case default + end select convection_select + endif + + !add coupled tendencies due to longwave radiation: + if (config_radt_lw_scheme .ne. 'off') then + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_th(k,i) = tend_th(k,i) + rthratenlw(k,i)*mass(k,i) + enddo + enddo + endif + + !add coupled tendencies due to shortwave radiation: + if (config_radt_sw_scheme .ne. 'off') then + do i = 1, nCellsSolve + do k = 1, nVertLevels + tend_th(k,i) = tend_th(k,i) + rthratensw(k,i)*mass(k,i) + enddo + enddo + endif + + !if non-hydrostatic core, convert the tendency for the potential temperature to a + !tendency for the modified potential temperature: + do i = 1, nCellsSolve + do k = 1, nVertLevels + coeff = (1. 
+ R_v/R_d * scalars(index_qv,k,i)) + tend_th(k,i) = coeff * tend_th(k,i) + R_v/R_d * theta_m(k,i) * tend_scalars(index_qv,k,i) / coeff + tend_theta(k,i) = tend_theta(k,i) + tend_th(k,i) + enddo + enddo + + end subroutine physics_get_tend_work + +!================================================================================================================= subroutine tend_toEdges(block,mesh,Ux_tend,Uy_tend,U_tend) -!================================================================================================== +!================================================================================================================= + + use mpas_atm_dimensions !input arguments: -!---------------- type(block_type),intent(in),target:: block type(mpas_pool_type),intent(in):: mesh - real(kind=RKIND),intent(in),dimension(:,:):: Ux_tend,Uy_tend + real(kind=RKIND),intent(in),dimension(:,:),target:: Ux_tend,Uy_tend !output arguments: -!----------------- real(kind=RKIND),intent(out),dimension(:,:):: U_tend !local variables: -!----------------- type (field2DReal), pointer :: tempField type (field2DReal), target :: tempFieldTarget integer:: iCell,iEdge,k,j - integer,pointer:: nCells,nCellsSolve,nVertLevels - integer,dimension(:),pointer :: nEdgesOnCell - integer,dimension(:,:),pointer:: edgesOnCell + integer:: cell1, cell2 + integer,pointer:: nCells,nCellsSolve,nEdges + integer,dimension(:,:),pointer:: cellsOnEdge + + real(kind=RKIND),dimension(:,:),pointer:: Ux_tend_halo,Uy_tend_halo + real(kind=RKIND), dimension(:,:), pointer :: east, north, edgeNormalVectors - real(kind=RKIND),dimension(:,:),pointer:: east,north,edge_normal - real(kind=RKIND),dimension(:,:),allocatable,target:: Ux_tend_halo,Uy_tend_halo -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_dimension(mesh, 'nCells', nCells) call 
mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) - call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevels) + call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) call mpas_pool_get_array(mesh, 'east', east) call mpas_pool_get_array(mesh, 'north', north) - call mpas_pool_get_array(mesh, 'edgesOnCell', edgesOnCell) - call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) - call mpas_pool_get_array(mesh, 'edgeNormalVectors', edge_normal) + call mpas_pool_get_array(mesh, 'edgeNormalVectors', edgeNormalVectors) - allocate(Ux_tend_halo(nVertLevels,nCells+1)) - allocate(Uy_tend_halo(nVertLevels,nCells+1)) + call mpas_pool_get_array(mesh, 'cellsOnEdge', cellsOnEdge) - Ux_tend_halo(:,:) = 0. - Uy_tend_halo(:,:) = 0. - do iCell = 1, nCellsSolve - do k = 1, nVertLevels - Ux_tend_halo(k,iCell) = Ux_tend(k,iCell) - Uy_tend_halo(k,iCell) = Uy_tend(k,iCell) - enddo - enddo + Ux_tend_halo => Ux_tend + Uy_tend_halo => Uy_tend tempField => tempFieldTarget tempField % block => block @@ -318,6 +848,7 @@ subroutine tend_toEdges(block,mesh,Ux_tend,Uy_tend,U_tend) tempField % copyList => block % parinfo % cellsToCopy tempField % prev => null() tempField % next => null() + tempField % isActive = .true. 
tempField % array => Ux_tend_halo call mpas_dmpar_exch_halo_field(tempField) @@ -325,27 +856,26 @@ subroutine tend_toEdges(block,mesh,Ux_tend,Uy_tend,U_tend) tempField % array => Uy_tend_halo call mpas_dmpar_exch_halo_field(tempField) - U_tend(:,:) = 0.0 - do iCell = 1, nCells - do j = 1, nEdgesOnCell(iCell) - iEdge = edgesOnCell(j,iCell) - do k = 1, nVertLevels - U_tend(k,iEdge) = U_tend(k,iEdge) & - + 0.5 * Ux_tend_halo(k,iCell) * (edge_normal(1,iEdge) * east(1,iCell) & - + edge_normal(2,iEdge) * east(2,iCell) & - + edge_normal(3,iEdge) * east(3,iCell)) & - + 0.5 * Uy_tend_halo(k,iCell) * (edge_normal(1,iEdge) * north(1,iCell) & - + edge_normal(2,iEdge) * north(2,iCell) & - + edge_normal(3,iEdge) * north(3,iCell)) - enddo - enddo - enddo - - deallocate(Ux_tend_halo) - deallocate(Uy_tend_halo) + do iEdge = 1, nEdges + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) + + U_tend(:,iEdge) = Ux_tend_halo(:,cell1) * 0.5 * (edgeNormalVectors(1,iEdge) * east(1,cell1) & + + edgeNormalVectors(2,iEdge) * east(2,cell1) & + + edgeNormalVectors(3,iEdge) * east(3,cell1)) & + + Uy_tend_halo(:,cell1) * 0.5 * (edgeNormalVectors(1,iEdge) * north(1,cell1) & + + edgeNormalVectors(2,iEdge) * north(2,cell1) & + + edgeNormalVectors(3,iEdge) * north(3,cell1)) & + + Ux_tend_halo(:,cell2) * 0.5 * (edgeNormalVectors(1,iEdge) * east(1,cell2) & + + edgeNormalVectors(2,iEdge) * east(2,cell2) & + + edgeNormalVectors(3,iEdge) * east(3,cell2)) & + + Uy_tend_halo(:,cell2) * 0.5 * (edgeNormalVectors(1,iEdge) * north(1,cell2) & + + edgeNormalVectors(2,iEdge) * north(2,cell2) & + + edgeNormalVectors(3,iEdge) * north(3,cell2)) + end do end subroutine tend_toEdges -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_todynamics 
-!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_update.F b/src/core_atmosphere/physics/mpas_atmphys_update.F index 5a3f922437..2b067fdad9 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_update.F +++ b/src/core_atmosphere/physics/mpas_atmphys_update.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_update use mpas_kind_types use mpas_derived_types @@ -20,33 +20,31 @@ module mpas_atmphys_update update_radiation_diagnostics -!>\brief update diagnostics. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> subroutines in mpas_atmphys_update: -!> ----------------------------------- -!> physics_update : not used. -!> update_radiation_diagnostics: update accumulated radiation diagnostics. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. -!> Laura D. Fowler (laura@ucar.edu) / 2014-04-22. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. -!> * renamed config_conv_deep_scheme to config_convection_scheme. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +!Update diagnostics. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_update: +! ----------------------------------- +! physics_update : not used. +! 
update_radiation_diagnostics: update accumulated radiation diagnostics. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * throughout the sourcecode, replaced all "var_struct" defined arrays by local pointers. +! Laura D. Fowler (laura@ucar.edu) / 2014-04-22. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +! * renamed config_conv_deep_scheme to config_convection_scheme. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-18. contains -!================================================================================================== +!================================================================================================================= subroutine physics_update(domain,dt) -!================================================================================================== +!================================================================================================================= !input arguments: type(domain_type),intent(in):: domain @@ -56,10 +54,9 @@ subroutine physics_update(domain,dt) type(block_type),pointer:: block integer:: i,j,k -!-------------------------------------------------------------------------------------------------- - -! write(0,*) -! 
write(0,*) '--- begin physics_update:' +!----------------------------------------------------------------------------------------------------------------- +!write(0,*) +!write(0,*) '--- begin physics_update:' block => domain % blocklist do while(associated(block)) @@ -72,13 +69,14 @@ subroutine physics_update(domain,dt) end subroutine physics_update -!================================================================================================== - subroutine update_radiation_diagnostics(configs,mesh,diag_physics) -!================================================================================================== +!================================================================================================================= + subroutine update_radiation_diagnostics(configs,mesh,diag_physics,its,ite) +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: configs type(mpas_pool_type),intent(in):: mesh + integer,intent(in):: its,ite !inout arguments: type(mpas_pool_type),intent(inout):: diag_physics @@ -103,7 +101,7 @@ subroutine update_radiation_diagnostics(configs,mesh,diag_physics) !local variables and arrays: integer:: iCell -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_config(configs,'config_bucket_radt',bucket_radt) @@ -160,7 +158,7 @@ subroutine update_radiation_diagnostics(configs,mesh,diag_physics) call mpas_pool_get_array(diag_physics,'lwupt' , lwupt ) call mpas_pool_get_array(diag_physics,'lwuptc' , lwuptc ) - do iCell = 1, nCellsSolve + do iCell = its, ite !short-wave radiation: acswdnb(iCell) = acswdnb (iCell) + swdnb (iCell)*dt_dyn acswdnbc(iCell) = acswdnbc(iCell) + swdnbc(iCell)*dt_dyn @@ -183,7 +181,7 @@ subroutine 
update_radiation_diagnostics(configs,mesh,diag_physics) if(l_acradt .and. bucket_radt.gt.0._RKIND) then - do iCell = 1, nCellsSolve + do iCell = its, ite !short-wave radiation: if(acswdnb(iCell) .gt. bucket_radt) then i_acswdnb(iCell) = i_acswdnb(iCell) + 1 @@ -256,6 +254,6 @@ subroutine update_radiation_diagnostics(configs,mesh,diag_physics) end subroutine update_radiation_diagnostics -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_update -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_update_surface.F b/src/core_atmosphere/physics/mpas_atmphys_update_surface.F index 896ad50d36..8582837785 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_update_surface.F +++ b/src/core_atmosphere/physics/mpas_atmphys_update_surface.F @@ -5,7 +5,7 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_update_surface use mpas_dmpar use mpas_kind_types @@ -25,33 +25,31 @@ module mpas_atmphys_update_surface physics_update_deepsoiltemp -!>\brief update surface boundary conditions. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. 
-!> -!>\details -!> -!> subroutines in mpas_atmphys_update_surface: -!> ------------------------------------------- -!> physics_update_surface : update the surface albedo and greeness fraction. -!> physics_update_sst : update the sea-surface temperatures. -!> physics_update_sstskin : add a diurnal cycle to the sea-surface temperatures. -!> physics_update_deepsoiltemp: update the deep soil temperatures. -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> * revised subroutine physics_update_sst. -!> Laura D. Fowler (laura@ucar.edu) / 2013-08-24. -!> * modified sourcecode to use pools. -!> Laura D. Fowler (laura@ucar.edu) / 2014-05-15. +!Update surface boundary conditions. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! subroutines in mpas_atmphys_update_surface: +! ------------------------------------------- +! physics_update_surface : update the surface albedo and greeness fraction. +! physics_update_sst : update the sea-surface temperatures. +! physics_update_sstskin : add a diurnal cycle to the sea-surface temperatures. +! physics_update_deepsoiltemp: update the deep soil temperatures. +! +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * revised subroutine physics_update_sst. +! Laura D. Fowler (laura@ucar.edu) / 2013-08-24. +! * modified sourcecode to use pools. +! Laura D. Fowler (laura@ucar.edu) / 2014-05-15. 
contains -!================================================================================================== +!================================================================================================================= subroutine physics_update_surface(current_date,config_sfc_albedo,mesh,sfc_input) -!================================================================================================== +!================================================================================================================= !input variables: type(mpas_pool_type),intent(in):: mesh @@ -75,7 +73,7 @@ subroutine physics_update_surface(current_date,config_sfc_albedo,mesh,sfc_input) !local variables: integer:: iCell -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_dimension(mesh,'nCellsSolve',nCellsSolve) @@ -109,9 +107,9 @@ subroutine physics_update_surface(current_date,config_sfc_albedo,mesh,sfc_input) end subroutine physics_update_surface -!================================================================================================== +!================================================================================================================= subroutine physics_update_sst(dminfo,config_frac_seaice,mesh,sfc_input,diag_physics) -!================================================================================================== +!================================================================================================================= !input arguments: type(dm_info),intent(in):: dminfo @@ -142,7 +140,7 @@ subroutine physics_update_sst(dminfo,config_frac_seaice,mesh,sfc_input,diag_phys real(kind=RKIND):: global_sst_min,global_sst_max real(kind=RKIND):: global_xice_min,global_xice_max 
-!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- call mpas_pool_get_dimension(mesh,'nCellsSolve',nCellsSolve) call mpas_pool_get_dimension(mesh,'nSoilLevels',nSoilLevels) @@ -170,12 +168,12 @@ subroutine physics_update_sst(dminfo,config_frac_seaice,mesh,sfc_input,diag_phys call mpas_pool_get_array(diag_physics,'sfc_emibck',sfc_emibck) call mpas_pool_get_array(diag_physics,'xicem' ,xicem ) -! write(0,*) -! write(0,*) '--- enter subroutine physics_update_sst:' -! write(0,*) '--- config_frac_seaice =', config_frac_seaice -! write(0,*) '--- xice_threshold =', xice_threshold -! write(0,*) '--- isice =', isice -! write(0,*) '--- iswater=', iswater +!write(0,*) +!write(0,*) '--- enter subroutine physics_update_sst:' +!write(0,*) '--- config_frac_seaice =', config_frac_seaice +!write(0,*) '--- xice_threshold =', xice_threshold +!write(0,*) '--- isice =', isice +!write(0,*) '--- iswater=', iswater if(config_frac_seaice) then do iCell = 1,nCellsSolve @@ -269,10 +267,10 @@ subroutine physics_update_sst(dminfo,config_frac_seaice,mesh,sfc_input,diag_phys tslb(1,iCell) = sst(iCell) endif enddo -! write(0,*) -! write(0,*) '--- nb of seaice points converted to land points = ',nb_to_land -! write(0,*) '--- nb of seaice points converted to ocean points = ',nb_to_ocean -! write(0,*) '--- nb of seaice points less than xice threshold = ',nb_removed +!write(0,*) +!write(0,*) '--- nb of seaice points converted to land points = ',nb_to_land +!write(0,*) '--- nb of seaice points converted to ocean points = ',nb_to_ocean +!write(0,*) '--- nb of seaice points less than xice threshold = ',nb_removed !finally, update the sea-ice flag. 
save xice prior to next update: do iCell = 1, nCellsSolve @@ -284,44 +282,44 @@ subroutine physics_update_sst(dminfo,config_frac_seaice,mesh,sfc_input,diag_phys enddo !local and global max and min sea-surface temperatures and fractional sea-ice: -! local_min = 999._RKIND -! local_max = -999._RKIND -! do iCell = 1,nCellsSolve -! if(xland(iCell) == 2._RKIND .and. sst(iCell) <= local_min) local_min = sst(iCell) -! if(xland(iCell) == 2._RKIND .and. sst(iCell) >= local_max) local_max = sst(iCell) -! enddo -! call mpas_dmpar_min_real(dminfo,local_min,global_sst_min) -! call mpas_dmpar_max_real(dminfo,local_max,global_sst_max) -! write(0,*) -! write(0,*) '--- min local SST = ',local_min -! write(0,*) '--- max local SST = ',local_max -! write(0,*) '--- min global SST = ',global_sst_min -! write(0,*) '--- max global SST = ',global_sst_max - -! local_min = 999._RKIND -! local_max = -999._RKIND -! do iCell = 1,nCellsSolve -! if(xland(iCell) == 1._RKIND .and. xice(iCell) <= local_min) local_min = xice(iCell) -! if(xland(iCell) == 1._RKIND .and. xice(iCell) >= local_max) local_max = xice(iCell) -! enddo -! call mpas_dmpar_min_real(dminfo,local_min,global_xice_min) -! call mpas_dmpar_max_real(dminfo,local_max,global_xice_max) -! if(local_min .eq. 999._RKIND) local_min = 0._RKIND -! if(local_max .eq. -999._RKIND) local_max = 0._RKIND -! write(0,*) -! write(0,*) '--- min local XICE = ',local_min -! write(0,*) '--- max local XICE = ',local_max -! write(0,*) '--- min global XICE = ',global_xice_min -! write(0,*) '--- max global XICE = ',global_xice_max +!local_min = 999._RKIND +!local_max = -999._RKIND +!do iCell = 1,nCellsSolve +! if(xland(iCell) == 2._RKIND .and. sst(iCell) <= local_min) local_min = sst(iCell) +! if(xland(iCell) == 2._RKIND .and. 
sst(iCell) >= local_max) local_max = sst(iCell) +!enddo +!call mpas_dmpar_min_real(dminfo,local_min,global_sst_min) +!call mpas_dmpar_max_real(dminfo,local_max,global_sst_max) +!write(0,*) +!write(0,*) '--- min local SST = ',local_min +!write(0,*) '--- max local SST = ',local_max +!write(0,*) '--- min global SST = ',global_sst_min +!write(0,*) '--- max global SST = ',global_sst_max + +!local_min = 999._RKIND +!local_max = -999._RKIND +!do iCell = 1,nCellsSolve +! if(xland(iCell) == 1._RKIND .and. xice(iCell) <= local_min) local_min = xice(iCell) +! if(xland(iCell) == 1._RKIND .and. xice(iCell) >= local_max) local_max = xice(iCell) +!enddo +!call mpas_dmpar_min_real(dminfo,local_min,global_xice_min) +!call mpas_dmpar_max_real(dminfo,local_max,global_xice_max) +!if(local_min .eq. 999._RKIND) local_min = 0._RKIND +!if(local_max .eq. -999._RKIND) local_max = 0._RKIND +!write(0,*) +!write(0,*) '--- min local XICE = ',local_min +!write(0,*) '--- max local XICE = ',local_max +!write(0,*) '--- min global XICE = ',global_xice_min +!write(0,*) '--- max global XICE = ',global_xice_max -! write(0,*) '--- end subroutine physics_update_sst' -! 
write(0,*) +!write(0,*) '--- end subroutine physics_update_sst' +!write(0,*) end subroutine physics_update_sst -!================================================================================================== +!================================================================================================================= subroutine physics_update_sstskin(dt,mesh,diag_physics,sfc_input) -!================================================================================================== +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in):: mesh @@ -352,7 +350,7 @@ subroutine physics_update_sstskin(dt,mesh,diag_physics,sfc_input) real(kind=RKIND):: fs, con1, con2, con3, con4, con5, zlan, q2, ts, phi, qn1 real(kind=RKIND):: usw, qo, swo, us, tb, dtc, dtw, alw, dtwo, delt, f1 -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- ! write(0,*) ! 
write(0,*) '--- enter subroutine physics_update_sstskin:' @@ -457,9 +455,9 @@ subroutine physics_update_sstskin(dt,mesh,diag_physics,sfc_input) end subroutine physics_update_sstskin -!================================================================================================== +!================================================================================================================= subroutine physics_update_deepsoiltemp(LeapYear,dt,julian_in,mesh,sfc_input,diag_physics) -!================================================================================================== +!================================================================================================================= !input arguments: type(mpas_pool_type),intent(in) :: mesh @@ -483,7 +481,7 @@ subroutine physics_update_deepsoiltemp(LeapYear,dt,julian_in,mesh,sfc_input,diag real(kind=RKIND),parameter:: tconst = 0.6 real(kind=RKIND):: deltat,julian,tprior,yrday -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- !write(0,*) !write(0,*) '--- enter subroutine physics_update_deepsoiltemp:' @@ -561,8 +559,8 @@ subroutine physics_update_deepsoiltemp(LeapYear,dt,julian_in,mesh,sfc_input,diag end subroutine physics_update_deepsoiltemp -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_update_surface -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_utilities.F b/src/core_atmosphere/physics/mpas_atmphys_utilities.F index 
5a9abd4940..81044354bc 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_utilities.F +++ b/src/core_atmosphere/physics/mpas_atmphys_utilities.F @@ -5,10 +5,12 @@ ! Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_utilities use mpas_kind_types + use mpas_abort, only : mpas_dmpar_global_abort + implicit none private public:: physics_error_fatal, & @@ -16,25 +18,22 @@ module mpas_atmphys_utilities character(len=StrKIND),public:: mpas_err_message - -!>\brief simple utility subroutines to broadcast messages or abort physics parameterizations. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> +!Simple utility subroutines to broadcast messages or abort physics parameterizations. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2013-05-01. contains -!================================================================================================== + +!================================================================================================================= subroutine physics_message(str) -!================================================================================================== +!================================================================================================================= !input arguments: character(len=*),intent(in):: str -!-------------------------------------------------------------------------------------------------- +!----------------------------------------------------------------------------------------------------------------- !#ifdef _MPI ! 
write(0,*) trim(str) @@ -44,22 +43,22 @@ subroutine physics_message(str) end subroutine physics_message -!================================================================================================== +!================================================================================================================= subroutine physics_error_fatal(str) -!================================================================================================== +!================================================================================================================= !input arguments: character(len=*),intent(in):: str !-------------------------------------------------------------------------------------------------- - write(0,*) - write(0,*) ( '------------------------------ FATAL CALLED ------------------------------') - write(0,*) trim(str) + call mpas_dmpar_global_abort(' ', deferredAbort=.true.) + call mpas_dmpar_global_abort('------------------------------ FATAL CALLED ------------------------------', deferredAbort=.true.) + call mpas_dmpar_global_abort(trim(str), deferredAbort=.true.) call mpas_dmpar_global_abort('ERROR: MPAS core_physics abort') end subroutine physics_error_fatal -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_utilities -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/mpas_atmphys_vars.F b/src/core_atmosphere/physics/mpas_atmphys_vars.F index 245c18705c..bdd26ce109 100644 --- a/src/core_atmosphere/physics/mpas_atmphys_vars.F +++ b/src/core_atmosphere/physics/mpas_atmphys_vars.F @@ -5,7 +5,7 @@ ! 
Additional copyright and license information can be found in the LICENSE file ! distributed with this code, or at http://mpas-dev.github.com/license.html ! -!================================================================================================== +!================================================================================================================= module mpas_atmphys_vars use mpas_kind_types @@ -14,35 +14,65 @@ module mpas_atmphys_vars save -!>\brief contains all local variables and arrays used in the physics parameterizations. -!>\author Laura D. Fowler (send comments to laura@ucar.edu). -!>\date 2013-05-01. -!> -!>\details -!> -!> add-ons and modifications: -!> -------------------------- -!> * added the variables sf_surface_physics,alswvisdir_p,alswvisdif_p,alswnirdir_p,alswnirdif_p, -!> swvisdir_p,swvisdif_p,swnirdir_p,and swnirdif_p to upgrade the RRTMG short wave radiation -!> code to WRF version 3.4.1. see definition of each individual variables below. -!> Laura D. Fowler (laura@ucar.edu) / 2013-03-11. -!> * removed call to the updated Kain-Fritsch convection scheme. -!> Laura D. Fowler (laura@ucar.edu) / 2013-05-29. -!> * added the arrays o3clim_p for implementation of monthly-varying climatological ozone in the -!> long wave and short wave RRTMG radiation codes. -!> Laura D. Fowler (laura@ucar.edu) / 2013-07-08. -!> * corrected definition of local variable dx_p. -!> Laura D. Fowler (laura@ucar.edu) / 2013-08-23. -!> * renamed local variable conv_deep_scheme to convection_scheme. -!> Laura D. Fowler (laura@ucar.edu) / 2014-09-18. -!> * added empty subroutine atmphys_vars_init that does not do anything, but needed for -!> compiling MPAS with some compilers. -!> Laura D. Fowler (laura@ucar.edu) / 2015-01-12. - - -!================================================================================================== +!mpas_atmphys_vars contains all local variables and arrays used in the physics parameterizations. +!Laura D. 
Fowler (send comments to laura@ucar.edu). +!2013-05-01. +! +! add-ons and modifications: +! -------------------------- +! * added the variables sf_surface_physics,alswvisdir_p,alswvisdif_p,alswnirdir_p,alswnirdif_p,swvisdir_p, +! swvisdif_p,swnirdir_p,and swnirdif_p to upgrade the RRTMG short wave radiation code to WRF version 3.4.1. +! see definition of each individual variables below. +! Laura D. Fowler (laura@ucar.edu) / 2013-03-11. +! * removed call to the updated Kain-Fritsch convection scheme. +! Laura D. Fowler (laura@ucar.edu) / 2013-05-29. +! * added the arrays o3clim_p for implementation of monthly-varying climatological ozone in the +! long wave and short wave RRTMG radiation codes. +! Laura D. Fowler (laura@ucar.edu) / 2013-07-08. +! * corrected definition of local variable dx_p. +! Laura D. Fowler (laura@ucar.edu) / 2013-08-23. +! * renamed local variable conv_deep_scheme to convection_scheme. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-18. +! * added empty subroutine atmphys_vars_init that does not do anything, but needed for +! compiling MPAS with some compilers. +! Laura D. Fowler (laura@ucar.edu) / 2015-01-12. +! * added local variables needed for the Thompson parameterization of cloud microphysics. +! Laura D. Fowler (laura@ucar.edu) / 2016-03-28. +! * added local variables needed for the Grell-Freitas parameterization of deep and shallow convection. +! * Laura D. Fowler (laura@ucar.edu) / 2016-03-30. +! * added local arrays needed in the MYNN surface layer scheme and PBL scheme. +! Laura D. Fowler (laura@ucar.edu) / 2016-03-31. +! * added the logical ua_phys needed in the call to subroutine sfcdiags. ua_phys is set to false. +! Laura D. Fowler (laura@ucar.edu) / 2016-05-13. +! * added the integers has_reqc,has_reqi,and has_reqs. when initialized to zero, the effective radii for cloud +! water,cloud ice,and snow are calculated using the subroutines relcalc and ricalc in subroutines rrtmg_lwrad +! and rrtmg_swrad. 
when initialized to 1, the effective radii are calculated in the Thompson cloud microphysics +! scheme instead. has_reqc,has_reqi,and has_reqs are initialized depending on the logical config_microp_re. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-08. +! * added diagnostics of the effective radii for cloud water, cloud ice, and snow used in rrtmg_lwrad. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-08. +! * added the variables qvrad_p,qcrad_p,qirad_p, and qsrad_p which are the water vapor,cloud water,cloud ice, +! and snow mixing ratios local to the calculation of the cloud fraction, and used in the radiation codes. +! Laura D. Fowler (laura@ucar.edu) / 2016-07-08. +! * added the variables rqvften and rthften which are the forcing tendencies needed to run the "new" Tiedtke +! parameterization of convection. +! Laura D. Fowler (laura@ucar.edu) / 2016-09-20. +! * added local "_sea" arrays that are needed in the surface layer scheme and land surface scheme for handling +! grid cells with fractional seaice when config_frac_seaice is set to true. also added local tsk_ice variable +! needed in the land surface scheme for handling grid cells with fractional seaice when config_frac_seaice is +! set to true. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-03. +! * added local variable regime_hold to save the original value of variable regime over seaice grid cells when +! config_frac_seaice is set to true. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-21. +! * moved the declarations of arrays delta_p,wstar_p,uoce_p,and voce_p since they are now used in both modules +! module_bl_ysu.F and module_bl_mynn.F. +! Laura D. Fowler (laura@ucar.edu) / 20016-10-27. 
+ + +!================================================================================================================= !list of physics parameterizations: -!================================================================================================== +!================================================================================================================= character(len=StrKIND),public:: microp_scheme character(len=StrKIND),public:: convection_scheme @@ -54,10 +84,10 @@ module mpas_atmphys_vars character(len=StrKIND),public:: radt_sw_scheme character(len=StrKIND),public:: sfclayer_scheme -!================================================================================================== +!================================================================================================================= !wrf-variables:these variables are needed to keep calls to different physics parameterizations !as in wrf model. -!================================================================================================== +!================================================================================================================= logical:: l_radtlw !controls call to longwave radiation parameterization. logical:: l_radtsw !controls call to shortwave radiation parameterization. @@ -66,6 +96,7 @@ module mpas_atmphys_vars logical:: l_diags !controls when to calculate physics diagnostics. logical:: l_acrain !when .true., limit to accumulated rain is applied. logical:: l_acradt !when .true., limit to lw and sw radiation is applied. + logical:: l_mp_tables !when .true., read look-up tables for Thompson cloud microphysics scheme. integer,public:: ids,ide,jds,jde,kds,kde integer,public:: ims,ime,jms,jme,kms,kme @@ -87,6 +118,7 @@ module mpas_atmphys_vars !... arrays related to surface: real(kind=RKIND),dimension(:,:),allocatable:: & + ht_p, &! 
psfc_p, &!surface pressure [Pa] ptop_p !model-top pressure [Pa] @@ -120,7 +152,11 @@ module mpas_atmphys_vars qi_p, &!cloud ice mixing ratio [kg/kg] qs_p, &!snow mixing ratio [kg/kg] qg_p !graupel mixing ratio [kg/kg] - + + real(kind=RKIND),dimension(:,:,:),allocatable:: & + ni_p, &! + nr_p ! + !... arrays located at w (vertical velocity) points, or at interface between layers: real(kind=RKIND),dimension(:,:,:),allocatable:: & w_p, &!vertical velocity [m/s] @@ -138,14 +174,14 @@ module mpas_atmphys_vars pres2_hydd_p, &!"dry" pressure located at w-velocity levels [Pa] znu_hyd_p !(pres_hyd_p / P0) needed in the Tiedtke convection scheme [Pa] -!============================================================================================= +!================================================================================================================= !... variables related to ozone climatlogy: -!============================================================================================= +!================================================================================================================= real(kind=RKIND),dimension(:,:,:),allocatable:: & o3clim_p !climatological ozone volume mixing ratio [???] -!================================================================================================== +!================================================================================================================= !... variables and arrays related to parameterization of cloud microphysics: ! warm_phase: logical that determines if we want to run warm-phase cloud microphysics only. ! If set to false, cold-phase cloud microphysics is active. In MPAS, we always assume @@ -156,7 +192,7 @@ module mpas_atmphys_vars ! which kind of hydrometeors are present. Here, we assume that all six water species ! are present, even if their mixing ratios and number concentrations are zero. 
-!================================================================================================== +!================================================================================================================= logical,parameter:: & warm_rain=.false. !warm-phase cloud microphysics only (used in WRF). @@ -169,6 +205,10 @@ module mpas_atmphys_vars f_qs = .true., &! f_qg = .true. ! + logical,parameter:: & + f_qnc = .true., &! + f_qni = .true. ! + real(kind=RKIND),dimension(:,:,:),allocatable:: & f_ice, &!fraction of cloud ice (used in WRF only). f_rain !fraction of rain (used in WRF only). @@ -182,9 +222,26 @@ module mpas_atmphys_vars graupelncv_p, &! sr_p -!================================================================================================== +!... added for the thompson and wsm6 cloud microphysics: + integer:: & + has_reqc, &! + has_reqi, &! + has_reqs + + real(kind=RKIND),dimension(:,:),allocatable:: & + ntc_p, &! + muc_p ! + real(kind=RKIND),dimension(:,:,:),allocatable:: & + rainprod_p, &! + evapprod_p, &! + recloud_p, &! + reice_p, &! + resnow_p, &! + refl10cm_p ! + +!================================================================================================================= !... variables and arrays related to parameterization of convection: -!================================================================================================== +!================================================================================================================= integer,public:: n_cu real(kind=RKIND),public:: dt_cu @@ -215,30 +272,67 @@ module mpas_atmphys_vars !... tiedtke specific arrays: real(kind=RKIND),dimension(:,:,:),allocatable:: & - znu_p, &! - rqvdynten_p, &! - rqvdynblten_p ! + znu_p ! real(kind=RKIND),dimension(:,:,:),allocatable:: & rucuten_p, &! rvcuten_p ! -!================================================================================================== +!... 
grell-freitas specific parameters and arrays: + integer, parameter:: ishallow = 1 !shallow convection used with grell scheme. + + integer,dimension(:,:),allocatable:: & + k22_shallow_p, &! + kbcon_shallow_p, &! + ktop_shallow_p, &! + kbot_shallow_p, &! + ktop_deep_p ! + + real(kind=RKIND),dimension(:,:),allocatable:: & + xmb_total_p, &! + xmb_shallow_p ! + + real(kind=RKIND),dimension(:,:,:),allocatable:: & + rthdynten_p, &! + qccu_p, &! + qicu_p ! + + real(kind=RKIND),dimension(:,:,:),allocatable:: & + rthraten_p ! + +!... grell and tiedkte specific arrays: + real(kind=RKIND),dimension(:,:,:),allocatable:: & + rqvdynten_p, &! + rqvdynblten_p, &! + rthdynblten_p ! + +!... ntiedtke specific arrays: + real(kind=RKIND),dimension(:,:,:),allocatable:: & + rqvften_p, &! + rthften_p ! + +!================================================================================================================= !... variables and arrays related to parameterization of pbl: -!================================================================================================== +!================================================================================================================= + + integer:: ysu_pblmix integer,dimension(:,:),allocatable:: & - kpbl_p !index of PBL top [-] + kpbl_p !index of PBL top [-] real(kind=RKIND),public:: dt_pbl real(kind=RKIND),dimension(:,:),allocatable:: & - ctopo_p, &!correction to topography [-] - ctopo2_p, &!correction to topography 2 [-] - hpbl_p !PBL height [m] + ctopo_p, &!correction to topography [-] + ctopo2_p, &!correction to topography 2 [-] + hpbl_p, &!PBL height [m] + delta_p, &! + wstar_p, &! + uoce_p, &! + voce_p ! real(kind=RKIND),dimension(:,:,:),allocatable:: & - exch_p !exchange coefficient [-] + exch_p !exchange coefficient [-] real(kind=RKIND),dimension(:,:,:),allocatable:: & rublten_p, &! @@ -253,78 +347,116 @@ module mpas_atmphys_vars kzm_p, &! kzq_p ! 
-!================================================================================================== +!... MYNN PBL scheme (module_bl_mynn.F): + integer,parameter:: grav_settling = 0 + + logical,parameter:: bl_mynn_tkeadvect = .false.! + integer,parameter:: bl_mynn_tkebudget = 0 ! + integer,parameter:: bl_mynn_cloudpdf = 0 ! + + real(kind=RKIND),dimension(:,:),allocatable:: & + vdfg_p ! + + real(kind=RKIND),dimension(:,:,:),allocatable:: & + dqke_p, &! + qbuoy_p, &! + qdiss_p, &! + qke_p, &! + qkeadv_p, &! + qshear_p, &! + qwt_p, &! + tkepbl_p ! + + real(kind=RKIND),dimension(:,:,:),allocatable:: & + rniblten_p ! + +!================================================================================================================= !... variables and arrays related to parameterization of gravity wave drag over orography: -!================================================================================================== +!================================================================================================================= real(kind=RKIND),dimension(:,:),allocatable:: & - var2d_p, &!orographic variance [m2] - con_p, &!orographic convexity [m2] - oa1_p, &!orographic direction asymmetry function [-] - oa2_p, &!orographic direction asymmetry function [-] - oa3_p, &!orographic direction asymmetry function [-] - oa4_p, &!orographic direction asymmetry function [-] - ol1_p, &!orographic direction asymmetry function [-] - ol2_p, &!orographic direction asymmetry function [-] - ol3_p, &!orographic direction asymmetry function [-] - ol4_p !orographic direction asymmetry function [-] + var2d_p, &!orographic variance [m2] + con_p, &!orographic convexity [m2] + oa1_p, &!orographic direction asymmetry function [-] + oa2_p, &!orographic direction asymmetry function [-] + oa3_p, &!orographic direction asymmetry function [-] + oa4_p, &!orographic direction asymmetry function [-] + ol1_p, &!orographic direction asymmetry function [-] + ol2_p, &!orographic direction asymmetry 
function [-] + ol3_p, &!orographic direction asymmetry function [-] + ol4_p !orographic direction asymmetry function [-] real(kind=RKIND),dimension(:,:),allocatable:: & - dx_p !mean distance between cell centers [m] + dx_p !mean distance between cell centers [m] real(kind=RKIND),dimension(:,:),allocatable:: & - dusfcg_p, &!vertically-integrated gwdo u-stress [Pa m s-1] - dvsfcg_p !vertically-integrated gwdo v -stress [Pa m s-1] + dusfcg_p, &!vertically-integrated gwdo u-stress [Pa m s-1] + dvsfcg_p !vertically-integrated gwdo v -stress [Pa m s-1] real(kind=RKIND),dimension(:,:,:),allocatable:: & - dtaux3d_p, &!gravity wave drag over orography u-stress [m s-1] - dtauy3d_p !gravity wave drag over orography u-stress [m s-1] + dtaux3d_p, &!gravity wave drag over orography u-stress [m s-1] + dtauy3d_p !gravity wave drag over orography u-stress [m s-1] -!================================================================================================== +!================================================================================================================= !... variables and arrays related to parameterization of surface layer: -!================================================================================================== +!================================================================================================================= + real(kind=RKIND),dimension(:,:),allocatable:: & - br_p, &!bulk richardson number [-] - cd_p, &!momentum exchange coeff at 10 meters [?] - cda_p, &!momentum exchange coeff at the lowest model level [?] + br_p, &!bulk richardson number [-] + cd_p, &!momentum exchange coeff at 10 meters [?] + cda_p, &!momentum exchange coeff at the lowest model level [?] cpm_p, &! chs_p, &! chs2_p, &! - ck_p, &!enthalpy exchange coeff at 10 meters [?] - cka_p, &!enthalpy exchange coeff at the lowest model level [?] + ck_p, &!enthalpy exchange coeff at 10 meters [?] + cka_p, &!enthalpy exchange coeff at the lowest model level [?] cqs2_p, &! 
- gz1oz0_p, &!log of z1 over z0 [-] - flhc_p, &!exchange coefficient for heat [-] - flqc_p, &!exchange coefficient for moisture [-] - hfx_p, &!upward heat flux at the surface [W/m2] - fh_p, &!integrated stability function for heat [-] - fm_p, &!integrated stability function for momentum [-] - lh_p, &!latent heat flux at the surface [W/m2] - mavail_p, &!surface moisture availability [-] - mol_p, &!T* in similarity theory [K] - pblh_p, &!PBL height [m] - psih_p, &!similarity theory for heat [-] - psim_p, &!similarity theory for momentum [-] - q2_p, &!specific humidity at 2m [kg/kg] - qfx_p, &!upward moisture flux at the surface [kg/m2/s] + gz1oz0_p, &!log of z1 over z0 [-] + flhc_p, &!exchange coefficient for heat [-] + flqc_p, &!exchange coefficient for moisture [-] + hfx_p, &!upward heat flux at the surface [W/m2] + lh_p, &!latent heat flux at the surface [W/m2] + mavail_p, &!surface moisture availability [-] + mol_p, &!T* in similarity theory [K] + pblh_p, &!PBL height [m] + psih_p, &!similarity theory for heat [-] + psim_p, &!similarity theory for momentum [-] + q2_p, &!specific humidity at 2m [kg/kg] + qfx_p, &!upward moisture flux at the surface [kg/m2/s] qgh_p, &! - qsfc_p, &!specific humidity at lower boundary [kg/kg] - regime_p, &!flag indicating PBL regime (stable_p,unstable_p,etc...) [-] - rmol_p, &!1 / Monin Ob length [-] - t2m_p, &!temperature at 2m [K] - th2m_p, &!potential temperature at 2m [K] - u10_p, &!u at 10 m [m/s] - ust_p, &!u* in similarity theory [m/s] - ustm_p, &!u* in similarity theory without vconv correction [m/s] - v10_p, &!v at 10 m [m/s] - wspd_p, &!wind speed [m/s] - znt_p, &!time-varying roughness length [m] + qsfc_p, &!specific humidity at lower boundary [kg/kg] + regime_p, &!flag indicating PBL regime (stable_p,unstable_p,etc...) 
[-] + rmol_p, &!1 / Monin Ob length [-] + t2m_p, &!temperature at 2m [K] + th2m_p, &!potential temperature at 2m [K] + u10_p, &!u at 10 m [m/s] + ust_p, &!u* in similarity theory [m/s] + ustm_p, &!u* in similarity theory without vconv correction [m/s] + v10_p, &!v at 10 m [m/s] + wspd_p, &!wind speed [m/s] + znt_p, &!time-varying roughness length [m] zol_p ! +!... arrays only in monin_obukohv (module_sf_sfclay.F): + real(kind=RKIND),dimension(:,:),allocatable:: & + fh_p, &!integrated stability function for heat [-] + fm_p !integrated stability function for momentum [-] + +!... arrays only in mynn surface layer scheme (module_sf_mynn.F): + real(kind=RKIND),dimension(:,:),allocatable:: & + ch_p, &!surface exchange coeff for heat [m/s] + qcg_p !cloud water mixing ratio at the ground surface [kg/kg] + + real(kind=RKIND),dimension(:,:,:),allocatable:: & + cov_p, &!liquid water-liquid water potential temperature covariance [K kg/kg] + qsq_p, &!liquid water variance [(kg/kg)^2] + tsq_p, &!liquid water potential temperature variance [K^2] + sh3d_p, &!stability function for heat [-] + elpbl_p !length scale from PBL [m] -!================================================================================================== +!================================================================================================================= !... 
variables and arrays related to parameterization of short-wave radiation: -!================================================================================================== +!================================================================================================================= real(kind=RKIND):: & declin, &!solar declination [-] @@ -358,9 +490,9 @@ module mpas_atmphys_vars real(kind=RKIND),dimension(:,:,:),allocatable:: & rthratensw_p !uncoupled theta tendency due to shortwave radiation [K s-1] -!================================================================================================== +!================================================================================================================= !... variables and arrays related to parameterization of long-wave radiation: -!================================================================================================== +!================================================================================================================= integer,dimension(:,:),allocatable:: & nlrad_p !number of layers added above the model top [-] @@ -387,12 +519,15 @@ module mpas_atmphys_vars lwupflxc_p ! real(kind=RKIND),dimension(:,:,:),allocatable:: & - rthratenlw_p !uncoupled theta tendency due to longwave radiation [K s-1] + rthratenlw_p, &!uncoupled theta tendency due to longwave radiation [K s-1] + rrecloud_p, &!effective radius for cloud water calculated in rrtmg_lwrad [mu] + rreice_p, &!effective radius for cloud ice calculated in rrmtg_lwrad [mu] + rresnow_p !effective radius for snow calculated in rrtmg_lwrad [mu] -!================================================================================================== +!================================================================================================================= !... variables and arrays related to parameterization of long- and short-wave radiation needed ! 
only by the "CAM" radiation codes: -!================================================================================================== +!================================================================================================================= logical:: doabsems @@ -426,19 +561,27 @@ module mpas_atmphys_vars absnxt_p, &!total nearest layer absorptivity [-] ozmixm_p !ozone mixing ratio. -!================================================================================================== +!================================================================================================================= !.. variables and arrays related to cloudiness: -!================================================================================================== +!================================================================================================================= integer,parameter:: & icloud= 1 !used in WRF only. real(kind=RKIND),dimension(:,:,:),allocatable:: & - cldfrac_p !cloud fraction [-] + cldfrac_p, &!cloud fraction [-] + qvrad_p, &!water vapor mixing ratio local to cloudiness and radiation [kg/kg] + qcrad_p, &!cloud liquid water mixing ratio local to cloudiness and radiation [kg/kg] + qirad_p, &!cloud ice mixing ratio local to cloudiness and radiation [kg/kg] + qsrad_p !snow mixing ratio local to cloudiness and radiation [kg/kg] -!================================================================================================== +!================================================================================================================= !.. variables and arrays related to land-surface parameterization: -!================================================================================================== +!================================================================================================================= + + logical,parameter:: & + ua_phys=.false. 
!option to activate UA Noah changes: a different snow-cover physics in the land-surface + !scheme. That option is not currently implemented in MPAS. integer,public:: & sf_surface_physics !used to define the land surface scheme by a number instead of name. It @@ -446,81 +589,98 @@ module mpas_atmphys_vars !albedos as functions of the land surface scheme. integer,public:: & - num_soils !number of soil layers [-] + num_soils !number of soil layers [-] integer,dimension(:,:),allocatable:: & - isltyp_p, &!dominant soil type category [-] - ivgtyp_p !dominant vegetation category [-] + isltyp_p, &!dominant soil type category [-] + ivgtyp_p !dominant vegetation category [-] real(kind=RKIND),dimension(:),allocatable:: & - dzs_p !thickness of soil layers [m] + dzs_p !thickness of soil layers [m] real(kind=RKIND),dimension(:,:,:),allocatable:: & - smcrel_p, &!soil moisture threshold below which transpiration starts to stress [-] - sh2o_p, &!unfrozen soil moisture content [volumetric fraction] - smois_p, &!soil moisture [volumetric fraction] - tslb_p !soil temperature [K] + smcrel_p, &!soil moisture threshold below which transpiration starts to stress [-] + sh2o_p, &!unfrozen soil moisture content [volumetric fraction] + smois_p, &!soil moisture [volumetric fraction] + tslb_p !soil temperature [K] real(kind=RKIND),dimension(:,:),allocatable:: & - acsnom_p, &!accumulated melted snow [kg m-2] - acsnow_p, &!accumulated snow [kg m-2] - canwat_p, &!canopy water [kg m-2] - chklowq_p, &!surface saturation flag [-] - grdflx_p, &!ground heat flux [W m-2] - lai_p, &!leaf area index [-] - noahres_p, &!residual of the noah land-surface scheme energy budget [W m-2] - potevp_p, &!potential evaporation [W m-2] - qz0_p, &!specific humidity at znt [kg kg-1] + acsnom_p, &!accumulated melted snow [kg m-2] + acsnow_p, &!accumulated snow [kg m-2] + canwat_p, &!canopy water [kg m-2] + chklowq_p, &!surface saturation flag [-] + grdflx_p, &!ground heat flux [W m-2] + lai_p, &!leaf area index [-] + 
noahres_p, &!residual of the noah land-surface scheme energy budget [W m-2] + potevp_p, &!potential evaporation [W m-2] + qz0_p, &!specific humidity at znt [kg kg-1] rainbl_p, &! - sfcrunoff_p, &!surface runoff [m s-1] - shdmin_p, &!minimum areal fractional coverage of annual green vegetation [-] - shdmax_p, &!maximum areal fractional coverage of annual green vegetation [-] - smstav_p, &!moisture availability [-] - smstot_p, &!total moisture [m3 m-3] - snopcx_p, &!snow phase change heat flux [W m-2] + sfcrunoff_p, &!surface runoff [m s-1] + shdmin_p, &!minimum areal fractional coverage of annual green vegetation [-] + shdmax_p, &!maximum areal fractional coverage of annual green vegetation [-] + smstav_p, &!moisture availability [-] + smstot_p, &!total moisture [m3 m-3] + snopcx_p, &!snow phase change heat flux [W m-2] snotime_p, &! - snowc_p, &!snow water equivalent [kg m-2] - snowh_p, &!physical snow depth [m] - swdown_p, &!downward shortwave flux at the surface [W m-2] - udrunoff_p, &!sub-surface runoff [m s-1] - tmn_p, &!soil temperature at lower boundary [K] - vegfra_p, &!vegetation fraction [-] - z0_p !background roughness length [m] + snowc_p, &!snow water equivalent [kg m-2] + snowh_p, &!physical snow depth [m] + swdown_p, &!downward shortwave flux at the surface [W m-2] + udrunoff_p, &!sub-surface runoff [m s-1] + tmn_p, &!soil temperature at lower boundary [K] + vegfra_p, &!vegetation fraction [-] + z0_p !background roughness length [m] real(kind=RKIND),dimension(:,:),allocatable:: & - alswvisdir_p, &!direct-beam surface albedo in visible spectrum [-] - alswvisdif_p, &!diffuse-beam surface albedo in visible spectrum [-] - alswnirdir_p, &!direct-beam surface albedo in near-IR spectrum [-] - alswnirdif_p !diffuse-beam surface albedo in near-IR spectrum [-] + alswvisdir_p, &!direct-beam surface albedo in visible spectrum [-] + alswvisdif_p, &!diffuse-beam surface albedo in visible spectrum [-] + alswnirdir_p, &!direct-beam surface albedo in near-IR spectrum 
[-] + alswnirdif_p !diffuse-beam surface albedo in near-IR spectrum [-] -!================================================================================================== +!================================================================================================================= !.. variables and arrays related to surface characteristics: -!================================================================================================== +!================================================================================================================= real(kind=RKIND),dimension(:,:),allocatable:: & - xlat_p, &!longitude, west is negative [degrees] - xlon_p !latitude, south is negative [degrees] + xlat_p, &!longitude, west is negative [degrees] + xlon_p !latitude, south is negative [degrees] real(kind=RKIND),dimension(:,:),allocatable:: & - sfc_albedo_p, &!surface albedo [-] - sfc_albbck_p, &!surface background albedo [-] - sfc_emibck_p, &!land surface background emissivity [-] - sfc_emiss_p, &!land surface emissivity [-] - snoalb_p, &!annual max snow albedo [-] - snow_p, &!snow water equivalent [kg m-2] - tsk_p, &!surface-skin temperature [K] - xice_p, &!ice mask [-] - xland_p !land mask (1 for land; 2 for water) [-] + sfc_albedo_p, &!surface albedo [-] + sfc_albbck_p, &!surface background albedo [-] + sfc_emibck_p, &!land surface background emissivity [-] + sfc_emiss_p, &!land surface emissivity [-] + snoalb_p, &!annual max snow albedo [-] + snow_p, &!snow water equivalent [kg m-2] + tsk_p, &!surface-skin temperature [K] + sst_p, &!sea-surface temperature [K] + xice_p, &!ice mask [-] + xland_p !land mask (1 for land; 2 for water) [-] + +!================================================================================================================= +!.. variables needed for the surface layer scheme and land surface scheme when config_frac_seaice +! is set to true. 
the arrays below have the same definition as the corresponding "_p" arrays: +!================================================================================================================= + + real(kind=RKIND),dimension(:,:),allocatable:: br_sea,ch_sea,chs_sea,chs2_sea,cpm_sea,cqs2_sea, & + flhc_sea,flqc_sea,gz1oz0_sea,hfx_sea,lh_sea,mavail_sea,mol_sea, & + psih_sea,psim_sea,fh_sea,fm_sea,qfx_sea,qgh_sea,qsfc_sea,regime_sea, & + rmol_sea,ust_sea,wspd_sea,znt_sea,zol_sea,tsk_sea,xland_sea + real(kind=RKIND),dimension(:,:),allocatable:: t2m_sea,th2m_sea,q2_sea,u10_sea,v10_sea + real(kind=RKIND),dimension(:,:),allocatable:: cd_sea,cda_sea,ck_sea,cka_sea,ustm_sea + + real(kind=RKIND),dimension(:,:),allocatable:: regime_hold + real(kind=RKIND),dimension(:,:),allocatable:: tsk_ice contains -!================================================================================================== + +!================================================================================================================= subroutine atmphys_vars_init() -!================================================================================================== +!================================================================================================================= !dummy subroutine that does not do anything. 
end subroutine atmphys_vars_init -!================================================================================================== +!================================================================================================================= end module mpas_atmphys_vars -!================================================================================================== +!================================================================================================================= diff --git a/src/core_atmosphere/physics/physics_wrf/Makefile b/src/core_atmosphere/physics/physics_wrf/Makefile index b6d15b5bb4..68810446e6 100644 --- a/src/core_atmosphere/physics/physics_wrf/Makefile +++ b/src/core_atmosphere/physics/physics_wrf/Makefile @@ -6,42 +6,63 @@ dummy: echo "****** compile physics_wrf ******" OBJS = \ - libmassv.o \ - module_bl_gwdo.o \ - module_bl_ysu.o \ - module_cam_shr_kind_mod.o \ - module_cam_support.o \ - module_cu_kfeta.o \ - module_cu_tiedtke.o \ - module_mp_kessler.o \ - module_mp_radar.o \ - module_mp_wsm6.o \ - module_ra_cam.o \ - module_ra_cam_support.o \ - module_ra_rrtmg_lw.o \ - module_ra_rrtmg_sw.o \ - module_sf_bem.o \ - module_sf_bep.o \ - module_sf_bep_bem.o \ - module_sf_noahdrv.o \ - module_sf_noahlsm.o \ - module_sf_sfclay.o \ + libmassv.o \ + module_bl_gwdo.o \ + module_bl_mynn.o \ + module_bl_ysu.o \ + module_cam_error_function.o \ + module_cam_shr_kind_mod.o \ + module_cam_support.o \ + module_cu_gf.mpas.o \ + module_cu_tiedtke.o \ + module_cu_ntiedtke.o \ + module_cu_kfeta.o \ + module_mp_kessler.o \ + module_mp_radar.o \ + module_mp_thompson.o \ + module_mp_thompson_cldfra3.o \ + module_mp_wsm6.o \ + module_ra_cam.o \ + module_ra_cam_support.o \ + module_ra_rrtmg_lw.o \ + module_ra_rrtmg_sw.o \ + module_sf_bem.o \ + module_sf_bep.o \ + module_sf_bep_bem.o \ + module_sf_sfcdiags.o \ + module_sf_mynn.o \ + module_sf_noahdrv.o \ + module_sf_noahlsm.o \ + module_sf_oml.o \ + module_sf_sfclay.o \ module_sf_urban.o 
physics_wrf: $(OBJS) ar -ru ./../libphys.a $(OBJS) # DEPENDENCIES: +module_bl_mynn.o: \ + module_cam_error_function.o \ + ../mpas_atmphys_constants.o + module_cam_support.o: \ module_cam_shr_kind_mod.o \ ../mpas_atmphys_utilities.o +module_cu_tiedtke.o: \ + ../mpas_atmphys_constants.o + +module_cu_ntiedtke.o: \ + ../mpas_atmphys_constants.o + module_mp_radar.o: \ + ../mpas_atmphys_functions.o \ ../mpas_atmphys_utilities.o -module_mp_wsm6.o: \ - libmassv.o \ - module_mp_radar.o +module_mp_thompson.o: \ + module_mp_radar.o \ + ../mpas_atmphys_functions.o \ + ../mpas_atmphys_utilities.o module_ra_cam.o: \ module_cam_support.o \ @@ -64,8 +85,13 @@ module_sf_bep_bem.o: \ module_sf_bem.o \ module_sf_urban.o +module_sf_mynn.o: \ + module_bl_mynn.o \ + module_sf_sfclay.o \ + ../mpas_atmphys_constants.o + module_sf_noahdrv.o: \ - module_sf_bem.o \ + module_sf_bem.o \ module_sf_bep.o \ module_sf_bep_bem.o \ module_sf_noahlsm.o \ diff --git a/src/core_atmosphere/physics/physics_wrf/module_bl_gwdo.F b/src/core_atmosphere/physics/physics_wrf/module_bl_gwdo.F index 4738e456d5..ac7c660f22 100644 --- a/src/core_atmosphere/physics/physics_wrf/module_bl_gwdo.F +++ b/src/core_atmosphere/physics/physics_wrf/module_bl_gwdo.F @@ -1,3 +1,12 @@ +!================================================================================================== +! copied for implementation in MPAS from WRF version 3.6.1. + +! modifications made to sourcecode: +! * used preprocessing option to define the variable dx as a function of the horizontal grid. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-25. + +!================================================================================================== + ! WRf:model_layer:physics ! ! @@ -446,14 +455,14 @@ subroutine gwdo2d(dudt,dvdt,dtaux2d,dtauy2d, & ol(i) = ol4(i,mod(nwd-1,4)+1) enddo ! + kpblmax = 2 kpblmin = kte - do i = its,ite - kpblmin = min(kpblmin, kbl(i)) - enddo -! 
do i = its,ite if (oa(i).le.0.0) kbl(i) = kpbl(i) + 1 + kpblmax = max(kpblmax,kbl(i)) + kpblmin = min(kpblmin, kbl(i)) enddo + kpblmax = min(kpblmax+1,kte-1) ! do i = its,ite delks(i) = 1.0 / (prsi(i,1) - prsi(i,kbl(i))) diff --git a/src/core_atmosphere/physics/physics_wrf/module_bl_mynn.F b/src/core_atmosphere/physics/physics_wrf/module_bl_mynn.F new file mode 100644 index 0000000000..23c41a6812 --- /dev/null +++ b/src/core_atmosphere/physics/physics_wrf/module_bl_mynn.F @@ -0,0 +1,3064 @@ +!================================================================================================== +! copied for implementation in MPAS from WRF version 3.6.1. + +! modifications made to sourcecode: +! * used preprocessing option to replace module_model_constants with mpas_atmphys_constants. +! * used preprocessing option to not compile subroutine mynn_bl_init_driver. +! Laura D. Fowler (laura@ucar.edu) / 2014-09-25. + +!================================================================================================== + +! translated from NN f77 to F90 and put into WRF by Mariusz Pagowski +! NOAA/GSD & CIRA/CSU, Feb 2008 +! changes to original code: +! 1. code is 1d (in z) +! 2. no advection of TKE, covariances and variances +! 3. Cranck-Nicholson replaced with the implicit scheme +! 4. removed terrain dependent grid since input in WRF in actual +! distances in z[m] +! 5. cosmetic changes to adhere to WRF standard (remove common blocks, +! intent etc) +!------------------------------------------------------------------- +!Modifications implemented by Joseph Olson NOAA/GSD/AMB - CU/CIRES +!(approved by Mikio Nakanishi or under consideration): +! 1. Addition of BouLac mixing length in the free atmosphere. +! 2. Changed the turbulent mixing length to be integrated from the +! surface to the top of the BL + a transition layer depth. +! 3. v3.4.1: Option to use Kitamura/Canuto modification which removes +! the critical Richardson number and negative TKE (default). +! 4. 
v3.4.1: Hybrid PBL height diagnostic, which blends a theta-v-based +! definition in neutral/convective BL and a TKE-based definition +! in stable conditions. +! 5. v3.4.1: TKE budget output option (bl_mynn_tkebudget) +! 6. v3.5.0: TKE advection option (bl_mynn_tkeadvect) +! 7. v3.5.1: Fog deposition related changes. +! +! For changes 1 and 3, see "JOE's mods" below: +!------------------------------------------------------------------- + +MODULE module_bl_mynn + +#if defined(mpas) + use mpas_atmphys_constants, only: & + karman, & + g => gravity, & + p1000mb => P0, & + cp, & + r_d => R_d, & + rcp, & + xlv, & + xlf, & + svp1, & + svp2, & + svp3, & + svpt0, & + ep_1, & + ep_2 + use error_function, only: erf + + implicit none + private + public:: tv0,mym_condensation,mynn_bl_driver +#else + USE module_model_constants, only: & + &karman, g, p1000mb, & + &cp, r_d, rcp, xlv, xlf,& + &svp1, svp2, svp3, svpt0, ep_1, ep_2 + USE module_state_description, only: param_first_scalar, & + &p_qc, p_qr, p_qi, p_qs, p_qg, p_qnc, p_qni +!------------------------------------------------------------------- + IMPLICIT NONE +!------------------------------------------------------------------- +#endif + +! The parameters below depend on stability functions of module_sf_mynn. + REAL, PARAMETER :: cphm_st=5.0, cphm_unst=16.0, & + cphh_st=5.0, cphh_unst=16.0 + + REAL, PARAMETER :: xlvcp=xlv/cp, xlscp=(xlv+xlf)/cp, ev=xlv, rd=r_d, & + &rk=cp/rd, svp11=svp1*1.e3, p608=ep_1, ep_3=1.-ep_2 + + REAL, PARAMETER :: tref=300.0 ! reference temperature (K) + REAL, PARAMETER :: tv0=p608*tref, tv1=(1.+p608)*tref, gtr=g/tref + +! Closure constants + REAL, PARAMETER :: & + &vk = karman, & + &pr = 0.74, & + &g1 = 0.229, & ! NN2009 = 0.235 + &b1 = 24.0, & + &b2 = 15.0, & ! CKmod NN2009 + &c2 = 0.729, & ! 0.729, & !0.75, & + &c3 = 0.340, & ! 0.340, & !0.352, & + &c4 = 0.0, & + &c5 = 0.2, & + &a1 = b1*( 1.0-3.0*g1 )/6.0, & +! 
&c1 = g1 -1.0/( 3.0*a1*b1**(1.0/3.0) ), & + &c1 = g1 -1.0/( 3.0*a1*2.88449914061481660), & + &a2 = a1*( g1-c1 )/( g1*pr ), & + &g2 = b2/b1*( 1.0-c3 ) +2.0*a1/b1*( 3.0-2.0*c2 ) + + REAL, PARAMETER :: & + &cc2 = 1.0-c2, & + &cc3 = 1.0-c3, & + &e1c = 3.0*a2*b2*cc3, & + &e2c = 9.0*a1*a2*cc2, & + &e3c = 9.0*a2*a2*cc2*( 1.0-c5 ), & + &e4c = 12.0*a1*a2*cc2, & + &e5c = 6.0*a1*a1 + +! Constants for length scale (alps & cns) and TKE diffusion (Sqfac) +! Original (Nakanishi and Niino 2009) (for CKmod=0.): +! REAL, PARAMETER :: qmin=0.0, zmax=1.0, cns=2.7, & +! &alp1=0.23, alp2=1.0, alp3=5.0, alp4=100.0, & +! &alp5=0.40, Sqfac=3.0 +! Modified for Rapid Refresh/HRRR (and for CKmod=1.): + REAL, PARAMETER :: qmin=0.0, zmax=1.0, cns=2.1, & + &alp1=0.23, alp2=0.65, alp3=3.0, alp4=20.0, & + &alp5=1.0, Sqfac=2.0 + +! Constants for gravitational settling +! REAL, PARAMETER :: gno=1.e6/(1.e8)**(2./3.), gpw=5./3., qcgmin=1.e-8 + REAL, PARAMETER :: gno=1.0 !original value seems too agressive: 4.64158883361278196 + REAL, PARAMETER :: gpw=5./3., qcgmin=1.e-8, qkemin=1.e-12 +! REAL, PARAMETER :: pblh_ref=1500. + +! Constants for cloud PDF (mym_condensation) + REAL, PARAMETER :: rr2=0.7071068, rrp=0.3989423 + +!JOE's mods + !Use Canuto/Kitamura mod (remove Ric and negative TKE) (1:yes, 0:no) + !For more info, see Canuto et al. (2008 JAS) and Kitamura (Journal of the + !Meteorological Society of Japan, Vol. 88, No. 5, pp. 857-864, 2010). + !Note that this change required further modification of other parameters + !above (c2, c3, alp2, and Sqfac). If you want to remove this option, set these + !parameters back to NN2009 values (see commented out lines next to the + !parameters above). This only removes the negative TKE problem + !but does not necessarily improve performance - neutral impact. + REAL, PARAMETER :: CKmod=1. + + !Use BouLac mixing length in free atmosphere (1:yes, 0:no) + !This helps remove excessively large mixing in unstable layers aloft. + REAL, PARAMETER :: BLmod=1. 
+ + !Mix couds (water & ice): (0: no, 1: yes) +! REAL, PARAMETER :: Cloudmix=0. + REAL, PARAMETER :: Cloudmix=1. +!JOE-end + + INTEGER :: mynn_level=2 + + INTEGER, PARAMETER :: kdebug=27 + +CONTAINS + +! ********************************************************************** +! * An improved Mellor-Yamada turbulence closure model * +! * * +! * Aug/2005 M. Nakanishi (N.D.A) * +! * Modified: Dec/2005 M. Nakanishi (N.D.A) * +! * naka@nda.ac.jp * +! * * +! * Contents: * +! * 1. mym_initialize (to be called once initially) * +! * gives the closure constants and initializes the turbulent * +! * quantities. * +! * (2) mym_level2 (called in the other subroutines) * +! * calculates the stability functions at Level 2. * +! * (3) mym_length (called in the other subroutines) * +! * calculates the master length scale. * +! * 4. mym_turbulence * +! * calculates the vertical diffusivity coefficients and the * +! * production terms for the turbulent quantities. * +! * 5. mym_predict * +! * predicts the turbulent quantities at the next step. * +! * 6. mym_condensation * +! * determines the liquid water content and the cloud fraction * +! * diagnostically. * +! * * +! * call mym_initialize * +! * | * +! * |<----------------+ * +! * | | * +! * call mym_condensation | * +! * call mym_turbulence | * +! * call mym_predict | * +! * | | * +! * |-----------------+ * +! * | * +! * end * +! * * +! * Variables worthy of special mention: * +! * tref : Reference temperature * +! * thl : Liquid water potential temperature * +! * qw : Total water (water vapor+liquid water) content * +! * ql : Liquid water content * +! * vt, vq : Functions for computing the buoyancy flux * +! * * +! * If the water contents are unnecessary, e.g., in the case of * +! * ocean models, thl is the potential temperature and qw, ql, vt * +! * and vq are all zero. * +! * * +! * Grid arrangement: * +! * k+1 +---------+ * +! * | | i = 1 - nx * +! * (k) | * | j = 1 - ny * +! * | | k = 1 - nz * +! * k +---------+ * +! 
* i (i) i+1 * +! * * +! * All the predicted variables are defined at the center (*) of * +! * the grid boxes. The diffusivity coefficients are, however, * +! * defined on the walls of the grid boxes. * +! * # Upper boundary values are given at k=nz. * +! * * +! * References: * +! * 1. Nakanishi, M., 2001: * +! * Boundary-Layer Meteor., 99, 349-378. * +! * 2. Nakanishi, M. and H. Niino, 2004: * +! * Boundary-Layer Meteor., 112, 1-31. * +! * 3. Nakanishi, M. and H. Niino, 2006: * +! * Boundary-Layer Meteor., (in press). * +! * 4. Nakanishi, M. and H. Niino, 2009: * +! * Jour. Meteor. Soc. Japan, 87, 895-912. * +! ********************************************************************** +! +! SUBROUTINE mym_initialize: +! +! Input variables: +! iniflag : <>0; turbulent quantities will be initialized +! = 0; turbulent quantities have been already +! given, i.e., they will not be initialized +! mx, my : Maximum numbers of grid boxes +! in the x and y directions, respectively +! nx, ny, nz : Numbers of the actual grid boxes +! in the x, y and z directions, respectively +! tref : Reference temperature (K) +! dz(nz) : Vertical grid spacings (m) +! # dz(nz)=dz(nz-1) +! zw(nz+1) : Heights of the walls of the grid boxes (m) +! # zw(1)=0.0 and zw(k)=zw(k-1)+dz(k-1) +! h(mx,ny) : G^(1/2) in the terrain-following coordinate +! # h=1-zg/zt, where zg is the height of the +! terrain and zt the top of the model domain +! pi0(mx,my,nz) : Exner function at zw*h+zg (J/kg K) +! defined by c_p*( p_basic/1000hPa )^kappa +! This is usually computed by integrating +! d(pi0)/dz = -h*g/tref. +! rmo(mx,ny) : Inverse of the Obukhov length (m^(-1)) +! flt, flq(mx,ny) : Turbulent fluxes of sensible and latent heat, +! respectively, e.g., flt=-u_*Theta_* (K m/s) +!! flt - liquid water potential temperature surface flux +!! flq - total water flux surface flux +! ust(mx,ny) : Friction velocity (m/s) +! pmz(mx,ny) : phi_m-zeta at z1*h+z0, where z1 (=0.5*dz(1)) +! 
is the first grid point above the surafce, z0 +! the roughness length and zeta=(z1*h+z0)*rmo +! phh(mx,ny) : phi_h at z1*h+z0 +! u, v(mx,my,nz): Components of the horizontal wind (m/s) +! thl(mx,my,nz) : Liquid water potential temperature +! (K) +! qw(mx,my,nz) : Total water content Q_w (kg/kg) +! +! Output variables: +! ql(mx,my,nz) : Liquid water content (kg/kg) +! v?(mx,my,nz) : Functions for computing the buoyancy flux +! qke(mx,my,nz) : Twice the turbulent kinetic energy q^2 +! (m^2/s^2) +! tsq(mx,my,nz) : Variance of Theta_l (K^2) +! qsq(mx,my,nz) : Variance of Q_w +! cov(mx,my,nz) : Covariance of Theta_l and Q_w (K) +! el(mx,my,nz) : Master length scale L (m) +! defined on the walls of the grid boxes +! bsh : no longer used +! via common : Closure constants +! +! Work arrays: see subroutine mym_level2 +! pd?(mx,my,nz) : Half of the production terms at Level 2 +! defined on the walls of the grid boxes +! qkw(mx,my,nz) : q on the walls of the grid boxes (m/s) +! +! # As to dtl, ...gh, see subroutine mym_turbulence. +! +!------------------------------------------------------------------- + SUBROUTINE mym_initialize ( kts,kte,& + & dz, zw, & + & u, v, thl, qw, & +! & ust, rmo, pmz, phh, flt, flq,& +!JOE-BouLac/PBLH mod + & zi,theta,& + & sh,& +!JOE-end + & ust, rmo, el,& + & Qke, Tsq, Qsq, Cov) +! +!------------------------------------------------------------------- + + INTEGER, INTENT(IN) :: kts,kte +! 
REAL, INTENT(IN) :: ust, rmo, pmz, phh, flt, flq + REAL, INTENT(IN) :: ust, rmo + REAL, DIMENSION(kts:kte), INTENT(in) :: dz + REAL, DIMENSION(kts:kte+1), INTENT(in) :: zw + REAL, DIMENSION(kts:kte), INTENT(in) :: u,v,thl,qw + + REAL, DIMENSION(kts:kte), INTENT(out) :: tsq,qsq,cov + REAL, DIMENSION(kts:kte), INTENT(inout) :: el,qke + + REAL, DIMENSION(kts:kte) :: & + &ql,pdk,pdt,pdq,pdc,dtl,dqw,dtv,& + &gm,gh,sm,sh,qkw,vt,vq + INTEGER :: k,l,lmax + REAL :: phm,vkz,elq,elv,b1l,b2l,pmz=1.,phh=1.,flt=0.,flq=0.,tmpq +!JOE-BouLac and PBLH mod + REAL :: zi + REAL, DIMENSION(kts:kte) :: theta +!JOE-end + + +! ** At first ql, vt and vq are set to zero. ** + DO k = kts,kte + ql(k) = 0.0 + vt(k) = 0.0 + vq(k) = 0.0 + END DO +! + CALL mym_level2 ( kts,kte,& + & dz, & + & u, v, thl, qw, & + & ql, vt, vq, & + & dtl, dqw, dtv, gm, gh, sm, sh ) +! +! ** Preliminary setting ** + + el (kts) = 0.0 + qke(kts) = ust**2 * ( b1*pmz )**(2.0/3.0) +! + phm = phh*b2 / ( b1*pmz )**(1.0/3.0) + tsq(kts) = phm*( flt/ust )**2 + qsq(kts) = phm*( flq/ust )**2 + cov(kts) = phm*( flt/ust )*( flq/ust ) +! + DO k = kts+1,kte + vkz = vk*zw(k) + el (k) = vkz/( 1.0 + vkz/100.0 ) + qke(k) = 0.0 +! + tsq(k) = 0.0 + qsq(k) = 0.0 + cov(k) = 0.0 + END DO +! +! ** Initialization with an iterative manner ** +! ** lmax is the iteration count. This is arbitrary. ** + lmax = 5 +! + DO l = 1,lmax +! + CALL mym_length ( kts,kte,& + & dz, zw, & + & rmo, flt, flq, & + & vt, vq, & + & qke, & + & dtv, & + & el, & +!JOE-added for BouLac/PBHL + & zi,theta,& +!JOE-end + & qkw) +! + DO k = kts+1,kte + elq = el(k)*qkw(k) + pdk(k) = elq*( sm(k)*gm (k)+& + &sh(k)*gh (k) ) + pdt(k) = elq* sh(k)*dtl(k)**2 + pdq(k) = elq* sh(k)*dqw(k)**2 + pdc(k) = elq* sh(k)*dtl(k)*dqw(k) + END DO +! +! ** Strictly, vkz*h(i,j) -> vk*( 0.5*dz(1)*h(i,j)+z0 ) ** + vkz = vk*0.5*dz(kts) +! + elv = 0.5*( el(kts+1)+el(kts) ) / vkz + qke(kts) = ust**2 * ( b1*pmz*elv )**(2.0/3.0) +! 
+ phm = phh*b2 / ( b1*pmz/elv**2 )**(1.0/3.0) + tsq(kts) = phm*( flt/ust )**2 + qsq(kts) = phm*( flq/ust )**2 + cov(kts) = phm*( flt/ust )*( flq/ust ) +! + DO k = kts+1,kte-1 + b1l = b1*0.25*( el(k+1)+el(k) ) + tmpq=MAX(b1l*( pdk(k+1)+pdk(k) ),qkemin) +! PRINT *,'tmpqqqqq',tmpq,pdk(k+1),pdk(k) + qke(k) = tmpq**(2.0/3.0) + +! + IF ( qke(k) .LE. 0.0 ) THEN + b2l = 0.0 + ELSE + b2l = b2*( b1l/b1 ) / SQRT( qke(k) ) + END IF +! + tsq(k) = b2l*( pdt(k+1)+pdt(k) ) + qsq(k) = b2l*( pdq(k+1)+pdq(k) ) + cov(k) = b2l*( pdc(k+1)+pdc(k) ) + END DO + +! + END DO + +!! qke(kts)=qke(kts+1) +!! tsq(kts)=tsq(kts+1) +!! qsq(kts)=qsq(kts+1) +!! cov(kts)=cov(kts+1) + + qke(kte)=qke(kte-1) + tsq(kte)=tsq(kte-1) + qsq(kte)=qsq(kte-1) + cov(kte)=cov(kte-1) + +! +! RETURN + + END SUBROUTINE mym_initialize + +! +! ================================================================== +! SUBROUTINE mym_level2: +! +! Input variables: see subroutine mym_initialize +! +! Output variables: +! dtl(mx,my,nz) : Vertical gradient of Theta_l (K/m) +! dqw(mx,my,nz) : Vertical gradient of Q_w +! dtv(mx,my,nz) : Vertical gradient of Theta_V (K/m) +! gm (mx,my,nz) : G_M divided by L^2/q^2 (s^(-2)) +! gh (mx,my,nz) : G_H divided by L^2/q^2 (s^(-2)) +! sm (mx,my,nz) : Stability function for momentum, at Level 2 +! sh (mx,my,nz) : Stability function for heat, at Level 2 +! +! These are defined on the walls of the grid boxes. +! + SUBROUTINE mym_level2 (kts,kte,& + & dz, & + & u, v, thl, qw, & + & ql, vt, vq, & + & dtl, dqw, dtv, gm, gh, sm, sh ) +! +!------------------------------------------------------------------- + + INTEGER, INTENT(IN) :: kts,kte + REAL, DIMENSION(kts:kte), INTENT(in) :: dz + REAL, DIMENSION(kts:kte), INTENT(in) :: u,v,thl,qw,ql,vt,vq + + REAL, DIMENSION(kts:kte), INTENT(out) :: & + &dtl,dqw,dtv,gm,gh,sm,sh + + INTEGER :: k + + REAL :: rfc,f1,f2,rf1,rf2,smc,shc,& + &ri1,ri2,ri3,ri4,duz,dtz,dqz,vtt,vqq,dtq,dzk,afk,abk,ri,rf + +!JOE-Canuto/Kitamura mod + REAL :: a2den +!JOE-end + +! 
ev = 2.5e6 +! tv0 = 0.61*tref +! tv1 = 1.61*tref +! gtr = 9.81/tref +! + rfc = g1/( g1+g2 ) + f1 = b1*( g1-c1 ) +3.0*a2*( 1.0 -c2 )*( 1.0-c5 ) & + & +2.0*a1*( 3.0-2.0*c2 ) + f2 = b1*( g1+g2 ) -3.0*a1*( 1.0 -c2 ) + rf1 = b1*( g1-c1 )/f1 + rf2 = b1* g1 /f2 + smc = a1 /a2* f1/f2 + shc = 3.0*a2*( g1+g2 ) +! + ri1 = 0.5/smc + ri2 = rf1*smc + ri3 = 4.0*rf2*smc -2.0*ri2 + ri4 = ri2**2 +! + DO k = kts+1,kte + dzk = 0.5 *( dz(k)+dz(k-1) ) + afk = dz(k)/( dz(k)+dz(k-1) ) + abk = 1.0 -afk + duz = ( u(k)-u(k-1) )**2 +( v(k)-v(k-1) )**2 + duz = duz /dzk**2 + dtz = ( thl(k)-thl(k-1) )/( dzk ) + dqz = ( qw(k)-qw(k-1) )/( dzk ) +! + vtt = 1.0 +vt(k)*abk +vt(k-1)*afk + vqq = tv0 +vq(k)*abk +vq(k-1)*afk + dtq = vtt*dtz +vqq*dqz +! + dtl(k) = dtz + dqw(k) = dqz + dtv(k) = dtq +!? dtv(i,j,k) = dtz +tv0*dqz +!? : +( ev/pi0(i,j,k)-tv1 ) +!? : *( ql(i,j,k)-ql(i,j,k-1) )/( dzk*h(i,j) ) +! + gm (k) = duz + gh (k) = -dtq*gtr +! +! ** Gradient Richardson number ** + ri = -gh(k)/MAX( duz, 1.0e-10 ) + +!JOE-Canuto/Kitamura mod + IF (CKmod .eq. 1) THEN + a2den = 1. + MAX(ri,0.0) + ELSE + a2den = 1. + 0.0 + ENDIF + + rfc = g1/( g1+g2 ) + f1 = b1*( g1-c1 ) +3.0*(a2/a2den)*( 1.0 -c2 )*( 1.0-c5 ) & + & +2.0*a1*( 3.0-2.0*c2 ) + f2 = b1*( g1+g2 ) -3.0*a1*( 1.0 -c2 ) + rf1 = b1*( g1-c1 )/f1 + rf2 = b1* g1 /f2 + smc = a1 /(a2/a2den)* f1/f2 + shc = 3.0*(a2/a2den)*( g1+g2 ) + + ri1 = 0.5/smc + ri2 = rf1*smc + ri3 = 4.0*rf2*smc -2.0*ri2 + ri4 = ri2**2 +!JOE-end + +! ** Flux Richardson number ** + rf = MIN( ri1*( ri+ri2-SQRT(ri**2-ri3*ri+ri4) ), rfc ) +! + sh (k) = shc*( rfc-rf )/( 1.0-rf ) + sm (k) = smc*( rf1-rf )/( rf2-rf ) * sh(k) + END DO +! + RETURN + + END SUBROUTINE mym_level2 + +! ================================================================== +! SUBROUTINE mym_length: +! +! Input variables: see subroutine mym_initialize +! +! Output variables: see subroutine mym_initialize +! +! Work arrays: +! elt(mx,ny) : Length scale depending on the PBL depth (m) +! vsc(mx,ny) : Velocity scale q_c (m/s) +! 
at first, used for computing elt +! +! NOTE: the mixing lengths are meant to be calculated at the full- +! sigmal levels (or interfaces beween the model layers). +! + SUBROUTINE mym_length ( kts,kte,& + & dz, zw, & + & rmo, flt, flq, & + & vt, vq, & + & qke, & + & dtv, & + & el, & + & zi,theta,& !JOE-BouLac mod + & qkw) + +!------------------------------------------------------------------- + + INTEGER, INTENT(IN) :: kts,kte + REAL, DIMENSION(kts:kte), INTENT(in) :: dz + REAL, DIMENSION(kts:kte+1), INTENT(in) :: zw + REAL, INTENT(in) :: rmo,flt,flq + REAL, DIMENSION(kts:kte), INTENT(IN) :: qke,vt,vq + + REAL, DIMENSION(kts:kte), INTENT(out) :: qkw, el + REAL, DIMENSION(kts:kte), INTENT(in) :: dtv + + REAL :: elt,vsc +!JOE-added for BouLac ML + REAL, DIMENSION(kts:kte), INTENT(IN) :: theta + REAL, DIMENSION(kts:kte) :: qtke,elBLmin,elBLavg,thetaw + REAL :: wt,zi,zi2,h1,h2 + + !THE FOLLOWING LIMITS DO NOT DIRECTLY AFFECT THE ACTUAL PBLH. + !THEY ONLY IMPOSE LIMITS ON THE CALCULATION OF THE MIXING LENGTH + !SCALES SO THAT THE BOULAC MIXING LENGTH (IN FREE ATMOS) DOES + !NOT ENCROACH UPON THE BOUNDARY LAYER MIXING LENGTH (els, elb & elt). + REAL, PARAMETER :: minzi = 300. !min mixed-layer height + REAL, PARAMETER :: maxdz = 750. !max (half) transition layer depth + !=0.3*2500 m PBLH, so the transition + !layer stops growing for PBLHs > 2.5 km. + REAL, PARAMETER :: mindz = 300. !min (half) transition layer depth + + !SURFACE LAYER LENGTH SCALE MODS TO REDUCE IMPACT IN UPPER BOUNDARY LAYER + REAL, PARAMETER :: ZSLH = 100. ! Max height correlated to surface conditions (m) + REAL, PARAMETER :: CSL = 2. ! CSL = constant of proportionality to L O(1) + REAL :: z_m + +!Joe-end + + INTEGER :: i,j,k + REAL :: afk,abk,zwk,dzk,qdz,vflx,bv,elb,els,elf + +! tv0 = 0.61*tref +! gtr = 9.81/tref +! +!JOE-added to impose limits on the height integration for elt as well +! as the transition layer depth + IF ( BLmod .EQ. 0. ) THEN + zi2=5000. 
!originally integrated to model top, not just 5000 m. + ELSE + zi2=MAX(zi,minzi) + ENDIF + h1=MAX(0.3*zi2,mindz) + h1=MIN(h1,maxdz) ! 1/2 transition layer depth + h2=h1/2.0 ! 1/4 transition layer depth + + qtke(kts)=MAX(qke(kts)/2.,0.01) !tke at full sigma levels + thetaw(kts)=theta(kts) !theta at full-sigma levels +!JOE-end + qkw(kts) = SQRT(MAX(qke(kts),1.0e-10)) + + DO k = kts+1,kte + afk = dz(k)/( dz(k)+dz(k-1) ) + abk = 1.0 -afk + qkw(k) = SQRT(MAX(qke(k)*abk+qke(k-1)*afk,1.0e-3)) + +!JOE- BouLac Start + qtke(k) = (qkw(k)**2.)/2. ! q -> TKE + thetaw(k)= theta(k)*abk + theta(k-1)*afk +!JOE- BouLac End + + END DO +! + elt = 1.0e-5 + vsc = 1.0e-5 +! +! ** Strictly, zwk*h(i,j) -> ( zwk*h(i,j)+z0 ) ** +!JOE-Lt mod: only integrate to top of PBL (+ transition/entrainment +! layer), since TKE aloft is not relevant. Make WHILE loop, so it +! exits after looping through the boundary layer. +! + k = kts+1 + zwk = zw(k) + DO WHILE (zwk .LE. MIN((zi2+h1), 4000.)) !JOE: 20130523 reduce too high diffusivity over mts + dzk = 0.5*( dz(k)+dz(k-1) ) + qdz = MAX( qkw(k)-qmin, 0.03 )*dzk + elt = elt +qdz*zwk + vsc = vsc +qdz + k = k+1 + zwk = zw(k) + END DO +! + elt = alp1*elt/vsc + vflx = ( vt(kts)+1.0 )*flt +( vq(kts)+tv0 )*flq + vsc = ( gtr*elt*MAX( vflx, 0.0 ) )**(1.0/3.0) +! +! ** Strictly, el(i,j,1) is not zero. ** + el(kts) = 0.0 +! +!JOE- BouLac Start + IF ( BLmod .GT. 0. ) THEN + ! COMPUTE BouLac mixing length + CALL boulac_length(kts,kte,zw,dz,qtke,thetaw,elBLmin,elBLavg) + ENDIF +!JOE- BouLac END + + DO k = kts+1,kte + zwk = zw(k) !full-sigma levels + +! ** Length scale limited by the buoyancy effect ** + IF ( dtv(k) .GT. 0.0 ) THEN + bv = SQRT( gtr*dtv(k) ) + elb = alp2*qkw(k) / bv & + & *( 1.0 + alp3/alp2*& + &SQRT( vsc/( bv*elt ) ) ) + + elf = alp2 * qkw(k)/bv + ELSE + elb = 1.0e10 + elf = elb + END IF +! + z_m = MAX(ZSLH,CSL*zwk*rmo) + +! ** Length scale in the surface layer ** + IF ( rmo .GT. 0.0 ) THEN + ! IF ( zwk <= z_m ) THEN ! 
use original cns + els = vk*zwk/(1.0+cns*MIN( zwk*rmo, zmax )) + !els = vk*zwk/(1.0+cns*MIN( 0.5*zw(kts+1)*rmo, zmax )) + ! ELSE + ! !blend to neutral values (kz) above z_m + ! els = vk*z_m/(1.0+cns*MIN( zwk*rmo, zmax )) + vk*(zwk - z_m) + ! ENDIF + ELSE + els = vk*zwk*( 1.0 - alp4* zwk*rmo )**0.2 + END IF +! +! ** HARMONC AVERGING OF MIXING LENGTH SCALES: +! el(k) = MIN(elb/( elb/elt+elb/els+1.0 ),elf) +! el(k) = elb/( elb/elt+elb/els+1.0 ) +!JOE- BouLac Start + IF ( BLmod .EQ. 0. ) THEN + el(k) = MIN(elb/( elb/elt+elb/els+1.0 ),elf) + ELSE + !add blending to use BouLac mixing length in free atmos; + !defined relative to the PBLH (zi) + transition layer (h1) + el(k) = MIN(elb/( elb/elt+elb/els+1.0 ),elf) + wt=.5*TANH((zwk - (zi2+h1))/h2) + .5 + el(k) = el(k)*(1.-wt) + alp5*elBLmin(k)*wt + ENDIF +!JOE- BouLac End + + !IF (el(k) > 1000.) THEN + ! print*,"SUSPICIOUSLY LARGE Lm:",el(k),k + !ENDIF + END DO +! + RETURN + + END SUBROUTINE mym_length + +!JOE- BouLac Code Start - +! ================================================================== + SUBROUTINE boulac_length(kts,kte,zw,dz,qtke,theta,lb1,lb2) +! +! NOTE: This subroutine was taken from the BouLac scheme in WRF-ARW +! and modified for integration into the MYNN PBL scheme. +! WHILE loops were added to reduce the computational expense. +! This subroutine computes the length scales up and down +! and then computes the min, average of the up/down +! length scales, and also considers the distance to the +! surface. +! +! dlu = the distance a parcel can be lifted upwards give a finite +! amount of TKE. +! dld = the distance a parcel can be displaced downwards given a +! finite amount of TKE. +! lb1 = the minimum of the length up and length down +! 
lb2 = the average of the length up and length down +!------------------------------------------------------------------- + + INTEGER, INTENT(IN) :: kts,kte + REAL, DIMENSION(kts:kte), INTENT(IN) :: qtke,dz,theta + REAL, DIMENSION(kts:kte), INTENT(OUT) :: lb1,lb2 + REAL, DIMENSION(kts:kte+1), INTENT(IN) :: zw + + !LOCAL VARS + INTEGER :: iz, izz, found + REAL, DIMENSION(kts:kte) :: dlu,dld + REAL, PARAMETER :: Lmax=2000. !soft limit + REAL :: dzt, zup, beta, zup_inf, bbb, tl, zdo, zdo_sup, zzz + + !print*,"IN MYNN-BouLac",kts, kte + + do iz=kts,kte + + !---------------------------------- + ! FIND DISTANCE UPWARD + !---------------------------------- + zup=0. + dlu(iz)=zw(kte+1)-zw(iz)-dz(iz)/2. + zzz=0. + zup_inf=0. + beta=g/theta(iz) !Buoyancy coefficient + + !print*,"FINDING Dup, k=",iz," zw=",zw(iz) + + if (iz .lt. kte) then !cant integrate upwards from highest level + + found = 0 + izz=iz + DO WHILE (found .EQ. 0) + + if (izz .lt. kte) then + dzt=dz(izz) ! layer depth above + zup=zup-beta*theta(iz)*dzt ! initial PE the parcel has at iz + !print*," ",iz,izz,theta(izz),dz(izz) + zup=zup+beta*(theta(izz+1)+theta(izz))*dzt/2. ! PE gained by lifting a parcel to izz+1 + zzz=zzz+dzt ! depth of layer iz to izz+1 + !print*," PE=",zup," TKE=",qtke(iz)," z=",zw(izz) + if (qtke(iz).lt.zup .and. qtke(iz).ge.zup_inf) then + bbb=(theta(izz+1)-theta(izz))/dzt + if (bbb .ne. 0.) then + !fractional distance up into the layer where TKE becomes < PE + tl=(-beta*(theta(izz)-theta(iz)) + & + & sqrt( max(0.,(beta*(theta(izz)-theta(iz)))**2. + & + & 2.*bbb*beta*(qtke(iz)-zup_inf))))/bbb/beta + else + if (theta(izz) .ne. theta(iz))then + tl=(qtke(iz)-zup_inf)/(beta*(theta(izz)-theta(iz))) + else + tl=0. + endif + endif + dlu(iz)=zzz-dzt+tl + !print*," FOUND Dup:",dlu(iz)," z=",zw(izz)," tl=",tl + found =1 + endif + zup_inf=zup + izz=izz+1 + ELSE + found = 1 + ENDIF + + ENDDO + + endif + + !---------------------------------- + ! 
FIND DISTANCE DOWN + !---------------------------------- + zdo=0. + zdo_sup=0. + dld(iz)=zw(iz) + zzz=0. + + !print*,"FINDING Ddown, k=",iz," zwk=",zw(iz) + if (iz .gt. kts) then !cant integrate downwards from lowest level + + found = 0 + izz=iz + DO WHILE (found .EQ. 0) + + if (izz .gt. kts) then + dzt=dz(izz-1) + zdo=zdo+beta*theta(iz)*dzt + !print*," ",iz,izz,theta(izz),dz(izz-1) + zdo=zdo-beta*(theta(izz-1)+theta(izz))*dzt/2. + zzz=zzz+dzt + !print*," PE=",zdo," TKE=",qtke(iz)," z=",zw(izz) + if (qtke(iz).lt.zdo .and. qtke(iz).ge.zdo_sup) then + bbb=(theta(izz)-theta(izz-1))/dzt + if (bbb .ne. 0.) then + tl=(beta*(theta(izz)-theta(iz))+ & + & sqrt( max(0.,(beta*(theta(izz)-theta(iz)))**2. + & + & 2.*bbb*beta*(qtke(iz)-zdo_sup))))/bbb/beta + else + if (theta(izz) .ne. theta(iz)) then + tl=(qtke(iz)-zdo_sup)/(beta*(theta(izz)-theta(iz))) + else + tl=0. + endif + endif + dld(iz)=zzz-dzt+tl + !print*," FOUND Ddown:",dld(iz)," z=",zw(izz)," tl=",tl + found = 1 + endif + zdo_sup=zdo + izz=izz-1 + ELSE + found = 1 + ENDIF + ENDDO + + endif + + !---------------------------------- + ! GET MINIMUM (OR AVERAGE) + !---------------------------------- + !The surface layer length scale can exceed z for large z/L, + !so keep maximum distance down > z. + dld(iz) = min(dld(iz),zw(iz+1))!not used in PBL anyway, only free atmos + lb1(iz) = min(dlu(iz),dld(iz)) !minimum + lb2(iz) = sqrt(dlu(iz)*dld(iz)) !average - biased towards smallest + !lb2(iz) = 0.5*(dlu(iz)+dld(iz)) !average + + !Apply soft limit (only impacts very large lb; lb=100 by 5%, lb=500 by 20%). + lb1(iz) = lb1(iz)/(1. + (lb1(iz)/Lmax)) + lb2(iz) = lb2(iz)/(1. + (lb2(iz)/Lmax)) + + if (iz .eq. kte) then + lb1(kte) = lb1(kte-1) + lb2(kte) = lb2(kte-1) + endif + !print*,"IN MYNN-BouLac",kts, kte,lb1(iz) + !print*,"IN MYNN-BouLac",iz,dld(iz),dlu(iz) + + ENDDO + + END SUBROUTINE boulac_length +! +!JOE-END BOULAC CODE + +! ================================================================== +! SUBROUTINE mym_turbulence: +! 
+! Input variables: see subroutine mym_initialize +! levflag : <>3; Level 2.5 +! = 3; Level 3 +! +! # ql, vt, vq, qke, tsq, qsq and cov are changed to input variables. +! +! Output variables: see subroutine mym_initialize +! dfm(mx,my,nz) : Diffusivity coefficient for momentum, +! divided by dz (not dz*h(i,j)) (m/s) +! dfh(mx,my,nz) : Diffusivity coefficient for heat, +! divided by dz (not dz*h(i,j)) (m/s) +! dfq(mx,my,nz) : Diffusivity coefficient for q^2, +! divided by dz (not dz*h(i,j)) (m/s) +! tcd(mx,my,nz) : Countergradient diffusion term for Theta_l +! (K/s) +! qcd(mx,my,nz) : Countergradient diffusion term for Q_w +! (kg/kg s) +! pd?(mx,my,nz) : Half of the production terms +! +! Only tcd and qcd are defined at the center of the grid boxes +! +! # DO NOT forget that tcd and qcd are added on the right-hand side +! of the equations for Theta_l and Q_w, respectively. +! +! Work arrays: see subroutine mym_initialize and level2 +! +! # dtl, dqw, dtv, gm and gh are allowed to share storage units with +! dfm, dfh, dfq, tcd and qcd, respectively, for saving memory. +! + SUBROUTINE mym_turbulence ( kts,kte,& + & levflag, & + & dz, zw, & + & u, v, thl, ql, qw, & + & qke, tsq, qsq, cov, & + & vt, vq,& + & rmo, flt, flq, & +!JOE-BouLac/PBLH test + & zi,theta,& + & sh,& +!JOE-end + & El,& + & Dfm, Dfh, Dfq, Tcd, Qcd, Pdk, Pdt, Pdq, Pdc & +!JOE-TKE BUDGET + & ,qWT1D,qSHEAR1D,qBUOY1D,qDISS1D & + & ,bl_mynn_tkebudget & +!JOE-end + &) + +!------------------------------------------------------------------- +! 
+ INTEGER, INTENT(IN) :: kts,kte + INTEGER, INTENT(IN) :: levflag + REAL, DIMENSION(kts:kte), INTENT(in) :: dz + REAL, DIMENSION(kts:kte+1), INTENT(in) :: zw + REAL, INTENT(in) :: rmo,flt,flq + REAL, DIMENSION(kts:kte), INTENT(in) :: u,v,thl,qw,& + &ql,vt,vq,qke,tsq,qsq,cov + + REAL, DIMENSION(kts:kte), INTENT(out) :: dfm,dfh,dfq,& + &pdk,pdt,pdq,pdc,tcd,qcd,el + +!JOE-TKE BUDGET + REAL, DIMENSION(kts:kte), INTENT(inout) :: & + qWT1D,qSHEAR1D,qBUOY1D,qDISS1D + REAL :: q3sq_old,dlsq1,qWTP_old,qWTP_new + REAL :: dudz,dvdz,dTdz,& + upwp,vpwp,Tpwp + INTEGER, INTENT(in) :: bl_mynn_tkebudget +!JOE-end + + REAL, DIMENSION(kts:kte) :: qkw,dtl,dqw,dtv,gm,gh,sm,sh + + INTEGER :: k +! REAL :: cc2,cc3,e1c,e2c,e3c,e4c,e5c + REAL :: e6c,dzk,afk,abk,vtt,vqq,& + &cw25,clow,cupp,gamt,gamq,smd,gamv,elq,elh + +!JOE-added for BouLac/PBLH test + REAL :: zi + REAL, DIMENSION(kts:kte), INTENT(in) :: theta +!JOE-end + + REAL :: a2den, duz, ri, HLmod !JOE-Canuto/Kitamura mod + + DOUBLE PRECISION q2sq, t2sq, r2sq, c2sq, elsq, gmel, ghel + DOUBLE PRECISION q3sq, t3sq, r3sq, c3sq, dlsq, qdiv + DOUBLE PRECISION e1, e2, e3, e4, enum, eden, wden +! +! tv0 = 0.61*tref +! gtr = 9.81/tref +! +! cc2 = 1.0-c2 +! cc3 = 1.0-c3 +! e1c = 3.0*a2*b2*cc3 +! e2c = 9.0*a1*a2*cc2 +! e3c = 9.0*a2*a2*cc2*( 1.0-c5 ) +! e4c = 12.0*a1*a2*cc2 +! e5c = 6.0*a1*a1 +! + + CALL mym_level2 (kts,kte,& + & dz, & + & u, v, thl, qw, & + & ql, vt, vq, & + & dtl, dqw, dtv, gm, gh, sm, sh ) +! + CALL mym_length (kts,kte, & + & dz, zw, & + & rmo, flt, flq, & + & vt, vq, & + & qke, & + & dtv, & + & el, & + & zi,theta,& !JOE-hybrid PBLH + & qkw) +! + + DO k = kts+1,kte + dzk = 0.5 *( dz(k)+dz(k-1) ) + afk = dz(k)/( dz(k)+dz(k-1) ) + abk = 1.0 -afk + elsq = el (k)**2 + q2sq = b1*elsq*( sm(k)*gm(k)+sh(k)*gh(k) ) + q3sq = qkw(k)**2 + +!JOE-Canuto/Kitamura mod + duz = ( u(k)-u(k-1) )**2 +( v(k)-v(k-1) )**2 + duz = duz /dzk**2 + ! ** Gradient Richardson number ** + ri = -gh(k)/MAX( duz, 1.0e-10 ) + IF (CKmod .eq. 1) THEN + a2den = 1. 
+ MAX(ri,0.0) + ELSE + a2den = 1. + 0.0 + ENDIF +!JOE-end +! +! Modified: Dec/22/2005, from here, (dlsq -> elsq) + gmel = gm (k)*elsq + ghel = gh (k)*elsq +! Modified: Dec/22/2005, up to here +! +!JOE-add prints + IF (sh(k)<0.0 .OR. sm(k)<0.0) THEN + PRINT*,"MYM_TURBULENCE2.0: k=",k," sh=",sh(k) + PRINT*," gm=",gm(k)," gh=",gh(k)," sm=",sm(k) + PRINT*," q2sq=",q2sq," q3sq=",q3sq," q3/q2=",q3sq/q2sq + PRINT*," qke=",qke(k)," el=",el(k)," ri=",ri + PRINT*," PBLH=",zi," u=",u(k)," v=",v(k) + ENDIF +!JOE-Apply Helfand & Labraga stability check for all Ric +! when CKmod == 1. Suggested by Kitamura. Not applied below. + IF (CKmod .eq. 1) THEN + HLmod = q2sq -1. + ELSE + HLmod = q3sq + ENDIF +! ** Since qkw is set to more than 0.0, q3sq > 0.0. ** + IF ( q3sq .LT. q2sq ) THEN +! IF ( HLmod .LT. q2sq ) THEN +!JOE-END + qdiv = SQRT( q3sq/q2sq ) !HL89: (1-alfa) + sm(k) = sm(k) * qdiv + sh(k) = sh(k) * qdiv +! +!JOE-Canuto/Kitamura mod +! e1 = q3sq - e1c*ghel * qdiv**2 +! e2 = q3sq - e2c*ghel * qdiv**2 +! e3 = e1 + e3c*ghel * qdiv**2 +! e4 = e1 - e4c*ghel * qdiv**2 + e1 = q3sq - e1c*ghel/a2den * qdiv**2 + e2 = q3sq - e2c*ghel/a2den * qdiv**2 + e3 = e1 + e3c*ghel/(a2den**2) * qdiv**2 + e4 = e1 - e4c*ghel/a2den * qdiv**2 +!JOE-end + eden = e2*e4 + e3*e5c*gmel * qdiv**2 + eden = MAX( eden, 1.0d-20 ) + ELSE +!JOE-Canuto/Kitamura mod +! e1 = q3sq - e1c*ghel +! e2 = q3sq - e2c*ghel +! e3 = e1 + e3c*ghel +! e4 = e1 - e4c*ghel + e1 = q3sq - e1c*ghel/a2den + e2 = q3sq - e2c*ghel/a2den + e3 = e1 + e3c*ghel/(a2den**2) + e4 = e1 - e4c*ghel/a2den +!JOE-end + eden = e2*e4 + e3*e5c*gmel + eden = MAX( eden, 1.0d-20 ) +! + qdiv = 1.0 + sm(k) = q3sq*a1*( e3-3.0*c1*e4 )/eden +!JOE-Canuto/Kitamura mod +! sh(k) = q3sq*a2*( e2+3.0*c1*e5c*gmel )/eden + sh(k) = q3sq*(a2/a2den)*( e2+3.0*c1*e5c*gmel )/eden +!JOE-end + END IF +! +! HL88 , lev2.5 criteria from eqs. 3.17, 3.19, & 3.20 + IF (sh(k)<0.0 .OR. sm(k)<0.0 .OR. & + sh(k) > 0.76*b2 .or. (sm(k)**2*gm(k) .gt. 
.44**2)) THEN + PRINT*,"MYM_TURBULENCE2.5: k=",k," sh=",sh(k) + PRINT*," gm=",gm(k)," gh=",gh(k)," sm=",sm(k) + PRINT*," q2sq=",q2sq," q3sq=",q3sq," q3/q2=",q3sq/q2sq + PRINT*," qke=",qke(k)," el=",el(k)," ri=",ri + PRINT*," PBLH=",zi," u=",u(k)," v=",v(k) + ENDIF + +! ** Level 3 : start ** + IF ( levflag .EQ. 3 ) THEN + t2sq = qdiv*b2*elsq*sh(k)*dtl(k)**2 + r2sq = qdiv*b2*elsq*sh(k)*dqw(k)**2 + c2sq = qdiv*b2*elsq*sh(k)*dtl(k)*dqw(k) + t3sq = MAX( tsq(k)*abk+tsq(k-1)*afk, 0.0 ) + r3sq = MAX( qsq(k)*abk+qsq(k-1)*afk, 0.0 ) + c3sq = cov(k)*abk+cov(k-1)*afk +! +! Modified: Dec/22/2005, from here + c3sq = SIGN( MIN( ABS(c3sq), SQRT(t3sq*r3sq) ), c3sq ) +! + vtt = 1.0 +vt(k)*abk +vt(k-1)*afk + vqq = tv0 +vq(k)*abk +vq(k-1)*afk + t2sq = vtt*t2sq +vqq*c2sq + r2sq = vtt*c2sq +vqq*r2sq + c2sq = MAX( vtt*t2sq+vqq*r2sq, 0.0d0 ) + t3sq = vtt*t3sq +vqq*c3sq + r3sq = vtt*c3sq +vqq*r3sq + c3sq = MAX( vtt*t3sq+vqq*r3sq, 0.0d0 ) +! + cw25 = e1*( e2 + 3.0*c1*e5c*gmel*qdiv**2 )/( 3.0*eden ) +! +! ** Limitation on q, instead of L/q ** + dlsq = elsq + IF ( q3sq/dlsq .LT. -gh(k) ) q3sq = -dlsq*gh(k) +! +! ** Limitation on c3sq (0.12 =< cw =< 0.76) ** +!JOE-Canuto/Kitamura mod +! e2 = q3sq - e2c*ghel * qdiv**2 +! e3 = q3sq + e3c*ghel * qdiv**2 +! e4 = q3sq - e4c*ghel * qdiv**2 + e2 = q3sq - e2c*ghel/a2den * qdiv**2 + e3 = q3sq + e3c*ghel/(a2den**2) * qdiv**2 + e4 = q3sq - e4c*ghel/a2den * qdiv**2 +!JOE-end + eden = e2*e4 + e3 *e5c*gmel * qdiv**2 +! +!JOE-Canuto/Kitamura mod +! wden = cc3*gtr**2 * dlsq**2/elsq * qdiv**2 & +! & *( e2*e4c - e3c*e5c*gmel * qdiv**2 ) + wden = cc3*gtr**2 * dlsq**2/elsq * qdiv**2 & + & *( e2*e4c/a2den - e3c*e5c*gmel/(a2den**2) * qdiv**2 ) +!JOE-end +! + IF ( wden .NE. 0.0 ) THEN + clow = q3sq*( 0.12-cw25 )*eden/wden + cupp = q3sq*( 0.76-cw25 )*eden/wden +! + IF ( wden .GT. 0.0 ) THEN + c3sq = MIN( MAX( c3sq, c2sq+clow ), c2sq+cupp ) + ELSE + c3sq = MAX( MIN( c3sq, c2sq+clow ), c2sq+cupp ) + END IF + END IF +! 
+ e1 = e2 + e5c*gmel * qdiv**2 + eden = MAX( eden, 1.0d-20 ) +! Modified: Dec/22/2005, up to here +! +!JOE-Canuto/Kitamura mod +! e6c = 3.0*a2*cc3*gtr * dlsq/elsq + e6c = 3.0*(a2/a2den)*cc3*gtr * dlsq/elsq +!JOE-end +! +! ** for Gamma_theta ** +!! enum = qdiv*e6c*( t3sq-t2sq ) + IF ( t2sq .GE. 0.0 ) THEN + enum = MAX( qdiv*e6c*( t3sq-t2sq ), 0.0d0 ) + ELSE + enum = MIN( qdiv*e6c*( t3sq-t2sq ), 0.0d0 ) + ENDIF + + gamt =-e1 *enum /eden +! +! ** for Gamma_q ** +!! enum = qdiv*e6c*( r3sq-r2sq ) + IF ( r2sq .GE. 0.0 ) THEN + enum = MAX( qdiv*e6c*( r3sq-r2sq ), 0.0d0 ) + ELSE + enum = MIN( qdiv*e6c*( r3sq-r2sq ), 0.0d0 ) + ENDIF + + gamq =-e1 *enum /eden +! +! ** for Sm' and Sh'd(Theta_V)/dz ** +!! enum = qdiv*e6c*( c3sq-c2sq ) + enum = MAX( qdiv*e6c*( c3sq-c2sq ), 0.0d0) + +!JOE-Canuto/Kitamura mod +! smd = dlsq*enum*gtr/eden * qdiv**2 * (e3c+e4c)*a1/a2 + smd = dlsq*enum*gtr/eden * qdiv**2 * (e3c/(a2den**2) + & + & e4c/a2den)*a1/(a2/a2den) +!JOE-end + gamv = e1 *enum*gtr/eden +! + sm(k) = sm(k) +smd +! +! ** For elh (see below), qdiv at Level 3 is reset to 1.0. ** + qdiv = 1.0 +! ** Level 3 : end ** +! + IF (sh(k)<0.0 .OR. sm(k)<0.0) THEN + PRINT*,"MYM_TURBULENCE3.0: k=",k," sh=",sh(k) + PRINT*," gm=",gm(k)," gh=",gh(k)," sm=",sm(k) + PRINT*," q2sq=",q2sq," q3sq=",q3sq," q3/q2=",q3sq/q2sq + PRINT*," qke=",qke(k)," el=",el(k)," ri=",ri + PRINT*," PBLH=",zi," u=",u(k)," v=",v(k) + ENDIF + + ELSE +! ** At Level 2.5, qdiv is not reset. ** + gamt = 0.0 + gamq = 0.0 + gamv = 0.0 + END IF +! + elq = el(k)*qkw(k) + elh = elq*qdiv +! + pdk(k) = elq*( sm(k)*gm(k) & + & +sh(k)*gh(k)+gamv ) + pdt(k) = elh*( sh(k)*dtl(k)+gamt )*dtl(k) + pdq(k) = elh*( sh(k)*dqw(k)+gamq )*dqw(k) + pdc(k) = elh*( sh(k)*dtl(k)+gamt )& + &*dqw(k)*0.5 & + &+elh*( sh(k)*dqw(k)+gamq )*dtl(k)*0.5 +! + tcd(k) = elq*gamt + qcd(k) = elq*gamq +! + dfm(k) = elq*sm (k) / dzk + dfh(k) = elq*sh (k) / dzk +! Modified: Dec/22/2005, from here +! ** In sub.mym_predict, dfq for the TKE and scalar variance ** +! 
** are set to 3.0*dfm and 1.0*dfm, respectively. (Sqfac) ** + dfq(k) = dfm(k) +! Modified: Dec/22/2005, up to here + + IF ( bl_mynn_tkebudget == 1) THEN + !TKE BUDGET + dudz = ( u(k)-u(k-1) )/dzk + dvdz = ( v(k)-v(k-1) )/dzk + dTdz = ( thl(k)-thl(k-1) )/dzk + + upwp = -elq*sm(k)*dudz + vpwp = -elq*sm(k)*dvdz + Tpwp = -elq*sh(k)*dTdz + Tpwp = SIGN(MAX(ABS(Tpwp),1.E-6),Tpwp) + + IF ( k .EQ. kts+1 ) THEN + qWT1D(kts)=0. + q3sq_old =0. + qWTP_old =0. + !** Limitation on q, instead of L/q ** + dlsq1 = MAX(el(kts)**2,1.0) + IF ( q3sq_old/dlsq1 .LT. -gh(k) ) q3sq_old = -dlsq1*gh(k) + ENDIF + + !!!Vertical Transport Term + qWTP_new = elq*Sqfac*sm(k)*(q3sq - q3sq_old)/dzk + qWT1D(k) = 0.5*(qWTP_new - qWTP_old)/dzk + qWTP_old = elq*Sqfac*sm(k)*(q3sq - q3sq_old)/dzk + q3sq_old = q3sq + + !!!Shear Term + !!!qSHEAR1D(k)=-(upwp*dudz + vpwp*dvdz) + qSHEAR1D(k) = elq*sm(k)*gm(k) + + !!!Buoyancy Term + !!!qBUOY1D(k)=g*Tpwp/thl(k) + !qBUOY1D(k)= elq*(sh(k)*gh(k) + gamv) + qBUOY1D(k) = elq*(sh(k)*(-dTdz*g/thl(k)) + gamv) + + !!!Dissipation Term + qDISS1D(k) = (q3sq**(3./2.))/(b1*MAX(el(k),1.)) + ENDIF + + END DO +! + + dfm(kts) = 0.0 + dfh(kts) = 0.0 + dfq(kts) = 0.0 + tcd(kts) = 0.0 + qcd(kts) = 0.0 + + tcd(kte) = 0.0 + qcd(kte) = 0.0 + +! + DO k = kts,kte-1 + dzk = dz(k) + tcd(k) = ( tcd(k+1)-tcd(k) )/( dzk ) + qcd(k) = ( qcd(k+1)-qcd(k) )/( dzk ) + END DO +! + + IF ( bl_mynn_tkebudget == 1) THEN + !JOE-TKE BUDGET + qWT1D(kts)=0. + qSHEAR1D(kts)=qSHEAR1D(kts+1) + qBUOY1D(kts)=qBUOY1D(kts+1) + qDISS1D(kts)=qDISS1D(kts+1) + ENDIF + + RETURN + + END SUBROUTINE mym_turbulence + +! ================================================================== +! SUBROUTINE mym_predict: +! +! Input variables: see subroutine mym_initialize and turbulence +! qke(mx,my,nz) : qke at (n)th time level +! tsq, ...cov : ditto +! +! Output variables: +! qke(mx,my,nz) : qke at (n+1)th time level +! tsq, ...cov : ditto +! +! Work arrays: +! qkw(mx,my,nz) : q at the center of the grid boxes (m/s) +! 
bp (mx,my,nz) : = 1/2*F, see below
+! rp (mx,my,nz) : = P-1/2*F*Q, see below
+!
+! # The equation for a turbulent quantity Q can be expressed as
+! dQ/dt + Ah + Av = Dh + Dv + P - F*Q, (1)
+! where A is the advection, D the diffusion, P the production,
+! F*Q the dissipation and h and v denote horizontal and vertical,
+! respectively. If Q is q^2, F is 2q/B_1L.
+! Using the Crank-Nicolson scheme for Av, Dv and F*Q, a finite
+! difference equation is written as
+! Q{n+1} - Q{n} = dt *( Dh{n} - Ah{n} + P{n} )
+! + dt/2*( Dv{n} - Av{n} - F*Q{n} )
+! + dt/2*( Dv{n+1} - Av{n+1} - F*Q{n+1} ), (2)
+! where n denotes the time level.
+! When the advection and diffusion terms are discretized as
+! dt/2*( Dv - Av ) = a(k)Q(k+1) - b(k)Q(k) + c(k)Q(k-1), (3)
+! Eq.(2) can be rewritten as
+! - a(k)Q(k+1) + [ 1 + b(k) + dt/2*F ]Q(k) - c(k)Q(k-1)
+! = Q{n} + dt *( Dh{n} - Ah{n} + P{n} )
+! + dt/2*( Dv{n} - Av{n} - F*Q{n} ), (4)
+! where Q on the left-hand side is at (n+1)th time level.
+!
+! In this subroutine, a(k), b(k) and c(k) are obtained from
+! subprogram coefvu and are passed to subprogram tinteg via
+! common. 1/2*F and P-1/2*F*Q are stored in bp and rp,
+! respectively. Subprogram tinteg solves Eq.(4).
+!
+! Modify this subroutine according to your numerical integration
+! scheme (program).
+!
+!------------------------------------------------------------------- + SUBROUTINE mym_predict (kts,kte,& + & levflag, & + & delt,& + & dz, & + & ust, flt, flq, pmz, phh, & + & el, dfq, & + & pdk, pdt, pdq, pdc,& + & qke, tsq, qsq, cov & + &) + +!------------------------------------------------------------------- + INTEGER, INTENT(IN) :: kts,kte + INTEGER, INTENT(IN) :: levflag + REAL, INTENT(IN) :: delt + REAL, DIMENSION(kts:kte), INTENT(IN) :: dz, dfq,el + REAL, DIMENSION(kts:kte), INTENT(INOUT) :: pdk, pdt, pdq, pdc + REAL, INTENT(IN) :: flt, flq, ust, pmz, phh + REAL, DIMENSION(kts:kte), INTENT(INOUT) :: qke,tsq, qsq, cov + + INTEGER :: k,nz + REAL, DIMENSION(kts:kte) :: qkw, bp, rp, df3q + REAL :: vkz,pdk1,phm,pdt1,pdq1,pdc1,b1l,b2l + REAL, DIMENSION(kts:kte) :: dtz + REAL, DIMENSION(1:kte-kts+1) :: a,b,c,d + + nz=kte-kts+1 + +! ** Strictly, vkz*h(i,j) -> vk*( 0.5*dz(1)*h(i,j)+z0 ) ** + vkz = vk*0.5*dz(kts) +! +! Modified: Dec/22/2005, from here +! ** dfq for the TKE is 3.0*dfm. ** +! CALL coefvu ( dfq, 3.0 ) ! make change here +! Modified: Dec/22/2005, up to here +! + DO k = kts,kte +!! qke(k) = MAX(qke(k), 0.0) + qkw(k) = SQRT( MAX( qke(k), 0.0 ) ) + !df3q(k)=3.*dfq(k) + df3q(k)=Sqfac*dfq(k) + dtz(k)=delt/dz(k) + END DO +! + pdk1 = 2.0*ust**3*pmz/( vkz ) + phm = 2.0/ust *phh/( vkz ) + pdt1 = phm*flt**2 + pdq1 = phm*flq**2 + pdc1 = phm*flt*flq +! +! ** pdk(i,j,1)+pdk(i,j,2) corresponds to pdk1. ** + pdk(kts) = pdk1 -pdk(kts+1) + +!! pdt(kts) = pdt1 -pdt(kts+1) +!! pdq(kts) = pdq1 -pdq(kts+1) +!! pdc(kts) = pdc1 -pdc(kts+1) + pdt(kts) = pdt(kts+1) + pdq(kts) = pdq(kts+1) + pdc(kts) = pdc(kts+1) +! +! ** Prediction of twice the turbulent kinetic energy ** +!! DO k = kts+1,kte-1 + DO k = kts,kte-1 + b1l = b1*0.5*( el(k+1)+el(k) ) + bp(k) = 2.*qkw(k) / b1l + rp(k) = pdk(k+1) + pdk(k) + END DO + +!! a(1)=0. +!! b(1)=1. +!! c(1)=-1. +!! d(1)=0. + +! Since df3q(kts)=0.0, a(1)=0.0 and b(1)=1.+dtz(k)*df3q(k+1)+bp(k)*delt. 
+ DO k=kts,kte-1 + a(k-kts+1)=-dtz(k)*df3q(k) + b(k-kts+1)=1.+dtz(k)*(df3q(k)+df3q(k+1))+bp(k)*delt + c(k-kts+1)=-dtz(k)*df3q(k+1) + d(k-kts+1)=rp(k)*delt + qke(k) + ENDDO + +!! DO k=kts+1,kte-1 +!! a(k-kts+1)=-dtz(k)*df3q(k) +!! b(k-kts+1)=1.+dtz(k)*(df3q(k)+df3q(k+1)) +!! c(k-kts+1)=-dtz(k)*df3q(k+1) +!! d(k-kts+1)=rp(k)*delt + qke(k) - qke(k)*bp(k)*delt +!! ENDDO + + a(nz)=-1. !0. + b(nz)=1. + c(nz)=0. + d(nz)=0. + + CALL tridiag(nz,a,b,c,d) + + DO k=kts,kte + qke(k)=d(k-kts+1) + ENDDO + + + IF ( levflag .EQ. 3 ) THEN +! +! Modified: Dec/22/2005, from here +! ** dfq for the scalar variance is 1.0*dfm. ** +! CALL coefvu ( dfq, 1.0 ) make change here +! Modified: Dec/22/2005, up to here +! +! ** Prediction of the temperature variance ** +!! DO k = kts+1,kte-1 + DO k = kts,kte-1 + b2l = b2*0.5*( el(k+1)+el(k) ) + bp(k) = 2.*qkw(k) / b2l + rp(k) = pdt(k+1) + pdt(k) + END DO + +!zero gradient for tsq at bottom and top + +!! a(1)=0. +!! b(1)=1. +!! c(1)=-1. +!! d(1)=0. + +! Since dfq(kts)=0.0, a(1)=0.0 and b(1)=1.+dtz(k)*dfq(k+1)+bp(k)*delt. + DO k=kts,kte-1 + a(k-kts+1)=-dtz(k)*dfq(k) + b(k-kts+1)=1.+dtz(k)*(dfq(k)+dfq(k+1))+bp(k)*delt + c(k-kts+1)=-dtz(k)*dfq(k+1) + d(k-kts+1)=rp(k)*delt + tsq(k) + ENDDO + +!! DO k=kts+1,kte-1 +!! a(k-kts+1)=-dtz(k)*dfq(k) +!! b(k-kts+1)=1.+dtz(k)*(dfq(k)+dfq(k+1)) +!! c(k-kts+1)=-dtz(k)*dfq(k+1) +!! d(k-kts+1)=rp(k)*delt + tsq(k) - tsq(k)*bp(k)*delt +!! ENDDO + + a(nz)=-1. !0. + b(nz)=1. + c(nz)=0. + d(nz)=0. + + CALL tridiag(nz,a,b,c,d) + + DO k=kts,kte + tsq(k)=d(k-kts+1) + ENDDO + +! ** Prediction of the moisture variance ** +!! DO k = kts+1,kte-1 + DO k = kts,kte-1 + b2l = b2*0.5*( el(k+1)+el(k) ) + bp(k) = 2.*qkw(k) / b2l + rp(k) = pdq(k+1) +pdq(k) + END DO + +!zero gradient for qsq at bottom and top + +!! a(1)=0. +!! b(1)=1. +!! c(1)=-1. +!! d(1)=0. + +! Since dfq(kts)=0.0, a(1)=0.0 and b(1)=1.+dtz(k)*dfq(k+1)+bp(k)*delt. 
+ DO k=kts,kte-1 + a(k-kts+1)=-dtz(k)*dfq(k) + b(k-kts+1)=1.+dtz(k)*(dfq(k)+dfq(k+1))+bp(k)*delt + c(k-kts+1)=-dtz(k)*dfq(k+1) + d(k-kts+1)=rp(k)*delt + qsq(k) + ENDDO + +!! DO k=kts+1,kte-1 +!! a(k-kts+1)=-dtz(k)*dfq(k) +!! b(k-kts+1)=1.+dtz(k)*(dfq(k)+dfq(k+1)) +!! c(k-kts+1)=-dtz(k)*dfq(k+1) +!! d(k-kts+1)=rp(k)*delt + qsq(k) -qsq(k)*bp(k)*delt +!! ENDDO + + a(nz)=-1. !0. + b(nz)=1. + c(nz)=0. + d(nz)=0. + + CALL tridiag(nz,a,b,c,d) + + DO k=kts,kte + qsq(k)=d(k-kts+1) + ENDDO + +! ** Prediction of the temperature-moisture covariance ** +!! DO k = kts+1,kte-1 + DO k = kts,kte-1 + b2l = b2*0.5*( el(k+1)+el(k) ) + bp(k) = 2.*qkw(k) / b2l + rp(k) = pdc(k+1) + pdc(k) + END DO + +!zero gradient for tqcov at bottom and top + +!! a(1)=0. +!! b(1)=1. +!! c(1)=-1. +!! d(1)=0. + +! Since dfq(kts)=0.0, a(1)=0.0 and b(1)=1.+dtz(k)*dfq(k+1)+bp(k)*delt. + DO k=kts,kte-1 + a(k-kts+1)=-dtz(k)*dfq(k) + b(k-kts+1)=1.+dtz(k)*(dfq(k)+dfq(k+1))+bp(k)*delt + c(k-kts+1)=-dtz(k)*dfq(k+1) + d(k-kts+1)=rp(k)*delt + cov(k) + ENDDO + +!! DO k=kts+1,kte-1 +!! a(k-kts+1)=-dtz(k)*dfq(k) +!! b(k-kts+1)=1.+dtz(k)*(dfq(k)+dfq(k+1)) +!! c(k-kts+1)=-dtz(k)*dfq(k+1) +!! d(k-kts+1)=rp(k)*delt + cov(k) - cov(k)*bp(k)*delt +!! ENDDO + + a(nz)=-1. !0. + b(nz)=1. + c(nz)=0. + d(nz)=0. + + CALL tridiag(nz,a,b,c,d) + + DO k=kts,kte + cov(k)=d(k-kts+1) + ENDDO + + ELSE +!! DO k = kts+1,kte-1 + DO k = kts,kte-1 + IF ( qkw(k) .LE. 0.0 ) THEN + b2l = 0.0 + ELSE + b2l = b2*0.25*( el(k+1)+el(k) )/qkw(k) + END IF +! + tsq(k) = b2l*( pdt(k+1)+pdt(k) ) + qsq(k) = b2l*( pdq(k+1)+pdq(k) ) + cov(k) = b2l*( pdc(k+1)+pdc(k) ) + END DO + +!! tsq(kts)=tsq(kts+1) +!! qsq(kts)=qsq(kts+1) +!! cov(kts)=cov(kts+1) + + tsq(kte)=tsq(kte-1) + qsq(kte)=qsq(kte-1) + cov(kte)=cov(kte-1) + + END IF + + END SUBROUTINE mym_predict + +! ================================================================== +! SUBROUTINE mym_condensation: +! +! Input variables: see subroutine mym_initialize and turbulence +! 
exner(nz) : Perturbation of the Exner function (J/kg K) +! defined on the walls of the grid boxes +! This is usually computed by integrating +! d(pi)/dz = h*g*tv/tref**2 +! from the upper boundary, where tv is the +! virtual potential temperature minus tref. +! +! Output variables: see subroutine mym_initialize +! cld(mx,my,nz) : Cloud fraction +! +! Work arrays: +! qmq(mx,my,nz) : Q_w-Q_{sl}, where Q_{sl} is the saturation +! specific humidity at T=Tl +! alp(mx,my,nz) : Functions in the condensation process +! bet(mx,my,nz) : ditto +! sgm(mx,my,nz) : Combined standard deviation sigma_s +! multiplied by 2/alp +! +! # qmq, alp, bet and sgm are allowed to share storage units with +! any four of other work arrays for saving memory. +! +! # Results are sensitive particularly to values of cp and rd. +! Set these values to those adopted by you. +! +!------------------------------------------------------------------- + SUBROUTINE mym_condensation (kts,kte, & + & dz, & + & thl, qw, & + & p,exner, & + & tsq, qsq, cov, & + & Sh, el, bl_mynn_cloudpdf,& !JOE - cloud PDF testing + & Vt, Vq) + +!------------------------------------------------------------------- + INTEGER, INTENT(IN) :: kts,kte, bl_mynn_cloudpdf + + REAL, DIMENSION(kts:kte), INTENT(IN) :: dz + REAL, DIMENSION(kts:kte), INTENT(IN) :: p,exner, thl, qw, & + &tsq, qsq, cov + + REAL, DIMENSION(kts:kte), INTENT(OUT) :: vt,vq + + REAL, DIMENSION(kts:kte) :: qmq,alp,bet,sgm,ql,cld + + DOUBLE PRECISION :: t3sq, r3sq, c3sq +! + + REAL :: p2a,t,esl,qsl,dqsl,q1,cld0,eq1,qll,& + &q2p,pt,rac,qt + INTEGER :: i,j,k + + REAL :: erf + + !JOE: NEW VARIABLES FOR ALTERNATE SIGMA + REAL::dth,dqw,dzk + REAL, DIMENSION(kts:kte), INTENT(IN) :: Sh,el + +! Note: kte needs to be larger than kts, i.e., kte >= kts+1. + + DO k = kts,kte-1 + p2a = exner(k) + t = thl(k)*p2a + +!x if ( ct .gt. 0.0 ) then +! a = 17.27 +! b = 237.3 +!x else +!x a = 21.87 +!x b = 265.5 +!x end if +! +! 
** 3.8 = 0.622*6.11 (hPa) ** + !SATURATED VAPOR PRESSURE + esl=svp11*EXP(svp2*(t-svpt0)/(t-svp3)) + !SATURATED SPECIFIC HUMIDITY + qsl=ep_2*esl/(p(k)-ep_3*esl) + !dqw/dT: Clausius-Clapeyron + dqsl = qsl*ep_2*ev/( rd*t**2 ) + !DEFICIT/EXCESS WATER CONTENT + qmq(k) = qw(k) -qsl + + alp(k) = 1.0/( 1.0+dqsl*xlvcp ) + bet(k) = dqsl*p2a +! + t3sq = MAX( tsq(k), 0.0 ) + r3sq = MAX( qsq(k), 0.0 ) + c3sq = cov(k) + c3sq = SIGN( MIN( ABS(c3sq), SQRT(t3sq*r3sq) ), c3sq ) +! + r3sq = r3sq +bet(k)**2*t3sq -2.0*bet(k)*c3sq + IF (bl_mynn_cloudpdf == 0) THEN + !ORIGINAL STANDARD DEVIATION: limit e-6 produces ~10% more BL clouds than e-10 + sgm(k) = SQRT( MAX( r3sq, 1.0d-10 )) + ELSE + !ALTERNATIVE FORM (Nakanishi & Niino 2004 BLM, eq. B6, and + ! Kuwano-Yoshida et al. 2010 QJRMS, eq. 7): + if (k .eq. kts) then + dzk = 0.5*dz(k) + else + dzk = 0.5*( dz(k) + dz(k-1) ) + end if + dth = 0.5*(thl(k+1)+thl(k)) - 0.5*(thl(k)+thl(MAX(k-1,kts))) + dqw = 0.5*(qw(k+1) + qw(k)) - 0.5*(qw(k) + qw(MAX(k-1,kts))) + sgm(k) = SQRT( MAX( (alp(k)**2 * MAX(el(k)**2,1.) * & + b2 * MAX(Sh(k),0.03))/4. * & + (dqw/dzk - bet(k)*(dth/dzk ))**2 , 1.0e-10) ) + ENDIF + END DO +! + DO k = kts,kte-1 + !NORMALIZED DEPARTURE FROM SATURATION + q1 = qmq(k) / sgm(k) + !CLOUD FRACTION. rr2 = 1/SQRT(2) = 0.707 + cld(k) = 0.5*( 1.0+erf( q1*rr2 ) ) +! IF (cld(k) < 0. .OR. cld(k) > 1.) THEN +! PRINT*,"MYM_CONDENSATION, k=",k," cld=",cld(k) +! PRINT*," r3sq=",r3sq," t3sq=",t3sq," c3sq=",c3sq +! ENDIF +! q1=0. +! cld(k)=0. + + !qll IS THE NORMALIZED LIQUID WATER CONTENT (Sommeria and + !Deardorff (1977, eq 29a). rrp = 1/(sqrt(2*pi)) = 0.3989 + eq1 = rrp*EXP( -0.5*q1*q1 ) + qll = MAX( cld(k)*q1 + eq1, 0.0 ) + !ESTIMATED LIQUID WATER CONTENT (UNNORMALIZED) + ql (k) = alp(k)*sgm(k)*qll +! 
+ q2p = xlvcp/exner(k) + !POTENTIAL TEMPERATURE + pt = thl(k) +q2p*ql(k) + !qt is a THETA-V CONVERSION FOR TOTAL WATER (i.e., THETA-V = qt*THETA) + qt = 1.0 +p608*qw(k) -(1.+p608)*ql(k) + rac = alp(k)*( cld(k)-qll*eq1 )*( q2p*qt-(1.+p608)*pt ) + + !BUOYANCY FACTORS: wherever vt and vq are used, there is a + !"+1" and "+tv0", respectively, so these are subtracted out here. + !vt is unitless and vq has units of K. + vt (k) = qt-1.0 -rac*bet(k) + vq (k) = p608*pt-tv0 +rac + END DO +! + + cld(kte) = cld(kte-1) + ql(kte) = ql(kte-1) + vt(kte) = vt(kte-1) + vq(kte) = vq(kte-1) + + RETURN + + END SUBROUTINE mym_condensation + +! ================================================================== + SUBROUTINE mynn_tendencies(kts,kte,& + &levflag,grav_settling,& + &delt,& + &dz,& + &u,v,th,qv,qc,qi,qni,& !qnc,& + &p,exner,& + &thl,sqv,sqc,sqi,sqw,& + &ust,flt,flq,flqv,flqc,wspd,qcg,& + &uoce,voce,& + &tsq,qsq,cov,& + &tcd,qcd,& + &dfm,dfh,dfq,& + &Du,Dv,Dth,Dqv,Dqc,Dqi,Dqni&!,Dqnc& + &,vdfg1& !Katata/JOE-fogdes + &,FLAG_QI,FLAG_QNI,FLAG_QC,FLAG_QNC & + &) + +!------------------------------------------------------------------- + INTEGER, INTENT(in) :: kts,kte + INTEGER, INTENT(in) :: grav_settling,levflag + LOGICAL, INTENT(IN) :: FLAG_QI,FLAG_QNI,FLAG_QC,FLAG_QNC + +!! grav_settling = 1 or 2 for gravitational settling of droplets +!! grav_settling = 0 otherwise +! thl - liquid water potential temperature +! qw - total water +! dfm,dfh,dfq - as above +! flt - surface flux of thl +! flq - surface flux of qw + + REAL, DIMENSION(kts:kte), INTENT(in) :: u,v,th,qv,qc,qi,qni,&!qnc,& + &p,exner,dfm,dfh,dfq,dz,tsq,qsq,cov,tcd,qcd + REAL, DIMENSION(kts:kte), INTENT(inout) :: thl,sqw,sqv,sqc,sqi + REAL, DIMENSION(kts:kte), INTENT(inout) :: du,dv,dth,dqv,dqc,dqi,& + &dqni!,dqnc + REAL, INTENT(IN) :: delt,ust,flt,flq,flqv,flqc,wspd,uoce,voce,qcg + +! REAL, INTENT(IN) :: delt,ust,flt,flq,qcg,& +! 
&gradu_top,gradv_top,gradth_top,gradqv_top + +!local vars + + REAL, DIMENSION(kts:kte) :: dtz,vt,vq,qni2!,qnc2 + + REAL, DIMENSION(1:kte-kts+1) :: a,b,c,d + + REAL :: rhs,gfluxm,gfluxp,dztop + + REAL :: grav_settling2,vdfg1 !Katata-fogdes + + INTEGER :: k,kk,nz + + nz=kte-kts+1 + + dztop=.5*(dz(kte)+dz(kte-1)) + + DO k=kts,kte + dtz(k)=delt/dz(k) + ENDDO + +!!============================================ +!! u +!!============================================ + + k=kts + + a(1)=0. + b(1)=1.+dtz(k)*(dfm(k+1)+ust**2/wspd) + c(1)=-dtz(k)*dfm(k+1) +! d(1)=u(k) + d(1)=u(k)+dtz(k)*uoce*ust**2/wspd + +!! a(1)=0. +!! b(1)=1.+dtz(k)*dfm(k+1) +!! c(1)=-dtz(k)*dfm(k+1) +!! d(1)=u(k)*(1.-ust**2/wspd*dtz(k)) + + DO k=kts+1,kte-1 + kk=k-kts+1 + a(kk)=-dtz(k)*dfm(k) + b(kk)=1.+dtz(k)*(dfm(k)+dfm(k+1)) + c(kk)=-dtz(k)*dfm(k+1) + d(kk)=u(k) + ENDDO + +!! no flux at the top + +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=0. + +!! specified gradient at the top + +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=gradu_top*dztop + +!! prescribed value + + a(nz)=0 + b(nz)=1. + c(nz)=0. + d(nz)=u(kte) + + CALL tridiag(nz,a,b,c,d) + + DO k=kts,kte + du(k)=(d(k-kts+1)-u(k))/delt + ENDDO + +!!============================================ +!! v +!!============================================ + + k=kts + + a(1)=0. + b(1)=1.+dtz(k)*(dfm(k+1)+ust**2/wspd) + c(1)=-dtz(k)*dfm(k+1) +! d(1)=v(k) + d(1)=v(k)+dtz(k)*voce*ust**2/wspd + +!! a(1)=0. +!! b(1)=1.+dtz(k)*dfm(k+1) +!! c(1)=-dtz(k)*dfm(k+1) +!! d(1)=v(k)*(1.-ust**2/wspd*dtz(k)) + + DO k=kts+1,kte-1 + kk=k-kts+1 + a(kk)=-dtz(k)*dfm(k) + b(kk)=1.+dtz(k)*(dfm(k)+dfm(k+1)) + c(kk)=-dtz(k)*dfm(k+1) + d(kk)=v(k) + ENDDO + +!! no flux at the top + +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=0. + + +!! specified gradient at the top + +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=gradv_top*dztop + +!! prescribed value + + a(nz)=0 + b(nz)=1. + c(nz)=0. 
+ d(nz)=v(kte) + + CALL tridiag(nz,a,b,c,d) + + DO k=kts,kte + dv(k)=(d(k-kts+1)-v(k))/delt + ENDDO + +!!============================================ +!! thl +!! NOTE: currently, gravitational settling is removed +!!============================================ + k=kts + + a(1)=0. + b(1)=1.+dtz(k)*dfh(k+1) + c(1)=-dtz(k)*dfh(k+1) + +!Katata - added +! grav_settling2 = MIN(REAL(grav_settling),1.) +!Katata - end +! +! if qcg not used then assume constant flux in the surface layer +!JOE-remove original code +! IF (qcg < qcgmin) THEN +! IF (sqc(k) > qcgmin) THEN +! gfluxm=grav_settling2*gno*sqc(k)**gpw +! ELSE +! gfluxm=0. +! ENDIF +! ELSE +! gfluxm=grav_settling2*gno*(qcg/(1.+qcg))**gpw +! ENDIF +!and replace with vdfg1 is computed in module_sf_fogdes.F. +! IF (sqc(k) > qcgmin) THEN +! !gfluxm=grav_settling2*gno*sqc(k)**gpw +! gfluxm=grav_settling2*sqc(k)*vdfg1 +! ELSE +! gfluxm=0. +! ENDIF +!JOE-end +! +! IF (.5*(sqc(k+1)+sqc(k)) > qcgmin) THEN +! gfluxp=grav_settling2*gno*(.5*(sqc(k+1)+sqc(k)))**gpw +! ELSE +! gfluxp=0. +! ENDIF + + rhs= tcd(k) !-xlvcp/exner(k)*& +! ((gfluxp - gfluxm)/dz(k)) + + d(1)=thl(k) + dtz(k)*flt + rhs*delt + + DO k=kts+1,kte-1 + kk=k-kts+1 + a(kk)=-dtz(k)*dfh(k) + b(kk)=1.+dtz(k)*(dfh(k)+dfh(k+1)) + c(kk)=-dtz(k)*dfh(k+1) + +! IF (.5*(sqc(k+1)+sqc(k)) > qcgmin) THEN +! gfluxp=grav_settling2*gno*(.5*(sqc(k+1)+sqc(k)))**gpw +! ELSE +! gfluxp=0. +! ENDIF +! +! IF (.5*(sqc(k-1)+sqc(k)) > qcgmin) THEN +! gfluxm=grav_settling2*gno*(.5*(sqc(k-1)+sqc(k)))**gpw +! ELSE +! gfluxm=0. +! ENDIF + + rhs= tcd(k) !-xlvcp/exner(k)*& +! &((gfluxp - gfluxm)/dz(k)) + + d(kk)=thl(k) + rhs*delt + ENDDO + +!! no flux at the top + +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=0. + +!! specified gradient at the top + +!assume gradthl_top=gradth_top + +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=gradth_top*dztop + +!! prescribed value + + a(nz)=0. + b(nz)=1. + c(nz)=0. 
+ d(nz)=thl(kte) + + CALL tridiag(nz,a,b,c,d) + + DO k=kts,kte + thl(k)=d(k-kts+1) + ENDDO + +!!============================================ +!! NO LONGER MIX total water (sqw = sqc + sqv) +!! NOTE: no total water tendency is output +!!============================================ +! +! k=kts +! +! a(1)=0. +! b(1)=1.+dtz(k)*dfh(k+1) +! c(1)=-dtz(k)*dfh(k+1) +! +!JOE: replace orig code with fogdep +! IF (qcg < qcgmin) THEN +! IF (sqc(k) > qcgmin) THEN +! gfluxm=grav_settling2*gno*sqc(k)**gpw +! ELSE +! gfluxm=0. +! ENDIF +! ELSE +! gfluxm=grav_settling2*gno*(qcg/(1.+qcg))**gpw +! ENDIF +!and replace with fogdes code + remove use of qcg: +! IF (sqc(k) > qcgmin) THEN +! !gfluxm=grav_settling2*gno*(.5*(sqc(k)+sqc(k)))**gpw +! gfluxm=grav_settling2*sqc(k)*vdfg1 +! ELSE +! gfluxm=0. +! ENDIF +!JOE-end +! +! IF (.5*(sqc(k+1)+sqc(k)) > qcgmin) THEN +! gfluxp=grav_settling2*gno*(.5*(sqc(k+1)+sqc(k)))**gpw +! ELSE +! gfluxp=0. +! ENDIF +! +! rhs= qcd(k) !+ (gfluxp - gfluxm)/dz(k)& +! +! d(1)=sqw(k) + dtz(k)*flq + rhs*delt +! +! DO k=kts+1,kte-1 +! kk=k-kts+1 +! a(kk)=-dtz(k)*dfh(k) +! b(kk)=1.+dtz(k)*(dfh(k)+dfh(k+1)) +! c(kk)=-dtz(k)*dfh(k+1) +! +! IF (.5*(sqc(k+1)+sqc(k)) > qcgmin) THEN +! gfluxp=grav_settling2*gno*(.5*(sqc(k+1)+sqc(k)))**gpw +! ELSE +! gfluxp=0. +! ENDIF +! +! IF (.5*(sqc(k-1)+sqc(k)) > qcgmin) THEN +! gfluxm=grav_settling2*gno*(.5*(sqc(k-1)+sqc(k)))**gpw +! ELSE +! gfluxm=0. +! ENDIF +! +! rhs= qcd(k) !+ (gfluxp - gfluxm)/dz(k)& +! +! d(kk)=sqw(k) + rhs*delt +! ENDDO + + +!! no flux at the top + +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=0. + +!! specified gradient at the top +!assume gradqw_top=gradqv_top + +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=gradqv_top*dztop + +!! prescribed value + +! a(nz)=0. +! b(nz)=1. +! c(nz)=0. +! d(nz)=sqw(kte) +! +! CALL tridiag(nz,a,b,c,d) +! +! DO k=kts,kte +! sqw(k)=d(k-kts+1) +! ENDDO + +!!============================================ +!! 
cloud water ( sqc ) +!!============================================ +IF (Cloudmix > 0.5 .AND. FLAG_QC) THEN + + k=kts + + a(1)=0. + b(1)=1.+dtz(k)*dfh(k+1) + c(1)=-dtz(k)*dfh(k+1) + + rhs = qcd(k) + d(1)=sqc(k) + dtz(k)*flqc + rhs*delt + + DO k=kts+1,kte-1 + kk=k-kts+1 + a(kk)=-dtz(k)*dfh(k) + b(kk)=1.+dtz(k)*(dfh(k)+dfh(k+1)) + c(kk)=-dtz(k)*dfh(k+1) + + rhs = qcd(k) + d(kk)=sqc(k) + rhs*delt + ENDDO + +!! prescribed value + a(nz)=0. + b(nz)=1. + c(nz)=0. + d(nz)=sqc(kte) + + CALL tridiag(nz,a,b,c,d) + + DO k=kts,kte + sqc(k)=d(k-kts+1) + ENDDO + +ENDIF + +!!============================================ +!! cloud water number concentration ( qnc ) +!!============================================ +!IF (Cloudmix > 0.5 .AND. FLAG_QNC) THEN +! +! k=kts +! +! a(1)=0. +! b(1)=1.+dtz(k)*dfh(k+1) +! c(1)=-dtz(k)*dfh(k+1) +! +! rhs =qcd(k) +! d(1)=qnc(k) !+ dtz(k)*flqc + rhs*delt +! +! DO k=kts+1,kte-1 +! kk=k-kts+1 +! a(kk)=-dtz(k)*dfh(k) +! b(kk)=1.+dtz(k)*(dfh(k)+dfh(k+1)) +! c(kk)=-dtz(k)*dfh(k+1) +! +! rhs = qcd(k) +! d(kk)=qnc(k) + rhs*delt +! ENDDO +! +!! prescribed value +! a(nz)=0. +! b(nz)=1. +! c(nz)=0. +! d(nz)=qnc(kte) +! +! CALL tridiag(nz,a,b,c,d) +! +! DO k=kts,kte +! qnc2(k)=d(k-kts+1) +! ENDDO +! +!ELSE +! qnc2=qnc +!ENDIF + +!!============================================ +!! MIX WATER VAPOR ONLY ( sqv ) +!!============================================ + + k=kts + + a(1)=0. + b(1)=1.+dtz(k)*dfh(k+1) + c(1)=-dtz(k)*dfh(k+1) + d(1)=sqv(k) + dtz(k)*flqv + qcd(k)*delt + + DO k=kts+1,kte-1 + kk=k-kts+1 + a(kk)=-dtz(k)*dfh(k) + b(kk)=1.+dtz(k)*(dfh(k)+dfh(k+1)) + c(kk)=-dtz(k)*dfh(k+1) + d(kk)=sqv(k) + qcd(k)*delt + ENDDO + +!! no flux at the top +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=0. + +!! specified gradient at the top +!assume gradqw_top=gradqv_top +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=gradqv_top*dztop + +!! prescribed value + a(nz)=0. + b(nz)=1. + c(nz)=0. 
+ d(nz)=sqv(kte) + + CALL tridiag(nz,a,b,c,d) + + DO k=kts,kte + sqv(k)=d(k-kts+1) + ENDDO + +!!============================================ +!! MIX CLOUD ICE ( sqi ) +!!============================================ +IF (Cloudmix > 0.5 .AND. FLAG_QI) THEN + + k=kts + + a(1)=0. + b(1)=1.+dtz(k)*dfh(k+1) + c(1)=-dtz(k)*dfh(k+1) + d(1)=sqi(k) + qcd(k)*delt !should we have qcd for ice??? + + DO k=kts+1,kte-1 + kk=k-kts+1 + a(kk)=-dtz(k)*dfh(k) + b(kk)=1.+dtz(k)*(dfh(k)+dfh(k+1)) + c(kk)=-dtz(k)*dfh(k+1) + d(kk)=sqi(k) + qcd(k)*delt + ENDDO + +!! no flux at the top +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=0. + +!! specified gradient at the top +!assume gradqw_top=gradqv_top +! a(nz)=-1. +! b(nz)=1. +! c(nz)=0. +! d(nz)=gradqv_top*dztop + +!! prescribed value + a(nz)=0. + b(nz)=1. + c(nz)=0. + d(nz)=sqi(kte) + + CALL tridiag(nz,a,b,c,d) + + DO k=kts,kte + sqi(k)=d(k-kts+1) + ENDDO + +ENDIF + +!!============================================ +!! ice water number concentration (qni) +!!============================================ +IF (Cloudmix > 0.5 .AND. FLAG_QNI) THEN + + k=kts + + a(1)=0. + b(1)=1.+dtz(k)*dfh(k+1) + c(1)=-dtz(k)*dfh(k+1) + + rhs = qcd(k) + + d(1)=qni(k) !+ dtz(k)*flqc + rhs*delt + + DO k=kts+1,kte-1 + kk=k-kts+1 + a(kk)=-dtz(k)*dfh(k) + b(kk)=1.+dtz(k)*(dfh(k)+dfh(k+1)) + c(kk)=-dtz(k)*dfh(k+1) + + rhs = qcd(k) + d(kk)=qni(k) + rhs*delt + + ENDDO + +!! prescribed value + a(nz)=0. + b(nz)=1. + c(nz)=0. + d(nz)=qni(kte) + + CALL tridiag(nz,a,b,c,d) + + DO k=kts,kte + qni2(k)=d(k-kts+1) + ENDDO +ELSE + qni2=qni +ENDIF + +!!============================================ +!! convert to mixing ratios for wrf +!!============================================ +!!NOTE: added number conc tendencies for double moment schemes + + DO k=kts,kte + !sqw(k)=d(k-kts+1) + Dqv(k)=(sqv(k)/(1.-sqv(k))-qv(k))/delt + !qc settling tendency is now computed in module_bl_fogdes.F, so + !sqc should only be changed by turbulent mixing. 
+ Dqc(k)=(sqc(k)/(1.-sqc(k))-qc(k))/delt + Dqi(k)=(sqi(k)/(1.-sqi(k))-qi(k))/delt + ! Dqnc(k)=(qnc2(k)-qnc(k))/delt + Dqni(k)=(qni2(k)-qni(k))/delt + Dth(k)=(thl(k) + xlvcp/exner(k)*sqc(k) & + & + xlscp/exner(k)*sqi(k) & + & - th(k))/delt + !Dth(k)=(thl(k)+xlvcp/exner(k)*sqc(k)-th(k))/delt + ENDDO + + END SUBROUTINE mynn_tendencies + +! ================================================================== + SUBROUTINE retrieve_exchange_coeffs(kts,kte,& + &dfm,dfh,dfq,dz,& + &K_m,K_h,K_q) + +!------------------------------------------------------------------- + + INTEGER , INTENT(in) :: kts,kte + + REAL, DIMENSION(KtS:KtE), INTENT(in) :: dz,dfm,dfh,dfq + + REAL, DIMENSION(KtS:KtE), INTENT(out) :: & + &K_m, K_h, K_q + + + INTEGER :: k + REAL :: dzk + + K_m(kts)=0. + K_h(kts)=0. + K_q(kts)=0. + + DO k=kts+1,kte + dzk = 0.5 *( dz(k)+dz(k-1) ) + K_m(k)=dfm(k)*dzk + K_h(k)=dfh(k)*dzk + K_q(k)=dfq(k)*dzk + ENDDO + + END SUBROUTINE retrieve_exchange_coeffs + +! ================================================================== + SUBROUTINE tridiag(n,a,b,c,d) + +!! to solve system of linear eqs on tridiagonal matrix n times n +!! after Peaceman and Rachford, 1955 +!! a,b,c,d - are vectors of order n +!! a,b,c - are coefficients on the LHS +!! d - is initially RHS on the output becomes a solution vector + +!------------------------------------------------------------------- + + INTEGER, INTENT(in):: n + REAL, DIMENSION(n), INTENT(in) :: a,b + REAL, DIMENSION(n), INTENT(inout) :: c,d + + INTEGER :: i + REAL :: p + REAL, DIMENSION(n) :: q + + c(n)=0. + q(1)=-c(1)/b(1) + d(1)=d(1)/b(1) + + DO i=2,n + p=1./(b(i)+a(i)*q(i-1)) + q(i)=-c(i)*p + d(i)=(d(i)-a(i)*d(i-1))*p + ENDDO + + DO i=n-1,1,-1 + d(i)=d(i)+q(i)*d(i+1) + ENDDO + + END SUBROUTINE tridiag + +! ================================================================== + SUBROUTINE mynn_bl_driver(& + &initflag,& + &grav_settling,& + &delt,& + &dz,& + &u,v,th,qv,qc,qi,qni,&! 
qnc& !JOE: ice & num conc mixing + &p,exner,rho,& + &xland,ts,qsfc,qcg,ps,& + &ust,ch,hfx,qfx,rmol,wspd,& + &uoce,voce,& !ocean current + &vdfg,& !Katata-added for fog dep + &Qke,tke_pbl,& !JOE: add TKE for coupling + &qke_adv,bl_mynn_tkeadvect,& !ACF for QKE advection + &Tsq,Qsq,Cov,& + &Du,Dv,Dth,& + &Dqv,Dqc,Dqi,Dqni,& !Dqnc,& !JOE: ice & nim conc mixing + &K_m,K_h,K_q,& +! &K_h,k_m,& + &Pblh,kpbl& !JOE-added kpbl for coupling + &,el_pbl& + &,dqke,qWT,qSHEAR,qBUOY,qDISS & !JOE-TKE BUDGET + &,wstar,delta & !JOE-added for grims + &,bl_mynn_tkebudget & !JOE-TKE BUDGET + &,bl_mynn_cloudpdf,Sh3D & !JOE-cloudPDF testing + ! optional arguments + &,FLAG_QI,FLAG_QNI,FLAG_QC,FLAG_QNC & + &,IDS,IDE,JDS,JDE,KDS,KDE & + &,IMS,IME,JMS,JME,KMS,KME & + &,ITS,ITE,JTS,JTE,KTS,KTE) + +!------------------------------------------------------------------- + + INTEGER, INTENT(in) :: initflag + !INPUT NAMELIST OPTIONS: + INTEGER, INTENT(in) :: grav_settling + INTEGER, INTENT(in) :: bl_mynn_tkebudget + INTEGER, INTENT(in) :: bl_mynn_cloudpdf + LOGICAL, INTENT(IN) :: bl_mynn_tkeadvect + + LOGICAL, INTENT(IN) :: FLAG_QI,FLAG_QNI,FLAG_QC,FLAG_QNC + + INTEGER,INTENT(IN) :: & + & IDS,IDE,JDS,JDE,KDS,KDE & + &,IMS,IME,JMS,JME,KMS,KME & + &,ITS,ITE,JTS,JTE,KTS,KTE + + +! initflag > 0 for TRUE +! else for FALSE +! levflag : <>3; Level 2.5 +! = 3; Level 3 +! grav_settling = 1 when gravitational settling accounted for +! grav_settling = 0 when gravitational settling NOT accounted for + + REAL, INTENT(in) :: delt + REAL, DIMENSION(IMS:IME,KMS:KME,JMS:JME), INTENT(in) :: dz,& + &u,v,th,qv,qc,p,exner,rho + REAL, DIMENSION(IMS:IME,KMS:KME,JMS:JME), OPTIONAL, INTENT(in)::& + &qi,qni! ,qnc + REAL, DIMENSION(IMS:IME,JMS:JME), INTENT(in) :: xland,ust,& +! 
&ch,rmol,ts,qsfc,qcg,ps,hfx,qfx, wspd,uoce,voce +!Katata-added for extra in-output + &ch,rmol,ts,qsfc,qcg,ps,hfx,qfx, wspd,uoce,voce, vdfg +!Katata-end + + REAL, DIMENSION(IMS:IME,KMS:KME,JMS:JME), INTENT(inout) :: & + &Qke,Tsq,Qsq,Cov, & + &tke_pbl, & !JOE-added for coupling (TKE_PBL = QKE/2) + &qke_adv !ACF for QKE advection + + REAL, DIMENSION(IMS:IME,KMS:KME,JMS:JME), INTENT(inout) :: & + &Du,Dv,Dth,Dqv,Dqc,Dqi,Dqni!,Dqnc + + REAL, DIMENSION(IMS:IME,KMS:KME,JMS:JME), INTENT(out) :: & + &K_h,K_m + + REAL, DIMENSION(IMS:IME,JMS:JME), INTENT(inout) :: & + &Pblh,wstar,delta !JOE-added for GRIMS + INTEGER,DIMENSION(IMS:IME,JMS:JME),INTENT(INOUT) :: & + &KPBL + + REAL, DIMENSION(IMS:IME,KMS:KME,JMS:JME), INTENT(inout) :: & + &el_pbl + +!JOE-TKE BUDGET + REAL, DIMENSION(IMS:IME,KMS:KME,JMS:JME), INTENT(out) :: & + &qWT,qSHEAR,qBUOY,qDISS,dqke + ! 3D budget arrays are not allocated when bl_mynn_tkebudget == 0. + ! 1D (local) budget arrays are used for passing between subroutines. + REAL, DIMENSION(KTS:KTE) :: qWT1,qSHEAR1,qBUOY1,qDISS1,dqke1 +!JOE-end + REAL, DIMENSION(IMS:IME,KMS:KME,JMS:JME) :: K_q,Sh3D + +!local vars + INTEGER :: ITF,JTF,KTF, IMD,JMD + INTEGER :: i,j,k + REAL, DIMENSION(KTS:KTE) :: thl,sqv,sqc,sqi,sqw,& + &El, Dfm, Dfh, Dfq, Tcd, Qcd, Pdk, Pdt, Pdq, Pdc, Vt, Vq + + REAL, DIMENSION(KTS:KTE) :: thetav,sh,u1,v1,p1,ex1,dz1,th1,qke1, & + & tsq1,qsq1,cov1,qv1,qi1,qc1,du1,dv1,dth1,dqv1,dqc1,dqi1, & + & k_m1,k_h1,k_q1,qni1,dqni1!,qnc1,dqnc1 + + REAL, DIMENSION(KTS:KTE+1) :: zw + + REAL :: cpm,sqcg,flt,flq,flqv,flqc,pmz,phh,exnerg,zet,& + &afk,abk +!JOE-add GRIMS parameters & variables + real,parameter :: d1 = 0.02, d2 = 0.05, d3 = 0.001 + real,parameter :: h1 = 0.33333335, h2 = 0.6666667 + REAL :: govrth, sflux, bfx0, wstar3, wm2, wm3, delb +!JOE-end GRIMS + INTEGER, SAVE :: levflag + +!*** Begin debugging + IMD=(IMS+IME)/2 + JMD=(JMS+JME)/2 +!*** End debugging + + JTF=MIN0(JTE,JDE-1) + ITF=MIN0(ITE,IDE-1) + KTF=MIN0(KTE,KDE-1) + + levflag=mynn_level + + IF 
(initflag > 0) THEN +! write(0,*) +! write(0,*) '--- bl_mynn initflag = ', initflag +! write(0,*) '--- bl_mynn mynn_level = ', levflag +! write(0,*) '--- initialize sh3d, el_pbl, tsq, qsq, cov' +! write(0,*) + Sh3D(its:ite,kts:kte,jts:jte)=0. + el_pbl(its:ite,kts:kte,jts:jte)=0. + tsq(its:ite,kts:kte,jts:jte)=0. + qsq(its:ite,kts:kte,jts:jte)=0. + cov(its:ite,kts:kte,jts:jte)=0. + + DO j=JTS,JTF + DO i=ITS,ITF + DO k=KTS,KTF + dz1(k)=dz(i,k,j) + u1(k) = u(i,k,j) + v1(k) = v(i,k,j) + th1(k)=th(i,k,j) + sqc(k)=qc(i,k,j)/(1.+qc(i,k,j)) + sqv(k)=qv(i,k,j)/(1.+qv(i,k,j)) + thetav(k)=th(i,k,j)*(1.+0.61*sqv(k)) + IF (PRESENT(qi) .AND. FLAG_QI ) THEN + sqi(k)=qi(i,k,j)/(1.+qi(i,k,j)) + sqw(k)=sqv(k)+sqc(k)+sqi(k) + thl(k)=th(i,k,j)- xlvcp/exner(i,k,j)*sqc(k) & + & - xlscp/exner(i,k,j)*sqi(k) + ELSE + sqi(k)=0.0 + sqw(k)=sqv(k)+sqc(k) + thl(k)=th(i,k,j)-xlvcp/exner(i,k,j)*sqc(k) + ENDIF + + IF (k==kts) THEN + zw(k)=0. + ELSE + zw(k)=zw(k-1)+dz(i,k-1,j) + ENDIF + + k_m(i,k,j)=0. + k_h(i,k,j)=0. + k_q(i,k,j)=0. + qke(i,k,j)=0.1-MIN(zw(k)*0.001, 0.0) + qke1(k)=qke(i,k,j) + el(k)=el_pbl(i,k,j) + sh(k)=Sh3D(i,k,j) + tsq1(k)=tsq(i,k,j) + qsq1(k)=qsq(i,k,j) + cov1(k)=cov(i,k,j) + + IF ( bl_mynn_tkebudget == 1) THEN + !TKE BUDGET VARIABLES + qWT(i,k,j)=0. + qSHEAR(i,k,j)=0. + qBUOY(i,k,j)=0. + qDISS(i,k,j)=0. + dqke(i,k,j)=0. + ENDIF + ENDDO + + zw(kte+1)=zw(kte)+dz(i,kte,j) + + CALL GET_PBLH(KTS,KTE,PBLH(i,j),thetav,& + & Qke1,zw,dz1,xland(i,j),KPBL(i,j)) + + CALL mym_initialize ( kts,kte,& + &dz1, zw, u1, v1, thl, sqv,& + &PBLH(i,j),th1,& !JOE-BouLac mod + &sh,& !JOE-cloudPDF mod + &ust(i,j), rmol(i,j),& + &el, Qke1, Tsq1, Qsq1, Cov1) + + !UPDATE 3D VARIABLES + DO k=KTS,KTE !KTF + el_pbl(i,k,j)=el(k) + sh3d(i,k,j)=sh(k) + qke(i,k,j)=qke1(k) + tsq(i,k,j)=tsq1(k) + qsq(i,k,j)=qsq1(k) + cov(i,k,j)=cov1(k) +!ACF,JOE- initialize qke_adv array if using advection + IF (bl_mynn_tkeadvect) THEN + qke_adv(i,k,j)=qke1(k) + ENDIF +!ACF,JOE-end + ENDDO + +!*** Begin debugging +! k=kdebug +! 
IF(I==IMD .AND. J==JMD)THEN +! PRINT*,"MYNN DRIVER INIT: k=",1," sh=",sh(k) +! PRINT*," sqw=",sqw(k)," thl=",thl(k)," k_m=",k_m(i,k,j) +! PRINT*," xland=",xland(i,j)," rmol=",rmol(i,j)," ust=",ust(i,j) +! PRINT*," qke=",qke(i,k,j)," el=",el_pbl(i,k,j)," tsq=",Tsq(i,k,j) +! PRINT*," PBLH=",PBLH(i,j)," u=",u(i,k,j)," v=",v(i,k,j) +! ENDIF +!*** End debugging + + ENDDO + ENDDO + + ENDIF ! end initflag + +!ACF copy qke_adv array into qke if using advection + IF (bl_mynn_tkeadvect) THEN + qke=qke_adv + ENDIF +!ACF-end + + DO j=JTS,JTF + DO i=ITS,ITF + DO k=KTS,KTF + !JOE-TKE BUDGET + IF ( bl_mynn_tkebudget == 1) THEN + dqke(i,k,j)=qke(i,k,j) + END IF + dz1(k)= dz(i,k,j) + u1(k) = u(i,k,j) + v1(k) = v(i,k,j) + th1(k)= th(i,k,j) + qv1(k)= qv(i,k,j) + qc1(k)= qc(i,k,j) + sqv(k)= qv(i,k,j)/(1.+qv(i,k,j)) + sqc(k)= qc(i,k,j)/(1.+qc(i,k,j)) + IF(PRESENT(qi) .AND. FLAG_QI)THEN + qi1(k)= qi(i,k,j) + sqi(k)= qi(i,k,j)/(1.+qi(i,k,j)) + sqw(k)= sqv(k)+sqc(k)+sqi(k) + thl(k)= th(i,k,j) - xlvcp/exner(i,k,j)*sqc(k) & + & - xlscp/exner(i,k,j)*sqi(k) + !print*,"MYNN: Flag_qi=",FLAG_QI,qi(i,k,j) + ELSE + qi1(k)=0.0 + sqi(k)=0.0 + sqw(k)= sqv(k)+sqc(k) + thl(k)= th(i,k,j)-xlvcp/exner(i,k,j)*sqc(k) + ENDIF + IF (PRESENT(qni) .AND. FLAG_QNI ) THEN + qni1(k)=qni(i,k,j) + !print*,"MYNN: Flag_qni=",FLAG_QNI,qni(i,k,j) + ELSE + qni1(k)=0.0 + ENDIF + !IF (PRESENT(qnc) .AND. FLAG_QNC ) THEN + ! qnc1(k)=qnc(i,k,j) + ! !print*,"MYNN: Flag_qnc=",FLAG_QNC,qnc(i,k,j) + !ELSE + ! qnc1(k)=0.0 + !ENDIF + thetav(k)=th(i,k,j)*(1.+0.608*sqv(k)) + p1(k) = p(i,k,j) + ex1(k)= exner(i,k,j) + el(k) = el_pbl(i,k,j) + qke1(k)=qke(i,k,j) + sh(k) = sh3d(i,k,j) + tsq1(k)=tsq(i,k,j) + qsq1(k)=qsq(i,k,j) + cov1(k)=cov(i,k,j) + + IF (k==kts) THEN + zw(k)=0. 
+ ELSE + zw(k)=zw(k-1)+dz(i,k-1,j) + ENDIF + ENDDO + + zw(kte+1)=zw(kte)+dz(i,kte,j) + + CALL GET_PBLH(KTS,KTE,PBLH(i,j),thetav,& + & Qke1,zw,dz1,xland(i,j),KPBL(i,j)) + + sqcg= 0.0 !JOE, it was: qcg(i,j)/(1.+qcg(i,j)) + cpm=cp*(1.+0.84*qv(i,kts,j)) + exnerg=(ps(i,j)/p1000mb)**rcp + + !----------------------------------------------------- + !ORIGINAL CODE + !flt = hfx(i,j)/( rho(i,kts,j)*cpm ) & + ! +xlvcp*ch(i,j)*(sqc(kts)/exner(i,kts,j) -sqcg/exnerg) + !flq = qfx(i,j)/ rho(i,kts,j) & + ! -ch(i,j)*(sqc(kts) -sqcg ) + !----------------------------------------------------- + ! Katata-added - The deposition velocity of cloud (fog) + ! water is used instead of CH. + flt = hfx(i,j)/( rho(i,kts,j)*cpm ) & + & +xlvcp*vdfg(i,j)*(sqc(kts)/exner(i,kts,j)- sqcg/exnerg) + flq = qfx(i,j)/ rho(i,kts,j) & + & -vdfg(i,j)*(sqc(kts) - sqcg ) + flqv = qfx(i,j)/rho(i,kts,j) + flqc = -vdfg(i,j)*(sqc(kts) - sqcg ) + + zet = 0.5*dz(i,kts,j)*rmol(i,j) + if ( zet >= 0.0 ) then + pmz = 1.0 + (cphm_st-1.0) * zet + phh = 1.0 + cphh_st * zet + else + pmz = 1.0/ (1.0-cphm_unst*zet)**0.25 - zet + phh = 1.0/SQRT(1.0-cphh_unst*zet) + end if + +!!!!! estimate wstar & delta for GRIMS shallow-cu + govrth = g/th1(kts) + sflux = hfx(i,j)/rho(i,kts,j)/cpm + & + qfx(i,j)/rho(i,kts,j)*ep_1*th1(kts) + bfx0 = max(sflux,0.) + wstar3 = (govrth*bfx0*pblh(i,j)) + wstar(i,j) = wstar3**h1 + wm3 = wstar3 + 5.*ust(i,j)**3. + wm2 = wm3**h2 + delb = govrth*d3*pblh(i,j) + delta(i,j) = min(d1*pblh(i,j) + d2*wm2/delb, 100.) +!!!!! end GRIMS + + CALL mym_condensation ( kts,kte,& + &dz1,thl,sqw,p1,ex1, & + &tsq1, qsq1, cov1, & + &Sh,el,bl_mynn_cloudpdf, & !JOE-added for cloud PDF testing (from Kuwano-Yoshida et al. 
2010) + &Vt, Vq) + + CALL mym_turbulence ( kts,kte,levflag, & + &dz1, zw, u1, v1, thl, sqc, sqw, & + &qke1, tsq1, qsq1, cov1, & + &vt, vq,& + &rmol(i,j), flt, flq, & + &PBLH(i,j),th1,& !JOE-BouLac mod + &Sh,& !JOE-cloudPDF mod + &el,& + &Dfm,Dfh,Dfq, & + &Tcd,Qcd,Pdk, & + &Pdt,Pdq,Pdc & + &,qWT1,qSHEAR1,qBUOY1,qDISS1 & !JOE-TKE BUDGET + &,bl_mynn_tkebudget & !JOE-TKE BUDGET + &) + + CALL mym_predict (kts,kte,levflag, & + &delt, dz1, & + &ust(i,j), flt, flq, pmz, phh, & + &el, dfq, pdk, pdt, pdq, pdc, & + &Qke1, Tsq1, Qsq1, Cov1) + + CALL mynn_tendencies(kts,kte,& + &levflag,grav_settling,& + &delt, dz1,& + &u1, v1, th1, qv1, qc1, qi1, qni1,&! qnc1,& + &p1, ex1, thl, sqv, sqc, sqi, sqw,& + &ust(i,j),flt,flq,flqv,flqc,wspd(i,j),qcg(i,j),& + &uoce(i,j),voce(i,j),& + &tsq1, qsq1, cov1,& + &tcd, qcd, & + &dfm, dfh, dfq,& + &Du1, Dv1, Dth1, Dqv1, Dqc1, Dqi1, Dqni1& !, Dqnc1& + &,vdfg(i,j)& !JOE/Katata- fog deposition + &,FLAG_QI,FLAG_QNI,FLAG_QC,FLAG_QNC & + &) + + !print*,"MYNN: qi_ten, qni_ten=",Dqi1(4),Dqni1(4) + !print*,"MYNN: qc_ten, qnc_ten=",Dqc1(4),Dqnc1(4) + + CALL retrieve_exchange_coeffs(kts,kte,& + &dfm, dfh, dfq, dz1,& + &K_m1, K_h1, K_q1) + + !UPDATE 3D ARRAYS + DO k=KTS,KTF + K_m(i,k,j)=K_m1(k) + K_h(i,k,j)=K_h1(k) + K_q(i,k,j)=K_q1(k) + du(i,k,j)=du1(k) + dv(i,k,j)=dv1(k) + dth(i,k,j)=dth1(k) + dqv(i,k,j)=dqv1(k) + dqc(i,k,j)=dqc1(k) + IF (PRESENT(qi) .AND. FLAG_QI) dqi(i,k,j)=dqi1(k) + !IF (PRESENT(qnc) .AND. FLAG_QNC) dqnc(i,k,j)=dqnc1(k) + IF (PRESENT(qni) .AND. FLAG_QNI) dqni(i,k,j)=dqni1(k) + el_pbl(i,k,j)=el(k) + qke(i,k,j)=qke1(k) + tsq(i,k,j)=tsq1(k) + qsq(i,k,j)=qsq1(k) + cov(i,k,j)=cov1(k) + sh3d(i,k,j)=sh(k) + IF ( bl_mynn_tkebudget == 1) THEN + dqke(i,k,j) = (qke1(k)-dqke(i,k,j))*0.5 !qke->tke + qWT(i,k,j) = qWT1(k)*delt + qSHEAR(i,k,j)= qSHEAR1(k)*delt + qBUOY(i,k,j) = qBUOY1(k)*delt + qDISS(i,k,j) = qDISS1(k)*delt + ENDIF + !*** Begin debugging +! IF ( sh(k) < 0. .OR. sh(k)> 200. .OR. & +! & qke(i,k,j) < -5. .OR. qke(i,k,j)> 200. .OR. & +! 
& el_pbl(i,k,j) < 0. .OR. el_pbl(i,k,j)> 2000. .OR. & +! & ABS(vt(k)) > 0.8 .OR. ABS(vq(k)) > 1100. .OR. & +! & k_m(i,k,j) < 0. .OR. k_m(i,k,j)> 2000. .OR. & +! & vdfg(i,j) < 0. .OR. vdfg(i,j)>5. ) THEN +! PRINT*,"SUSPICIOUS VALUES AT: k=",k," sh=",sh(k) +! PRINT*," sqw=",sqw(k)," thl=",thl(k)," k_m=",k_m(i,k,j) +! PRINT*," xland=",xland(i,j)," rmol=",rmol(i,j)," ust=",ust(i,j) +! PRINT*," qke=",qke(i,k,j)," el=",el_pbl(i,k,j)," tsq=",tsq(i,k,j) +! PRINT*," PBLH=",PBLH(i,j)," u=",u(i,k,j)," v=",v(i,k,j) +! PRINT*," vq=",vq(k)," vt=",vt(k)," vdfg=",vdfg(i,j) +! ENDIF + !*** End debugging + ENDDO +!JOE-add tke_pbl for coupling w/shallow-cu schemes (TKE_PBL = QKE/2.) +! TKE_PBL is defined on interfaces, while QKE is at middle of layer. + tke_pbl(i,kts,j) = 0.5*MAX(qke(i,kts,j),1.0e-10) + DO k = kts+1,kte + afk = dz1(k)/( dz1(k)+dz1(k-1) ) + abk = 1.0 -afk + tke_pbl(i,k,j) = 0.5*MAX(qke(i,k,j)*abk+qke(i,k-1,j)*afk,1.0e-3) + ENDDO +!JOE-end tke_pbl +!JOE-end addition + +!*** Begin debugging +! IF(I==IMD .AND. J==JMD)THEN +! k=kdebug +! PRINT*,"MYNN DRIVER END: k=",1," sh=",sh(k) +! PRINT*," sqw=",sqw(k)," thl=",thl(k)," k_m=",k_m(i,k,j) +! PRINT*," xland=",xland(i,j)," rmol=",rmol(i,j)," ust=",ust(i,j) +! PRINT*," qke=",qke(i,k,j)," el=",el_pbl(i,k,j)," tsq=",tsq(i,k,j) +! PRINT*," PBLH=",PBLH(i,j)," u=",u(i,k,j)," v=",v(i,k,j) +! PRINT*," vq=",vq(k)," vt=",vt(k)," vdfg=",vdfg(i,j) +! ENDIF +!*** End debugging + + ENDDO + ENDDO + +!ACF copy qke into qke_adv if using advection + IF (bl_mynn_tkeadvect) THEN + qke_adv=qke + ENDIF +!ACF-end + + END SUBROUTINE mynn_bl_driver + +#if !defined(mpas) +! 
================================================================== + SUBROUTINE mynn_bl_init_driver(& + &Du,Dv,Dth,Dqv,Dqc,Dqi & + !&,Dqnc,Dqni & + &,QKE,TKE_PBL,EXCH_H & + &,RESTART,ALLOWED_TO_READ,LEVEL & + &,IDS,IDE,JDS,JDE,KDS,KDE & + &,IMS,IME,JMS,JME,KMS,KME & + &,ITS,ITE,JTS,JTE,KTS,KTE) + + !--------------------------------------------------------------- + LOGICAL,INTENT(IN) :: ALLOWED_TO_READ,RESTART + INTEGER,INTENT(IN) :: LEVEL + + INTEGER,INTENT(IN) :: IDS,IDE,JDS,JDE,KDS,KDE, & + & IMS,IME,JMS,JME,KMS,KME, & + & ITS,ITE,JTS,JTE,KTS,KTE + + + REAL,DIMENSION(IMS:IME,KMS:KME,JMS:JME),INTENT(INOUT) :: & + &Du,Dv,Dth,Dqv,Dqc,Dqi, & !Dqnc,Dqni, + &QKE,TKE_PBL,EXCH_H + + INTEGER :: I,J,K,ITF,JTF,KTF + + JTF=MIN0(JTE,JDE-1) + KTF=MIN0(KTE,KDE-1) + ITF=MIN0(ITE,IDE-1) + + IF(.NOT.RESTART)THEN + DO J=JTS,JTF + DO K=KTS,KTF + DO I=ITS,ITF + Du(i,k,j)=0. + Dv(i,k,j)=0. + Dth(i,k,j)=0. + Dqv(i,k,j)=0. + if( p_qc >= param_first_scalar ) Dqc(i,k,j)=0. + if( p_qi >= param_first_scalar ) Dqi(i,k,j)=0. + !if( p_qnc >= param_first_scalar ) Dqnc(i,k,j)=0. + !if( p_qni >= param_first_scalar ) Dqni(i,k,j)=0. + QKE(i,k,j)=0. + TKE_PBL(i,k,j)=0. + EXCH_H(i,k,j)=0. + ENDDO + ENDDO + ENDDO + ENDIF + + mynn_level=level + + END SUBROUTINE mynn_bl_init_driver + +#endif +! ================================================================== + + SUBROUTINE GET_PBLH(KTS,KTE,zi,thetav1D,qke1D,zw1D,dz1D,landsea,kzi) + + !--------------------------------------------------------------- + ! NOTES ON THE PBLH FORMULATION + ! + !The 1.5-theta-increase method defines PBL heights as the level at + !which the potential temperature first exceeds the minimum potential + !temperature within the boundary layer by 1.5 K. When applied to + !observed temperatures, this method has been shown to produce PBL- + !height estimates that are unbiased relative to profiler-based + !estimates (Nielsen-Gammon et al. 2008). However, their study did not + !include LLJs. 
Banta and Pichugina (2008) show that a TKE-based + !threshold is a good estimate of the PBL height in LLJs. Therefore, + !a hybrid definition is implemented that uses both methods, weighting + !the TKE-method more during stable conditions (PBLH < 400 m). + !A variable tke threshold (TKEeps) is used since no hard-wired + !value could be found to work best in all conditions. + !--------------------------------------------------------------- + + INTEGER,INTENT(IN) :: KTS,KTE + REAL, INTENT(OUT) :: zi + REAL, INTENT(IN) :: landsea + REAL, DIMENSION(KTS:KTE), INTENT(IN) :: thetav1D, qke1D, dz1D + REAL, DIMENSION(KTS:KTE+1), INTENT(IN) :: zw1D + !LOCAL VARS + REAL :: PBLH_TKE,qtke,qtkem1,wt,maxqke,TKEeps,minthv + REAL :: delt_thv !delta theta-v; dependent on land/sea point + REAL, PARAMETER :: sbl_lim = 200. !typical scale of stable BL (m). + REAL, PARAMETER :: sbl_damp = 400. !transition length for blending (m). + INTEGER :: I,J,K,kthv,ktke,kzi,kzi2 + + !ADD KPBL (kzi) for coupling to some Cu schemes, initialize at k=2 + !kzi2 is the TKE-based part of the hybrid KPBL + kzi = 1 + kzi2= 1 + + !FIND MAX TKE AND MIN THETAV IN THE LOWEST 500 M + k = kts+1 + kthv = 1 + ktke = 1 + maxqke = 0. + minthv = 9.E9 + DO WHILE (zw1D(k) .LE. 500.) + qtke =MAX(Qke1D(k),0.) ! maximum QKE + IF (maxqke < qtke) then + maxqke = qtke + ktke = k + ENDIF + IF (minthv > thetav1D(k)) then + minthv = thetav1D(k) + kthv = k + ENDIF + k = k+1 + ENDDO + !TKEeps = maxtke/20. = maxqke/40. + TKEeps = maxqke/40. + TKEeps = MAX(TKEeps,0.025) + + !FIND THETAV-BASED PBLH (BEST FOR DAYTIME). + zi=0. + k = kthv+1 + IF((landsea-1.5).GE.0)THEN + ! WATER + delt_thv = 0.75 + ELSE + ! LAND + delt_thv = 1.25 + ENDIF + + zi=0. + k = kthv+1 + DO WHILE (zi .EQ. 0.) + IF (thetav1D(k) .GE. (minthv + delt_thv))THEN + kzi = MAX(k-1,1) + zi = zw1D(k) - dz1D(k-1)* & + & MIN((thetav1D(k)-(minthv + delt_thv))/ & + & MAX(thetav1D(k)-thetav1D(k-1),1E-6),1.0) + ENDIF + k = k+1 + IF (k .EQ. 
kte-1) zi = zw1D(kts+1) !EXIT SAFEGUARD + ENDDO + !print*,"IN GET_PBLH:",thsfc,zi + + !FOR STABLE BOUNDARY LAYERS, USE TKE METHOD TO COMPLEMENT THE + !THETAV-BASED DEFINITION (WHEN THE THETA-V BASED PBLH IS BELOW ~0.5 KM). + !THE TANH WEIGHTING FUNCTION WILL MAKE THE TKE-BASED DEFINITION NEGLIGIBLE + !WHEN THE THETA-V-BASED DEFINITION IS ABOVE ~1 KM. + + PBLH_TKE=0. + k = ktke+1 + DO WHILE (PBLH_TKE .EQ. 0.) + !QKE CAN BE NEGATIVE (IF CKmod == 0)... MAKE TKE NON-NEGATIVE. + qtke =MAX(Qke1D(k)/2.,0.) ! maximum TKE + qtkem1=MAX(Qke1D(k-1)/2.,0.) + IF (qtke .LE. TKEeps) THEN + kzi2 = MAX(k-1,1) + PBLH_TKE = zw1D(k) - dz1D(k-1)* & + & MIN((TKEeps-qtke)/MAX(qtkem1-qtke, 1E-6), 1.0) + !IN CASE OF NEAR ZERO TKE, SET PBLH = LOWEST LEVEL. + PBLH_TKE = MAX(PBLH_TKE,zw1D(kts+1)) + !print *,"PBLH_TKE:",i,j,PBLH_TKE, Qke1D(k)/2., zw1D(kts+1) + ENDIF + k = k+1 + IF (k .EQ. kte-1) PBLH_TKE = zw1D(kts+1) !EXIT SAFEGUARD + ENDDO + + !With TKE advection turned on, the TKE-based PBLH can be very large + !in grid points with convective precipitation (> 8 km!), + !so an artificial limit is imposed to not let PBLH_TKE exceed 4km. + !This has no impact on 98-99% of the domain, but is the simplest patch + !that adequately addresses these extremely large PBLHs. + !PBLH_TKE = MIN(PBLH_TKE,4000.) + PBLH_TKE = MIN(PBLH_TKE,zi+500.) + + !BLEND THE TWO PBLH TYPES HERE: + wt=.5*TANH((zi - sbl_lim)/sbl_damp) + .5 + zi=PBLH_TKE*(1.-wt) + zi*wt + + !ADD KPBL (kzi) for coupling to some Cu schemes + kzi = INT(kzi2*(1.-wt) + kzi*wt) + + END SUBROUTINE GET_PBLH + +! 
================================================================== + +END MODULE module_bl_mynn diff --git a/src/core_atmosphere/physics/physics_wrf/module_bl_ysu.F b/src/core_atmosphere/physics/physics_wrf/module_bl_ysu.F index 1e575ee394..9061651398 100644 --- a/src/core_atmosphere/physics/physics_wrf/module_bl_ysu.F +++ b/src/core_atmosphere/physics/physics_wrf/module_bl_ysu.F @@ -1,3 +1,13 @@ +!================================================================================================================= +!module_bl_ysu.F was originally copied from ./phys/module_bl_ysu.F from WRF version 3.8.1. +!Laura D. Fowler (laura@ucar.edu) / 2016-10-26. + +!modifications to sourcecode for MPAS: +! * calculated the dry hydrostatic pressure using the dry air density. +! * added outputs of the vertical diffusivity coefficients. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-26. + +!================================================================================================================= !WRF:model_layer:physics ! ! @@ -10,7 +20,7 @@ module module_bl_ysu contains ! ! -!------------------------------------------------------------------- +!------------------------------------------------------------------------------- ! 
subroutine ysu(u3d,v3d,th3d,t3d,qv3d,qc3d,qi3d,p3d,p3di,pi3d, & rublten,rvblten,rthblten, & @@ -22,7 +32,10 @@ subroutine ysu(u3d,v3d,th3d,t3d,qv3d,qc3d,qi3d,p3d,p3di,pi3d, & xland,hfx,qfx,wspd,br, & dt,kpbl2d, & exch_h, & + wstar,delta, & u10,v10, & + uoce,voce, & + rthraten,ysu_topdown_pblmix, & ctopo,ctopo2, & ids,ide, jds,jde, kds,kde, & ims,ime, jms,jme, kms,kme, & @@ -34,12 +47,12 @@ subroutine ysu(u3d,v3d,th3d,t3d,qv3d,qc3d,qi3d,p3d,p3di,pi3d, & ,rho,kzhout,kzmout,kzqout & #endif ) -!------------------------------------------------------------------- - implicit none -!------------------------------------------------------------------- +!------------------------------------------------------------------------------- + implicit none +!------------------------------------------------------------------------------- !-- u3d 3d u-velocity interpolated to theta points (m/s) !-- v3d 3d v-velocity interpolated to theta points (m/s) -!-- th3d 3d potential temperature (k) +!-- th3d 3d potential temperature (k) !-- t3d temperature (k) !-- qv3d 3d water vapor mixing ratio (kg/kg) !-- qc3d 3d cloud mixing ratio (kg/kg) @@ -47,8 +60,8 @@ subroutine ysu(u3d,v3d,th3d,t3d,qv3d,qc3d,qi3d,p3d,p3di,pi3d, & ! (note: if P_QI increase pbl height when sfc is stable (hong 2010) +! ==> increase pbl height when sfc is stable (hong 2010) ! pressure-level diffusion, april 2009 -! ==> negligible differences +! ==> negligible differences ! implicit forcing for momentum with clean up, july 2009 -! ==> prevents model blowup when sfc layer is too low +! ==> prevents model blowup when sfc layer is too low ! incresea of lamda, maximum (30, 0.1 x del z) feb 2010 -! ==> prevents model blowup when delz is extremely large +! ==> prevents model blowup when delz is extremely large ! revised prandtl number at surface, peggy lemone, feb 2010 -! ==> increase kh, decrease mixing due to counter-gradient term +! ==> increase kh, decrease mixing due to counter-gradient term ! revised thermal, shin et al. 
mon. wea. rev. , songyou hong, aug 2011 -! ==> reduce the thermal strength when z1 < 0.1 h +! ==> reduce the thermal strength when z1 < 0.1 h ! revised prandtl number for free convection, dudhia, mar 2012 -! ==> pr0 = 1 + bke (=0.272) when newtral, kh is reduced +! ==> pr0 = 1 + bke (=0.272) when newtral, kh is reduced ! minimum kzo = 0.01, lo = min (30m,delz), hong, mar 2012 -! ==> weaker mixing when stable, and les resolution in vertical +! ==> weaker mixing when stable, and les resolution in vertical ! gz1oz0 is removed, and phim phih are ln(z1/z0)-phim,h, hong, mar 2012 -! ==> consider thermal z0 when differs from mechanical z0 +! ==> consider thermal z0 when differs from mechanical z0 ! a bug fix in wscale computation in stable bl, sukanta basu, jun 2012 -! ==> wscale becomes small with height, and less mixing in stable bl -! ==> ri = max(ri,rimin). limits the richardson number to -100 in -! unstable layers, following Hong et al. 2006. -! Laura D. Fowler (2013-04-18). +! ==> wscale becomes small with height, and less mixing in stable bl +! revision in background diffusion (kzo), jan 2016 +! ==> kzo = 0.1 for momentum and = 0.01 for mass to account for +! internal wave mixing of large et al. (1994), songyou hong, feb 2016 +! ==> alleviate superious excessive mixing when delz is large ! ! references: ! ! hong (2010) quart. j. roy. met. soc -! hong, noh, and dudhia (2006), mon. wea. rev. +! hong, noh, and dudhia (2006), mon. wea. rev. ! hong and pan (1996), mon. wea. rev. ! noh, chun, hong, and raasch (2003), boundary layer met. ! troen and mahrt (1986), boundary layer met. ! -!------------------------------------------------------------------- +!------------------------------------------------------------------------------- ! + real,parameter :: xkzminm = 0.1,xkzminh = 0.01 real,parameter :: xkzmin = 0.01,xkzmax = 1000.,rimin = -100. -! real,parameter :: rlam = 150.,prmin = 0.25,prmax = 4. real,parameter :: rlam = 30.,prmin = 0.25,prmax = 4. 
real,parameter :: brcr_ub = 0.0,brcr_sb = 0.25,cori = 1.e-4 real,parameter :: afac = 6.8,bfac = 6.8,pfac = 2.0,pfac_q = 2.0 real,parameter :: phifac = 8.,sfcfrac = 0.1 real,parameter :: d1 = 0.02, d2 = 0.05, d3 = 0.001 real,parameter :: h1 = 0.33333335, h2 = 0.6666667 - real,parameter :: ckz = 0.001,zfmin = 1.e-8,aphi5 = 5.,aphi16 = 16. + real,parameter :: zfmin = 1.e-8,aphi5 = 5.,aphi16 = 16. real,parameter :: tmin=1.e-2 real,parameter :: gamcrt = 3.,gamcrq = 2.e-3 real,parameter :: xka = 2.4e-5 @@ -412,6 +451,8 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & ims,ime, jms,jme, kms,kme, & its,ite, jts,jte, kts,kte, & j,ndiff + + integer, intent(in) :: ysu_topdown_pblmix ! real, intent(in ) :: dt,rcl,cp,g,rovcp,rovg,rd,xlv,rv ! @@ -419,7 +460,8 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & ! real, dimension( ims:ime, kms:kme ), & intent(in) :: dz8w2d, & - pi2d + pi2d, & + p2diorg ! real, dimension( ims:ime, kms:kme ) , & intent(in ) :: tx @@ -438,7 +480,6 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & ! real, dimension( its:ite, kts:kte ) , & intent(in ) :: p2d -! ! real, dimension( ims:ime ) , & intent(inout) :: ust, & @@ -454,14 +495,14 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & ! real, dimension( ims:ime ), intent(in ) :: psim, & psih - ! real, dimension( ims:ime ), intent(in ) :: psfcpa integer, dimension( ims:ime ), intent(out ) :: kpbl1d ! real, dimension( ims:ime, kms:kme ) , & intent(in ) :: ux, & - vx + vx, & + rthraten real, dimension( ims:ime ) , & optional , & intent(in ) :: ctopo, & @@ -476,11 +517,12 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & real, dimension( its:ite, kts:kte+1 ) :: zq ! real, dimension( its:ite, kts:kte ) :: & - thx,thvx, & + thx,thvx,thlix, & del, & dza, & dzq, & - xkzo, & + xkzom, & + xkzoh, & za ! real, dimension( its:ite ) :: & @@ -494,7 +536,7 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & dusfc,dvsfc, & dtsfc,dqsfc, & prpbl, & - wspd1 + wspd1,thermalli ! 
real, dimension( its:ite, kts:kte ) :: xkzm,xkzh, & f1,f2, & @@ -503,28 +545,38 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & cu, & al, & xkzq, & - zfac + zfac, & + rhox2, & + hgamt2 ! !jdf added exch_hx +! real, dimension( ims:ime, kms:kme ) , & intent(inout) :: exch_hx ! real, dimension( ims:ime ) , & intent(inout) :: u10, & v10 + real, dimension( ims:ime ) , & + intent(in ) :: uox, & + vox real, dimension( its:ite ) :: & brcr, & sflux, & + zol1, & brcr_sbro ! real, dimension( its:ite, kts:kte, ndiff) :: r3,f3 - integer, dimension( its:ite ) :: kpbl + integer, dimension( its:ite ) :: kpbl,kpblold ! logical, dimension( its:ite ) :: pblflg, & sfcflg, & - stable + stable, & + cloudflg + + logical :: definebrup ! - integer :: n,i,k,l,ic,is + integer :: n,i,k,l,ic,is,kk integer :: klpbl, ktrace1, ktrace2, ktrace3 ! ! @@ -533,32 +585,36 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & real :: brint,dtodsd,dtodsu,rdz,dsdzt,dsdzq,dsdz2,rlamdz real :: utend,vtend,ttend,qtend real :: dtstep,govrthv - real :: cont, conq, conw, conwrc + real :: cont, conq, conw, conwrc ! - real, dimension( its:ite, kts:kte ) :: wscalek - real, dimension( its:ite ) :: delta + + real, dimension( its:ite, kts:kte ) :: wscalek,wscalek2 + real, dimension( ims:ime ) :: wstar + real, dimension( ims:ime ) :: delta real, dimension( its:ite, kts:kte ) :: xkzml,xkzhl, & zfacent,entfac real, dimension( its:ite ) :: ust3, & - wstar3,wstar, & + wstar3, & + wstar3_2, & hgamu,hgamv, & wm2, we, & bfxpbl, & hfxpbl,qfxpbl, & ufxpbl,vfxpbl, & - dthvx, & - zol1 + dthvx real :: prnumfac,bfx0,hfx0,qfx0,delb,dux,dvx, & dsdzu,dsdzv,wm3,dthx,dqx,wspd10,ross,tem1,dsig,tvcon,conpr, & - prfac,prfac2,phim8z + prfac,prfac2,phim8z,radsum,tmp1,templ,rvls,temps,ent_eff, & + rcldb,bruptmp,radflux ! #if defined(mpas) -!MPAS specific begin (Laura Fowler - 2013-03-01): +!MPAS specific begin: real,intent(out),dimension(ims:ime,kms:kme),optional::kzh,kzm,kzq !MPAS specific end. 
#endif -!---------------------------------------------------------------------- +! +!------------------------------------------------------------------------------- ! klpbl = kte ! @@ -577,6 +633,7 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & do k = kts,kte do i = its,ite thx(i,k) = tx(i,k)/pi2d(i,k) + thlix(i,k) = (tx(i,k)-xlv*qx(i,ktrace2+k)/cp-2.834E6*qx(i,ktrace3+k)/cp)/pi2d(i,k) enddo enddo ! @@ -603,9 +660,11 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & do k = kts,kte do i = its,ite zq(i,k+1) = dz8w2d(i,k)+zq(i,k) + tvcon = (1.+ep1*qx(i,k)) + rhox2(i,k) = p2d(i,k)/(rd*tx(i,k)*tvcon) enddo enddo -! +! do k = kts,kte do i = its,ite za(i,k) = 0.5*(zq(i,k)+zq(i,k+1)) @@ -633,12 +692,12 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & qtnp(its:ite,:) = 0. ! do i = its,ite - wspd1(i) = sqrt(ux(i,1)*ux(i,1)+vx(i,1)*vx(i,1))+1.e-9 + wspd1(i) = sqrt( (ux(i,1)-uox(i))*(ux(i,1)-uox(i)) + (vx(i,1)-vox(i))*(vx(i,1)-vox(i)) )+1.e-9 enddo ! !---- compute vertical diffusion ! -! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ! compute preliminary variables ! dtstep = dt @@ -654,10 +713,10 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & hgamu(i) = 0.0 hgamv(i) = 0.0 delta(i) = 0.0 + wstar3_2(i) = 0.0 enddo ! -!MPAS specific begin (Laura Fowler - 2013-03-01): Added initialization of local -!vertical diffusion coefficients: +!MPAS specific begin: Added initialization of local vertical diffusion coefficients: if(present(kzh) .and. present(kzm) .and. present(kzq)) then do k = kts,kte do i = its,ite @@ -673,6 +732,7 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & do k = kts,klpbl do i = its,ite wscalek(i,k) = 0.0 + wscalek2(i,k) = 0.0 enddo enddo ! @@ -683,7 +743,8 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & enddo do k = kts,klpbl-1 do i = its,ite - xkzo(i,k) = ckz*dza(i,k+1) + xkzom(i,k) = xkzminm + xkzoh(i,k) = xkzminh enddo enddo ! 
@@ -702,6 +763,7 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & hpbl(i) = zq(i,1) zl1(i) = za(i,1) thermal(i)= thvx(i,1) + thermalli(i) = thlix(i,1) pblflg(i) = .true. sfcflg(i) = .true. sflux(i) = hfx(i)/rhox(i)/cp + qfx(i)/rhox(i)*ep1*thx(i,1) @@ -782,6 +844,7 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & hgamq(i) = min(gamfac*qfx(i),gamcrq) vpert = (hgamt(i)+ep1*thx(i,1)*hgamq(i))/bfac*afac thermal(i) = thermal(i)+max(vpert,0.)*min(za(i,1)/(sfcfrac*hpbl(i)),1.0) + thermalli(i)= thermalli(i)+max(vpert,0.)*min(za(i,1)/(sfcfrac*hpbl(i)),1.0) hgamt(i) = max(hgamt(i),0.0) hgamq(i) = max(hgamq(i),0.0) brint = -15.9*ust(i)*ust(i)/wspd(i)*wstar3(i)/(wscale(i)**4.) @@ -821,6 +884,30 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & enddo enddo ! +! enhance pbl by theta-li +! + if (ysu_topdown_pblmix.eq.1)then + do i = its,ite + kpblold(i) = kpbl(i) + definebrup=.false. + do k = kpblold(i), kte-1 + spdk2 = max(ux(i,k)**2+vx(i,k)**2,1.) + bruptmp = (thlix(i,k)-thermalli(i))*(g*za(i,k)/thlix(i,1))/spdk2 + stable(i) = bruptmp.ge.brcr(i) + if (definebrup) then + kpbl(i) = k + brup(i) = bruptmp + definebrup=.false. + endif + if (.not.stable(i)) then !overwrite brup brdn values + brdn(i)=bruptmp + definebrup=.true. + pblflg(i)=.true. + endif + enddo + enddo + endif + do i = its,ite if(pblflg(i)) then k = kpbl(i) @@ -866,6 +953,7 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & endif endif enddo +! do k = 2,klpbl do i = its,ite if(.not.stable(i))then @@ -897,16 +985,78 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & ! estimate the entrainment parameters ! do i = its,ite + cloudflg(i)=.false. if(pblflg(i)) then k = kpbl(i) - 1 - prpbl(i) = 1.0 wm3 = wstar3(i) + 5. * ust3(i) wm2(i) = wm3**h2 bfxpbl(i) = -0.15*thvx(i,1)/g*wm3/hpbl(i) dthvx(i) = max(thvx(i,k+1)-thvx(i,k),tmin) + we(i) = max(bfxpbl(i)/dthvx(i),-sqrt(wm2(i))) + if((qx(i,ktrace2+k)+qx(i,ktrace3+k)).gt.0.01e-3.and.ysu_topdown_pblmix.eq.1)then + if ( kpbl(i) .ge. 2) then + cloudflg(i)=.true. 
+ templ=thlix(i,k)*(p2di(i,k+1)/100000)**rovcp + !rvls is ws at full level + rvls=100.*6.112*EXP(17.67*(templ-273.16)/(templ-29.65))*(ep2/p2di(i,k+1)) + temps=templ + ((qx(i,k)+qx(i,ktrace2+k))-rvls)/(cp/xlv + & + ep2*xlv*rvls/(rd*templ**2)) + rvls=100.*6.112*EXP(17.67*(temps-273.15)/(temps-29.65))*(ep2/p2di(i,k+1)) + rcldb=max((qx(i,k)+qx(i,ktrace2+k))-rvls,0.) + !entrainment efficiency + dthvx(i) = (thlix(i,k+2)+thx(i,k+2)*ep1*(qx(i,k+2)+qx(i,ktrace2+k+2))) & + - (thlix(i,k) + thx(i,k) *ep1*(qx(i,k) +qx(i,ktrace2+k))) + dthvx(i) = max(dthvx(i),0.1) + tmp1 = xlv/cp * rcldb/(pi2d(i,k)*dthvx(i)) + ent_eff = 0.2 * 8. * tmp1 +0.2 + + radsum=0. + do kk = 1,kpbl(i)-1 + radflux=rthraten(i,kk)*pi2d(i,kk) !converts theta/s to temp/s + radflux=radflux*cp/g*(p2diORG(i,kk)-p2diORG(i,kk+1)) ! converts temp/s to W/m^2 + if (radflux < 0.0 ) radsum=abs(radflux)+radsum + enddo + radsum=max(radsum,0.0) + + !recompute entrainment from sfc thermals + bfx0 = max(max(sflux(i),0.0)-radsum/rhox2(i,k)/cp,0.) + bfx0 = max(sflux(i),0.0) + wm3 = (govrth(i)*bfx0*hpbl(i))+5. * ust3(i) + wm2(i) = wm3**h2 + bfxpbl(i) = -0.15*thvx(i,1)/g*wm3/hpbl(i) + dthvx(i) = max(thvx(i,k+1)-thvx(i,k),tmin) + we(i) = max(bfxpbl(i)/dthvx(i),-sqrt(wm2(i))) + + !entrainment from PBL top thermals + bfx0 = max(radsum/rhox2(i,k)/cp-max(sflux(i),0.0),0.) + bfx0 = max(radsum/rhox2(i,k)/cp,0.) + wm3 = (g/thvx(i,k)*bfx0*hpbl(i)) ! this is wstar3(i) + wm2(i) = wm2(i)+wm3**h2 + bfxpbl(i) = - ent_eff * bfx0 + dthvx(i) = max(thvx(i,k+1)-thvx(i,k),0.1) + we(i) = we(i) + max(bfxpbl(i)/dthvx(i),-sqrt(wm3**h2)) + + !wstar3_2 + bfx0 = max(radsum/rhox2(i,k)/cp,0.) 
+ wstar3_2(i) = (g/thvx(i,k)*bfx0*hpbl(i)) + !recompute hgamt + wscale(i) = (ust3(i)+phifac*karman*(wstar3(i)+wstar3_2(i))*0.5)**h1 + wscale(i) = min(wscale(i),ust(i)*aphi16) + wscale(i) = max(wscale(i),ust(i)/aphi5) + gamfac = bfac/rhox(i)/wscale(i) + hgamt(i) = min(gamfac*hfx(i)/cp,gamcrt) + hgamq(i) = min(gamfac*qfx(i),gamcrq) + gamfac = bfac/rhox2(i,k)/wscale(i) + hgamt2(i,k) = min(gamfac*radsum/cp,gamcrt) + hgamt(i) = max(hgamt(i),0.0) + max(hgamt2(i,k),0.0) + brint = -15.9*ust(i)*ust(i)/wspd(i)*(wstar3(i)+wstar3_2(i))/(wscale(i)**4.) + hgamu(i) = brint*ux(i,1) + hgamv(i) = brint*vx(i,1) + endif + endif + prpbl(i) = 1.0 dthx = max(thx(i,k+1)-thx(i,k),tmin) dqx = min(qx(i,k+1)-qx(i,k),0.0) - we(i) = max(bfxpbl(i)/dthvx(i),-sqrt(wm2(i))) hfxpbl(i) = we(i)*dthx qfxpbl(i) = we(i)*dqx ! @@ -949,9 +1099,10 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & zfac(i,k) = min(max((1.-(zq(i,k+1)-zl1(i))/(hpbl(i)-zl1(i))),zfmin),1.) zfacent(i,k) = (1.-zfac(i,k))**3. wscalek(i,k) = (ust3(i)+phifac*karman*wstar3(i)*(1.-zfac(i,k)))**h1 - if(sfcflg(i)) then + wscalek2(i,k) = (phifac*karman*wstar3_2(i)*(zfac(i,k)))**h1 + if(sfcflg(i)) then prfac = conpr - prfac2 = 15.9*wstar3(i)/ust3(i)/(1.+4.*karman*wstar3(i)/ust3(i)) + prfac2 = 15.9*(wstar3(i)+wstar3_2(i))/ust3(i)/(1.+4.*karman*(wstar3(i)+wstar3_2(i))/ust3(i)) prnumfac = -3.*(max(zq(i,k+1)-sfcfrac*hpbl(i),0.))**2./hpbl(i)**2. else prfac = 0. @@ -963,18 +1114,23 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & endif prnum0 = (phih(i)/phim(i)+prfac) prnum0 = max(min(prnum0,prmax),prmin) - xkzm(i,k) = wscalek(i,k)*karman*zq(i,k+1)*zfac(i,k)**pfac + xkzm(i,k) = wscalek(i,k) *karman* zq(i,k+1) * zfac(i,k)**pfac+ & + wscalek2(i,k)*karman*(hpbl(i)-zq(i,k+1))*(1-zfac(i,k))**pfac + !Do not include xkzm at kpbl-1 since it changes entrainment + if (k.eq.kpbl(i)-1.and.cloudflg(i).and.we(i).lt.0.0) then + xkzm(i,k) = 0.0 + endif prnum = 1. 
+ (prnum0-1.)*exp(prnumfac) xkzq(i,k) = xkzm(i,k)/prnum*zfac(i,k)**(pfac_q-pfac) prnum0 = prnum0/(1.+prfac2*karman*sfcfrac) prnum = 1. + (prnum0-1.)*exp(prnumfac) xkzh(i,k) = xkzm(i,k)/prnum + xkzm(i,k) = xkzm(i,k)+xkzom(i,k) + xkzh(i,k) = xkzh(i,k)+xkzoh(i,k) + xkzq(i,k) = xkzq(i,k)+xkzoh(i,k) xkzm(i,k) = min(xkzm(i,k),xkzmax) - xkzm(i,k) = max(xkzm(i,k),xkzo(i,k)) xkzh(i,k) = min(xkzh(i,k),xkzmax) - xkzh(i,k) = max(xkzh(i,k),xkzo(i,k)) xkzq(i,k) = min(xkzq(i,k),xkzmax) - xkzq(i,k) = max(xkzq(i,k),xkzo(i,k)) endif enddo enddo @@ -1005,9 +1161,9 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & rlamdz = min(dza(i,k+1),rlamdz) rl2 = (zk*rlamdz/(rlamdz+zk))**2 dk = rl2*sqrt(ss) - ri = max(ri,rimin) if(ri.lt.0.)then ! unstable regime + ri = max(ri, rimin) sri = sqrt(-ri) xkzm(i,k) = dk*(1+8.*(-ri)/(1+1.746*sri)) xkzh(i,k) = dk*(1+8.*(-ri)/(1+1.286*sri)) @@ -1019,10 +1175,10 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & xkzm(i,k) = xkzh(i,k)*prnum endif ! + xkzm(i,k) = xkzm(i,k)+xkzom(i,k) + xkzh(i,k) = xkzh(i,k)+xkzoh(i,k) xkzm(i,k) = min(xkzm(i,k),xkzmax) - xkzm(i,k) = max(xkzm(i,k),xkzo(i,k)) xkzh(i,k) = min(xkzh(i,k),xkzmax) - xkzh(i,k) = max(xkzh(i,k),xkzo(i,k)) xkzml(i,k) = xkzm(i,k) xkzhl(i,k) = xkzh(i,k) endif @@ -1059,8 +1215,8 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & elseif(pblflg(i).and.k.ge.kpbl(i).and.entfac(i,k).lt.4.6) then xkzh(i,k) = -we(i)*dza(i,kpbl(i))*exp(-entfac(i,k)) xkzh(i,k) = sqrt(xkzh(i,k)*xkzhl(i,k)) + xkzh(i,k) = max(xkzh(i,k),xkzoh(i,k)) xkzh(i,k) = min(xkzh(i,k),xkzmax) - xkzh(i,k) = max(xkzh(i,k),xkzo(i,k)) f1(i,k+1) = thx(i,k+1)-300. else f1(i,k+1) = thx(i,k+1)-300. @@ -1128,7 +1284,7 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & enddo endif ! 
- do k = kts,kte + do k = kts,kte-1 do i = its,ite if(k.ge.kpbl(i)) then xkzq(i,k) = xkzh(i,k) @@ -1150,8 +1306,8 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & elseif(pblflg(i).and.k.ge.kpbl(i).and.entfac(i,k).lt.4.6) then xkzq(i,k) = -we(i)*dza(i,kpbl(i))*exp(-entfac(i,k)) xkzq(i,k) = sqrt(xkzq(i,k)*xkzhl(i,k)) + xkzq(i,k) = max(xkzq(i,k),xkzoh(i,k)) xkzq(i,k) = min(xkzq(i,k),xkzmax) - xkzq(i,k) = max(xkzq(i,k),xkzo(i,k)) f3(i,k+1,1) = qx(i,k+1) else f3(i,k+1,1) = qx(i,k+1) @@ -1233,16 +1389,16 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & ! do i = its,ite ! paj: ctopo=1 if topo_wind=0 (default) -! mchen add this line to make sure NMM can still work with YSU PBL - if(present(ctopo)) then +! mchen add this line to make sure NMM can still work with YSU PBL + if(present(ctopo)) then ad(i,1) = 1.+ctopo(i)*ust(i)**2/wspd1(i)*rhox(i)*g/del(i,1)*dt2 & *(wspd1(i)/wspd(i))**2 - else + else ad(i,1) = 1.+ust(i)**2/wspd1(i)*rhox(i)*g/del(i,1)*dt2 & *(wspd1(i)/wspd(i))**2 - endif - f1(i,1) = ux(i,1) - f2(i,1) = vx(i,1) + endif + f1(i,1) = ux(i,1)+uox(i)*ust(i)**2*g/del(i,1)*dt2/wspd1(i) + f2(i,1) = vx(i,1)+vox(i)*ust(i)**2*g/del(i,1)*dt2/wspd1(i) enddo ! 
do k = kts,kte-1 @@ -1252,24 +1408,24 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & dsig = p2d(i,k)-p2d(i,k+1) rdz = 1./dza(i,k+1) tem1 = dsig*xkzm(i,k)*rdz - if(pblflg(i).and.k.lt.kpbl(i))then - dsdzu = tem1*(-hgamu(i)/hpbl(i)-ufxpbl(i)*zfacent(i,k)/xkzm(i,k)) - dsdzv = tem1*(-hgamv(i)/hpbl(i)-vfxpbl(i)*zfacent(i,k)/xkzm(i,k)) - f1(i,k) = f1(i,k)+dtodsd*dsdzu - f1(i,k+1) = ux(i,k+1)-dtodsu*dsdzu - f2(i,k) = f2(i,k)+dtodsd*dsdzv - f2(i,k+1) = vx(i,k+1)-dtodsu*dsdzv - elseif(pblflg(i).and.k.ge.kpbl(i).and.entfac(i,k).lt.4.6) then - xkzm(i,k) = prpbl(i)*xkzh(i,k) - xkzm(i,k) = sqrt(xkzm(i,k)*xkzml(i,k)) - xkzm(i,k) = min(xkzm(i,k),xkzmax) - xkzm(i,k) = max(xkzm(i,k),xkzo(i,k)) - f1(i,k+1) = ux(i,k+1) - f2(i,k+1) = vx(i,k+1) - else - f1(i,k+1) = ux(i,k+1) - f2(i,k+1) = vx(i,k+1) - endif + if(pblflg(i).and.k.lt.kpbl(i))then + dsdzu = tem1*(-hgamu(i)/hpbl(i)-ufxpbl(i)*zfacent(i,k)/xkzm(i,k)) + dsdzv = tem1*(-hgamv(i)/hpbl(i)-vfxpbl(i)*zfacent(i,k)/xkzm(i,k)) + f1(i,k) = f1(i,k)+dtodsd*dsdzu + f1(i,k+1) = ux(i,k+1)-dtodsu*dsdzu + f2(i,k) = f2(i,k)+dtodsd*dsdzv + f2(i,k+1) = vx(i,k+1)-dtodsu*dsdzv + elseif(pblflg(i).and.k.ge.kpbl(i).and.entfac(i,k).lt.4.6) then + xkzm(i,k) = prpbl(i)*xkzh(i,k) + xkzm(i,k) = sqrt(xkzm(i,k)*xkzml(i,k)) + xkzm(i,k) = max(xkzm(i,k),xkzom(i,k)) + xkzm(i,k) = min(xkzm(i,k),xkzmax) + f1(i,k+1) = ux(i,k+1) + f2(i,k+1) = vx(i,k+1) + else + f1(i,k+1) = ux(i,k+1) + f2(i,k+1) = vx(i,k+1) + endif tem1 = dsig*xkzm(i,k)*rdz dsdz2 = tem1*rdz au(i,k) = -dtodsd*dsdz2 @@ -1309,10 +1465,10 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & ! paj: ctopo2=1 if topo_wind=0 (default) ! do i = its,ite - if(present(ctopo).and.present(ctopo2)) then ! mchen for NMM - u10(i) = ctopo2(i)*u10(i)+(1-ctopo2(i))*ux(i,1) - v10(i) = ctopo2(i)*v10(i)+(1-ctopo2(i))*vx(i,1) - endif !mchen + if(present(ctopo).and.present(ctopo2)) then ! mchen for NMM + u10(i) = ctopo2(i)*u10(i)+(1-ctopo2(i))*ux(i,1) + v10(i) = ctopo2(i)*v10(i)+(1-ctopo2(i))*vx(i,1) + endif !mchen enddo ! 
!---- end of vertical diffusion @@ -1321,7 +1477,7 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & kpbl1d(i) = kpbl(i) enddo ! -!MPAS specific begin (Laura D. Fowler - 2013-03-01):: +!MPAS specific begin: if(present(kzh) .and. present(kzm) .and. present(kzq)) then do i = its,ite do k = kts,kte @@ -1332,13 +1488,15 @@ subroutine ysu2d(j,ux,vx,tx,qx,p2d,p2di,pi2d, & enddo endif !MPAS specific end. - +! end subroutine ysu2d +!------------------------------------------------------------------------------- ! +!------------------------------------------------------------------------------- subroutine tridi1n(cl,cm,cu,r1,r2,au,f1,f2,its,ite,kts,kte,nt) -!---------------------------------------------------------------- +!------------------------------------------------------------------------------- implicit none -!---------------------------------------------------------------- +!------------------------------------------------------------------------------- ! integer, intent(in ) :: its,ite, kts,kte, nt ! @@ -1347,21 +1505,21 @@ subroutine tridi1n(cl,cm,cu,r1,r2,au,f1,f2,its,ite,kts,kte,nt) ! real, dimension( its:ite, kts:kte ) , & intent(in ) :: cm, & - r1 + r1 real, dimension( its:ite, kts:kte,nt ) , & intent(in ) :: r2 ! real, dimension( its:ite, kts:kte ) , & intent(inout) :: au, & - cu, & - f1 + cu, & + f1 real, dimension( its:ite, kts:kte,nt ) , & intent(inout) :: f2 ! real :: fk integer :: i,k,l,n,it ! -!---------------------------------------------------------------- +!------------------------------------------------------------------------------- ! l = ite n = kte @@ -1371,12 +1529,14 @@ subroutine tridi1n(cl,cm,cu,r1,r2,au,f1,f2,its,ite,kts,kte,nt) au(i,1) = fk*cu(i,1) f1(i,1) = fk*r1(i,1) enddo +! do it = 1,nt do i = its,l fk = 1./cm(i,1) f2(i,1,it) = fk*r2(i,1,it) enddo enddo +! 
do k = kts+1,n-1 do i = its,l fk = 1./(cm(i,k)-cl(i,k)*au(i,k-1)) @@ -1384,43 +1544,50 @@ subroutine tridi1n(cl,cm,cu,r1,r2,au,f1,f2,its,ite,kts,kte,nt) f1(i,k) = fk*(r1(i,k)-cl(i,k)*f1(i,k-1)) enddo enddo +! do it = 1,nt - do k = kts+1,n-1 - do i = its,l - fk = 1./(cm(i,k)-cl(i,k)*au(i,k-1)) - f2(i,k,it) = fk*(r2(i,k,it)-cl(i,k)*f2(i,k-1,it)) + do k = kts+1,n-1 + do i = its,l + fk = 1./(cm(i,k)-cl(i,k)*au(i,k-1)) + f2(i,k,it) = fk*(r2(i,k,it)-cl(i,k)*f2(i,k-1,it)) + enddo enddo enddo - enddo +! do i = its,l fk = 1./(cm(i,n)-cl(i,n)*au(i,n-1)) f1(i,n) = fk*(r1(i,n)-cl(i,n)*f1(i,n-1)) enddo +! do it = 1,nt - do i = its,l - fk = 1./(cm(i,n)-cl(i,n)*au(i,n-1)) - f2(i,n,it) = fk*(r2(i,n,it)-cl(i,n)*f2(i,n-1,it)) - enddo + do i = its,l + fk = 1./(cm(i,n)-cl(i,n)*au(i,n-1)) + f2(i,n,it) = fk*(r2(i,n,it)-cl(i,n)*f2(i,n-1,it)) + enddo enddo +! do k = n-1,kts,-1 do i = its,l f1(i,k) = f1(i,k)-au(i,k)*f1(i,k+1) enddo enddo +! do it = 1,nt - do k = n-1,kts,-1 - do i = its,l - f2(i,k,it) = f2(i,k,it)-au(i,k)*f2(i,k+1,it) + do k = n-1,kts,-1 + do i = its,l + f2(i,k,it) = f2(i,k,it)-au(i,k)*f2(i,k+1,it) + enddo enddo enddo - enddo ! end subroutine tridi1n +!------------------------------------------------------------------------------- ! +!------------------------------------------------------------------------------- subroutine tridin_ysu(cl,cm,cu,r2,au,f2,its,ite,kts,kte,nt) -!---------------------------------------------------------------- +!------------------------------------------------------------------------------- implicit none -!---------------------------------------------------------------- +!------------------------------------------------------------------------------- ! integer, intent(in ) :: its,ite, kts,kte, nt ! @@ -1434,14 +1601,14 @@ subroutine tridin_ysu(cl,cm,cu,r2,au,f2,its,ite,kts,kte,nt) ! real, dimension( its:ite, kts:kte ) , & intent(inout) :: au, & - cu + cu real, dimension( its:ite, kts:kte,nt ) , & intent(inout) :: f2 ! 
real :: fk integer :: i,k,l,n,it ! -!---------------------------------------------------------------- +!------------------------------------------------------------------------------- ! l = ite n = kte @@ -1453,40 +1620,45 @@ subroutine tridin_ysu(cl,cm,cu,r2,au,f2,its,ite,kts,kte,nt) f2(i,1,it) = fk*r2(i,1,it) enddo enddo +! do it = 1,nt - do k = kts+1,n-1 - do i = its,l - fk = 1./(cm(i,k)-cl(i,k)*au(i,k-1)) - au(i,k) = fk*cu(i,k) - f2(i,k,it) = fk*(r2(i,k,it)-cl(i,k)*f2(i,k-1,it)) + do k = kts+1,n-1 + do i = its,l + fk = 1./(cm(i,k)-cl(i,k)*au(i,k-1)) + au(i,k) = fk*cu(i,k) + f2(i,k,it) = fk*(r2(i,k,it)-cl(i,k)*f2(i,k-1,it)) + enddo enddo enddo - enddo - do it = 1,nt - do i = its,l - fk = 1./(cm(i,n)-cl(i,n)*au(i,n-1)) - f2(i,n,it) = fk*(r2(i,n,it)-cl(i,n)*f2(i,n-1,it)) - enddo - enddo +! do it = 1,nt - do k = n-1,kts,-1 do i = its,l - f2(i,k,it) = f2(i,k,it)-au(i,k)*f2(i,k+1,it) + fk = 1./(cm(i,n)-cl(i,n)*au(i,n-1)) + f2(i,n,it) = fk*(r2(i,n,it)-cl(i,n)*f2(i,n-1,it)) enddo enddo +! + do it = 1,nt + do k = n-1,kts,-1 + do i = its,l + f2(i,k,it) = f2(i,k,it)-au(i,k)*f2(i,k+1,it) + enddo + enddo enddo ! end subroutine tridin_ysu +!------------------------------------------------------------------------------- ! +!------------------------------------------------------------------------------- subroutine ysuinit(rublten,rvblten,rthblten,rqvblten, & rqcblten,rqiblten,p_qi,p_first_scalar, & restart, allowed_to_read, & ids, ide, jds, jde, kds, kde, & ims, ime, jms, jme, kms, kme, & its, ite, jts, jte, kts, kte ) -!------------------------------------------------------------------- +!------------------------------------------------------------------------------- implicit none -!------------------------------------------------------------------- +!------------------------------------------------------------------------------- ! 
logical , intent(in) :: restart, allowed_to_read integer , intent(in) :: ids, ide, jds, jde, kds, kde, & @@ -1508,28 +1680,29 @@ subroutine ysuinit(rublten,rvblten,rthblten,rqvblten, & ! if(.not.restart)then do j = jts,jtf - do k = kts,ktf - do i = its,itf - rublten(i,k,j) = 0. - rvblten(i,k,j) = 0. - rthblten(i,k,j) = 0. - rqvblten(i,k,j) = 0. - rqcblten(i,k,j) = 0. - enddo - enddo + do k = kts,ktf + do i = its,itf + rublten(i,k,j) = 0. + rvblten(i,k,j) = 0. + rthblten(i,k,j) = 0. + rqvblten(i,k,j) = 0. + rqcblten(i,k,j) = 0. + enddo + enddo enddo endif ! if (p_qi .ge. p_first_scalar .and. .not.restart) then - do j = jts,jtf - do k = kts,ktf - do i = its,itf - rqiblten(i,k,j) = 0. - enddo - enddo - enddo + do j = jts,jtf + do k = kts,ktf + do i = its,itf + rqiblten(i,k,j) = 0. + enddo + enddo + enddo endif ! end subroutine ysuinit -!------------------------------------------------------------------- +!------------------------------------------------------------------------------- end module module_bl_ysu +!------------------------------------------------------------------------------- diff --git a/src/core_atmosphere/physics/physics_wrf/module_cam_error_function.F b/src/core_atmosphere/physics/physics_wrf/module_cam_error_function.F new file mode 100644 index 0000000000..1796a73f12 --- /dev/null +++ b/src/core_atmosphere/physics/physics_wrf/module_cam_error_function.F @@ -0,0 +1,655 @@ +!================================================================================================== +! copied for implementation in MPAS from WRF version 3.6.1 +! Laura D. Fowler (laura@ucar.edu / 2015-01-06). + +!================================================================================================== +#define WRF_PORT +#define MODAL_AERO +! Updated to CESM1.0.3 (CAM5.1.01) by Balwinder.Singh@pnnl.gov + +module error_function + +! This module provides generic interfaces for functions that evaluate +! 
erf(x), erfc(x), and exp(x*x)*erfc(x) in either single or double precision. + +implicit none +private +save + +! Public functions +public :: erf, erfc, erfcx + +interface erf + module procedure erf_r4 + module procedure derf +end interface + +interface erfc + module procedure erfc_r4 + module procedure derfc +end interface + +interface erfcx + module procedure erfcx_r4 + module procedure derfcx +end interface + +! Private variables +integer, parameter :: r4 = selected_real_kind(6) ! 4 byte real +integer, parameter :: r8 = selected_real_kind(12) ! 8 byte real + +contains + +!------------------------------------------------------------------ +! +! 6 December 2006 -- B. Eaton +! The following comments are from the original version of CALERF. +! The only changes in implementing this module are that the function +! names previously used for the single precision versions have been +! adopted for the new generic interfaces. To support these interfaces +! there is now both a single precision version (calerf_r4) and a +! double precision version (calerf_r8) of CALERF below. These versions +! are hardcoded to use IEEE arithmetic. +! +!------------------------------------------------------------------ +! +! This packet evaluates erf(x), erfc(x), and exp(x*x)*erfc(x) +! for a real argument x. It contains three FUNCTION type +! subprograms: ERF, ERFC, and ERFCX (or DERF, DERFC, and DERFCX), +! and one SUBROUTINE type subprogram, CALERF. The calling +! statements for the primary entries are: +! +! Y=ERF(X) (or Y=DERF(X)), +! +! Y=ERFC(X) (or Y=DERFC(X)), +! and +! Y=ERFCX(X) (or Y=DERFCX(X)). +! +! The routine CALERF is intended for internal packet use only, +! all computations within the packet being concentrated in this +! routine. The function subprograms invoke CALERF with the +! statement +! +! CALL CALERF(ARG,RESULT,JINT) +! +! where the parameter usage is as follows +! +! Function Parameters for CALERF +! call ARG Result JINT +! +! ERF(ARG) ANY REAL ARGUMENT ERF(ARG) 0 +! 
ERFC(ARG) ABS(ARG) .LT. XBIG ERFC(ARG) 1 +! ERFCX(ARG) XNEG .LT. ARG .LT. XMAX ERFCX(ARG) 2 +! +! The main computation evaluates near-minimax approximations +! from "Rational Chebyshev approximations for the error function" +! by W. J. Cody, Math. Comp., 1969, PP. 631-638. This +! transportable program uses rational functions that theoretically +! approximate erf(x) and erfc(x) to at least 18 significant +! decimal digits. The accuracy achieved depends on the arithmetic +! system, the compiler, the intrinsic functions, and proper +! selection of the machine-dependent constants. +! +!******************************************************************* +!******************************************************************* +! +! Explanation of machine-dependent constants +! +! XMIN = the smallest positive floating-point number. +! XINF = the largest positive finite floating-point number. +! XNEG = the largest negative argument acceptable to ERFCX; +! the negative of the solution to the equation +! 2*exp(x*x) = XINF. +! XSMALL = argument below which erf(x) may be represented by +! 2*x/sqrt(pi) and above which x*x will not underflow. +! A conservative value is the largest machine number X +! such that 1.0 + X = 1.0 to machine precision. +! XBIG = largest argument acceptable to ERFC; solution to +! the equation: W(x) * (1-0.5/x**2) = XMIN, where +! W(x) = exp(-x*x)/[x*sqrt(pi)]. +! XHUGE = argument above which 1.0 - 1/(2*x*x) = 1.0 to +! machine precision. A conservative value is +! 1/[2*sqrt(XSMALL)] +! XMAX = largest acceptable argument to ERFCX; the minimum +! of XINF and 1/[sqrt(pi)*XMIN]. +! +! Approximate values for some important machines are: +! +! XMIN XINF XNEG XSMALL +! +! CDC 7600 (S.P.) 3.13E-294 1.26E+322 -27.220 7.11E-15 +! CRAY-1 (S.P.) 4.58E-2467 5.45E+2465 -75.345 7.11E-15 +! IEEE (IBM/XT, +! SUN, etc.) (S.P.) 1.18E-38 3.40E+38 -9.382 5.96E-8 +! IEEE (IBM/XT, +! SUN, etc.) (D.P.) 2.23D-308 1.79D+308 -26.628 1.11D-16 +! IBM 195 (D.P.) 
5.40D-79 7.23E+75 -13.190 1.39D-17 +! UNIVAC 1108 (D.P.) 2.78D-309 8.98D+307 -26.615 1.73D-18 +! VAX D-Format (D.P.) 2.94D-39 1.70D+38 -9.345 1.39D-17 +! VAX G-Format (D.P.) 5.56D-309 8.98D+307 -26.615 1.11D-16 +! +! +! XBIG XHUGE XMAX +! +! CDC 7600 (S.P.) 25.922 8.39E+6 1.80X+293 +! CRAY-1 (S.P.) 75.326 8.39E+6 5.45E+2465 +! IEEE (IBM/XT, +! SUN, etc.) (S.P.) 9.194 2.90E+3 4.79E+37 +! IEEE (IBM/XT, +! SUN, etc.) (D.P.) 26.543 6.71D+7 2.53D+307 +! IBM 195 (D.P.) 13.306 1.90D+8 7.23E+75 +! UNIVAC 1108 (D.P.) 26.582 5.37D+8 8.98D+307 +! VAX D-Format (D.P.) 9.269 1.90D+8 1.70D+38 +! VAX G-Format (D.P.) 26.569 6.71D+7 8.98D+307 +! +!******************************************************************* +!******************************************************************* +! +! Error returns +! +! The program returns ERFC = 0 for ARG .GE. XBIG; +! +! ERFCX = XINF for ARG .LT. XNEG; +! and +! ERFCX = 0 for ARG .GE. XMAX. +! +! +! Intrinsic functions required are: +! +! ABS, AINT, EXP +! +! +! Author: W. J. Cody +! Mathematics and Computer Science Division +! Argonne National Laboratory +! Argonne, IL 60439 +! +! Latest modification: March 19, 1990 +! +!------------------------------------------------------------------ + +SUBROUTINE CALERF_r8(ARG, RESULT, JINT) + + !------------------------------------------------------------------ + ! This version uses 8-byte reals + !------------------------------------------------------------------ + integer, parameter :: rk = r8 + + ! arguments + real(rk), intent(in) :: arg + integer, intent(in) :: jint + real(rk), intent(out) :: result + + ! local variables + INTEGER :: I + + real(rk) :: X, Y, YSQ, XNUM, XDEN, DEL + + !------------------------------------------------------------------ + ! 
Mathematical constants + !------------------------------------------------------------------ + real(rk), parameter :: ZERO = 0.0E0_rk + real(rk), parameter :: FOUR = 4.0E0_rk + real(rk), parameter :: ONE = 1.0E0_rk + real(rk), parameter :: HALF = 0.5E0_rk + real(rk), parameter :: TWO = 2.0E0_rk + real(rk), parameter :: SQRPI = 5.6418958354775628695E-1_rk + real(rk), parameter :: THRESH = 0.46875E0_rk + real(rk), parameter :: SIXTEN = 16.0E0_rk + +!------------------------------------------------------------------ +! Machine-dependent constants: IEEE single precision values +!------------------------------------------------------------------ +!S real, parameter :: XINF = 3.40E+38 +!S real, parameter :: XNEG = -9.382E0 +!S real, parameter :: XSMALL = 5.96E-8 +!S real, parameter :: XBIG = 9.194E0 +!S real, parameter :: XHUGE = 2.90E3 +!S real, parameter :: XMAX = 4.79E37 + + !------------------------------------------------------------------ + ! Machine-dependent constants: IEEE double precision values + !------------------------------------------------------------------ + real(rk), parameter :: XINF = 1.79E308_r8 + real(rk), parameter :: XNEG = -26.628E0_r8 + real(rk), parameter :: XSMALL = 1.11E-16_r8 + real(rk), parameter :: XBIG = 26.543E0_r8 + real(rk), parameter :: XHUGE = 6.71E7_r8 + real(rk), parameter :: XMAX = 2.53E307_r8 + + !------------------------------------------------------------------ + ! Coefficients for approximation to erf in first interval + !------------------------------------------------------------------ + real(rk), parameter :: A(5) = (/ 3.16112374387056560E00_rk, 1.13864154151050156E02_rk, & + 3.77485237685302021E02_rk, 3.20937758913846947E03_rk, & + 1.85777706184603153E-1_rk /) + real(rk), parameter :: B(4) = (/ 2.36012909523441209E01_rk, 2.44024637934444173E02_rk, & + 1.28261652607737228E03_rk, 2.84423683343917062E03_rk /) + + !------------------------------------------------------------------ + ! 
Coefficients for approximation to erfc in second interval + !------------------------------------------------------------------ + real(rk), parameter :: C(9) = (/ 5.64188496988670089E-1_rk, 8.88314979438837594E00_rk, & + 6.61191906371416295E01_rk, 2.98635138197400131E02_rk, & + 8.81952221241769090E02_rk, 1.71204761263407058E03_rk, & + 2.05107837782607147E03_rk, 1.23033935479799725E03_rk, & + 2.15311535474403846E-8_rk /) + real(rk), parameter :: D(8) = (/ 1.57449261107098347E01_rk, 1.17693950891312499E02_rk, & + 5.37181101862009858E02_rk, 1.62138957456669019E03_rk, & + 3.29079923573345963E03_rk, 4.36261909014324716E03_rk, & + 3.43936767414372164E03_rk, 1.23033935480374942E03_rk /) + + !------------------------------------------------------------------ + ! Coefficients for approximation to erfc in third interval + !------------------------------------------------------------------ + real(rk), parameter :: P(6) = (/ 3.05326634961232344E-1_rk, 3.60344899949804439E-1_rk, & + 1.25781726111229246E-1_rk, 1.60837851487422766E-2_rk, & + 6.58749161529837803E-4_rk, 1.63153871373020978E-2_rk /) + real(rk), parameter :: Q(5) = (/ 2.56852019228982242E00_rk, 1.87295284992346047E00_rk, & + 5.27905102951428412E-1_rk, 6.05183413124413191E-2_rk, & + 2.33520497626869185E-3_rk /) + + !------------------------------------------------------------------ + X = ARG + Y = ABS(X) + IF (Y .LE. THRESH) THEN + !------------------------------------------------------------------ + ! Evaluate erf for |X| <= 0.46875 + !------------------------------------------------------------------ + YSQ = ZERO + IF (Y .GT. XSMALL) YSQ = Y * Y + XNUM = A(5)*YSQ + XDEN = YSQ + DO I = 1, 3 + XNUM = (XNUM + A(I)) * YSQ + XDEN = (XDEN + B(I)) * YSQ + end do + RESULT = X * (XNUM + A(4)) / (XDEN + B(4)) + IF (JINT .NE. 0) RESULT = ONE - RESULT + IF (JINT .EQ. 2) RESULT = EXP(YSQ) * RESULT + GO TO 80 + ELSE IF (Y .LE. FOUR) THEN + !------------------------------------------------------------------ + ! 
Evaluate erfc for 0.46875 <= |X| <= 4.0 + !------------------------------------------------------------------ + XNUM = C(9)*Y + XDEN = Y + DO I = 1, 7 + XNUM = (XNUM + C(I)) * Y + XDEN = (XDEN + D(I)) * Y + end do + RESULT = (XNUM + C(8)) / (XDEN + D(8)) + IF (JINT .NE. 2) THEN + YSQ = AINT(Y*SIXTEN)/SIXTEN + DEL = (Y-YSQ)*(Y+YSQ) + RESULT = EXP(-YSQ*YSQ) * EXP(-DEL) * RESULT + END IF + ELSE + !------------------------------------------------------------------ + ! Evaluate erfc for |X| > 4.0 + !------------------------------------------------------------------ + RESULT = ZERO + IF (Y .GE. XBIG) THEN + IF ((JINT .NE. 2) .OR. (Y .GE. XMAX)) GO TO 30 + IF (Y .GE. XHUGE) THEN + RESULT = SQRPI / Y + GO TO 30 + END IF + END IF + YSQ = ONE / (Y * Y) + XNUM = P(6)*YSQ + XDEN = YSQ + DO I = 1, 4 + XNUM = (XNUM + P(I)) * YSQ + XDEN = (XDEN + Q(I)) * YSQ + end do + RESULT = YSQ *(XNUM + P(5)) / (XDEN + Q(5)) + RESULT = (SQRPI - RESULT) / Y + IF (JINT .NE. 2) THEN + YSQ = AINT(Y*SIXTEN)/SIXTEN + DEL = (Y-YSQ)*(Y+YSQ) + RESULT = EXP(-YSQ*YSQ) * EXP(-DEL) * RESULT + END IF + END IF +30 continue + !------------------------------------------------------------------ + ! Fix up for negative argument, erf, etc. + !------------------------------------------------------------------ + IF (JINT .EQ. 0) THEN + RESULT = (HALF - RESULT) + HALF + IF (X .LT. ZERO) RESULT = -RESULT + ELSE IF (JINT .EQ. 1) THEN + IF (X .LT. ZERO) RESULT = TWO - RESULT + ELSE + IF (X .LT. ZERO) THEN + IF (X .LT. XNEG) THEN + RESULT = XINF + ELSE + YSQ = AINT(X*SIXTEN)/SIXTEN + DEL = (X-YSQ)*(X+YSQ) + Y = EXP(YSQ*YSQ) * EXP(DEL) + RESULT = (Y+Y) - RESULT + END IF + END IF + END IF +80 continue +end SUBROUTINE CALERF_r8 + +!------------------------------------------------------------------------------------------ + +SUBROUTINE CALERF_r4(ARG, RESULT, JINT) + + !------------------------------------------------------------------ + ! 
This version uses 4-byte reals + !------------------------------------------------------------------ + integer, parameter :: rk = r4 + + ! arguments + real(rk), intent(in) :: arg + integer, intent(in) :: jint + real(rk), intent(out) :: result + + ! local variables + INTEGER :: I + + real(rk) :: X, Y, YSQ, XNUM, XDEN, DEL + + !------------------------------------------------------------------ + ! Mathematical constants + !------------------------------------------------------------------ + real(rk), parameter :: ZERO = 0.0E0_rk + real(rk), parameter :: FOUR = 4.0E0_rk + real(rk), parameter :: ONE = 1.0E0_rk + real(rk), parameter :: HALF = 0.5E0_rk + real(rk), parameter :: TWO = 2.0E0_rk + real(rk), parameter :: SQRPI = 5.6418958354775628695E-1_rk + real(rk), parameter :: THRESH = 0.46875E0_rk + real(rk), parameter :: SIXTEN = 16.0E0_rk + + !------------------------------------------------------------------ + ! Machine-dependent constants: IEEE single precision values + !------------------------------------------------------------------ + real(rk), parameter :: XINF = 3.40E+38_r4 + real(rk), parameter :: XNEG = -9.382E0_r4 + real(rk), parameter :: XSMALL = 5.96E-8_r4 + real(rk), parameter :: XBIG = 9.194E0_r4 + real(rk), parameter :: XHUGE = 2.90E3_r4 + real(rk), parameter :: XMAX = 4.79E37_r4 + + !------------------------------------------------------------------ + ! Coefficients for approximation to erf in first interval + !------------------------------------------------------------------ + real(rk), parameter :: A(5) = (/ 3.16112374387056560E00_rk, 1.13864154151050156E02_rk, & + 3.77485237685302021E02_rk, 3.20937758913846947E03_rk, & + 1.85777706184603153E-1_rk /) + real(rk), parameter :: B(4) = (/ 2.36012909523441209E01_rk, 2.44024637934444173E02_rk, & + 1.28261652607737228E03_rk, 2.84423683343917062E03_rk /) + + !------------------------------------------------------------------ + ! 
Coefficients for approximation to erfc in second interval + !------------------------------------------------------------------ + real(rk), parameter :: C(9) = (/ 5.64188496988670089E-1_rk, 8.88314979438837594E00_rk, & + 6.61191906371416295E01_rk, 2.98635138197400131E02_rk, & + 8.81952221241769090E02_rk, 1.71204761263407058E03_rk, & + 2.05107837782607147E03_rk, 1.23033935479799725E03_rk, & + 2.15311535474403846E-8_rk /) + real(rk), parameter :: D(8) = (/ 1.57449261107098347E01_rk, 1.17693950891312499E02_rk, & + 5.37181101862009858E02_rk, 1.62138957456669019E03_rk, & + 3.29079923573345963E03_rk, 4.36261909014324716E03_rk, & + 3.43936767414372164E03_rk, 1.23033935480374942E03_rk /) + + !------------------------------------------------------------------ + ! Coefficients for approximation to erfc in third interval + !------------------------------------------------------------------ + real(rk), parameter :: P(6) = (/ 3.05326634961232344E-1_rk, 3.60344899949804439E-1_rk, & + 1.25781726111229246E-1_rk, 1.60837851487422766E-2_rk, & + 6.58749161529837803E-4_rk, 1.63153871373020978E-2_rk /) + real(rk), parameter :: Q(5) = (/ 2.56852019228982242E00_rk, 1.87295284992346047E00_rk, & + 5.27905102951428412E-1_rk, 6.05183413124413191E-2_rk, & + 2.33520497626869185E-3_rk /) + + !------------------------------------------------------------------ + X = ARG + Y = ABS(X) + IF (Y .LE. THRESH) THEN + !------------------------------------------------------------------ + ! Evaluate erf for |X| <= 0.46875 + !------------------------------------------------------------------ + YSQ = ZERO + IF (Y .GT. XSMALL) YSQ = Y * Y + XNUM = A(5)*YSQ + XDEN = YSQ + DO I = 1, 3 + XNUM = (XNUM + A(I)) * YSQ + XDEN = (XDEN + B(I)) * YSQ + end do + RESULT = X * (XNUM + A(4)) / (XDEN + B(4)) + IF (JINT .NE. 0) RESULT = ONE - RESULT + IF (JINT .EQ. 2) RESULT = EXP(YSQ) * RESULT + GO TO 80 + ELSE IF (Y .LE. FOUR) THEN + !------------------------------------------------------------------ + ! 
Evaluate erfc for 0.46875 <= |X| <= 4.0 + !------------------------------------------------------------------ + XNUM = C(9)*Y + XDEN = Y + DO I = 1, 7 + XNUM = (XNUM + C(I)) * Y + XDEN = (XDEN + D(I)) * Y + end do + RESULT = (XNUM + C(8)) / (XDEN + D(8)) + IF (JINT .NE. 2) THEN + YSQ = AINT(Y*SIXTEN)/SIXTEN + DEL = (Y-YSQ)*(Y+YSQ) + RESULT = EXP(-YSQ*YSQ) * EXP(-DEL) * RESULT + END IF + ELSE + !------------------------------------------------------------------ + ! Evaluate erfc for |X| > 4.0 + !------------------------------------------------------------------ + RESULT = ZERO + IF (Y .GE. XBIG) THEN + IF ((JINT .NE. 2) .OR. (Y .GE. XMAX)) GO TO 30 + IF (Y .GE. XHUGE) THEN + RESULT = SQRPI / Y + GO TO 30 + END IF + END IF + YSQ = ONE / (Y * Y) + XNUM = P(6)*YSQ + XDEN = YSQ + DO I = 1, 4 + XNUM = (XNUM + P(I)) * YSQ + XDEN = (XDEN + Q(I)) * YSQ + end do + RESULT = YSQ *(XNUM + P(5)) / (XDEN + Q(5)) + RESULT = (SQRPI - RESULT) / Y + IF (JINT .NE. 2) THEN + YSQ = AINT(Y*SIXTEN)/SIXTEN + DEL = (Y-YSQ)*(Y+YSQ) + RESULT = EXP(-YSQ*YSQ) * EXP(-DEL) * RESULT + END IF + END IF +30 continue + !------------------------------------------------------------------ + ! Fix up for negative argument, erf, etc. + !------------------------------------------------------------------ + IF (JINT .EQ. 0) THEN + RESULT = (HALF - RESULT) + HALF + IF (X .LT. ZERO) RESULT = -RESULT + ELSE IF (JINT .EQ. 1) THEN + IF (X .LT. ZERO) RESULT = TWO - RESULT + ELSE + IF (X .LT. ZERO) THEN + IF (X .LT. XNEG) THEN + RESULT = XINF + ELSE + YSQ = AINT(X*SIXTEN)/SIXTEN + DEL = (X-YSQ)*(X+YSQ) + Y = EXP(YSQ*YSQ) * EXP(DEL) + RESULT = (Y+Y) - RESULT + END IF + END IF + END IF +80 continue +end SUBROUTINE CALERF_r4 + +!------------------------------------------------------------------------------------------ + +FUNCTION DERF(X) +!-------------------------------------------------------------------- +! +! This subprogram computes approximate values for erf(x). +! (see comments heading CALERF). +! +! 
Author/date: W. J. Cody, January 8, 1985 +! +!-------------------------------------------------------------------- + integer, parameter :: rk = r8 ! 8 byte real + + ! argument + real(rk), intent(in) :: X + + ! return value + real(rk) :: DERF + + ! local variables + INTEGER :: JINT = 0 + !------------------------------------------------------------------ + + CALL CALERF_r8(X, DERF, JINT) +END FUNCTION DERF + +!------------------------------------------------------------------------------------------ + +FUNCTION ERF_r4(X) +!-------------------------------------------------------------------- +! +! This subprogram computes approximate values for erf(x). +! (see comments heading CALERF). +! +! Author/date: W. J. Cody, January 8, 1985 +! +!-------------------------------------------------------------------- + integer, parameter :: rk = r4 ! 4 byte real + + ! argument + real(rk), intent(in) :: X + + ! return value + real(rk) :: ERF_r4 + + ! local variables + INTEGER :: JINT = 0 + !------------------------------------------------------------------ + + CALL CALERF_r4(X, ERF_r4, JINT) +END FUNCTION ERF_r4 + +!------------------------------------------------------------------------------------------ + +FUNCTION DERFC(X) +!-------------------------------------------------------------------- +! +! This subprogram computes approximate values for erfc(x). +! (see comments heading CALERF). +! +! Author/date: W. J. Cody, January 8, 1985 +! +!-------------------------------------------------------------------- + integer, parameter :: rk = r8 ! 8 byte real + + ! argument + real(rk), intent(in) :: X + + ! return value + real(rk) :: DERFC + + ! 
local variables + INTEGER :: JINT = 1 + !------------------------------------------------------------------ + + CALL CALERF_r8(X, DERFC, JINT) +END FUNCTION DERFC + +!------------------------------------------------------------------------------------------ + +FUNCTION ERFC_r4(X) +!-------------------------------------------------------------------- +! +! This subprogram computes approximate values for erfc(x). +! (see comments heading CALERF). +! +! Author/date: W. J. Cody, January 8, 1985 +! +!-------------------------------------------------------------------- + integer, parameter :: rk = r4 ! 4 byte real + + ! argument + real(rk), intent(in) :: X + + ! return value + real(rk) :: ERFC_r4 + + ! local variables + INTEGER :: JINT = 1 + !------------------------------------------------------------------ + + CALL CALERF_r4(X, ERFC_r4, JINT) +END FUNCTION ERFC_r4 + +!------------------------------------------------------------------------------------------ + +FUNCTION DERFCX(X) +!-------------------------------------------------------------------- +! +! This subprogram computes approximate values for exp(x*x) * erfc(x). +! (see comments heading CALERF). +! +! Author/date: W. J. Cody, March 30, 1987 +! +!-------------------------------------------------------------------- + integer, parameter :: rk = r8 ! 8 byte real + + ! argument + real(rk), intent(in) :: X + + ! return value + real(rk) :: DERFCX + + ! local variables + INTEGER :: JINT = 2 + !------------------------------------------------------------------ + + CALL CALERF_r8(X, DERFCX, JINT) +END FUNCTION DERFCX + +!------------------------------------------------------------------------------------------ + +FUNCTION ERFCX_R4(X) +!-------------------------------------------------------------------- +! +! This subprogram computes approximate values for exp(x*x) * erfc(x). +! (see comments heading CALERF). +! +! Author/date: W. J. Cody, March 30, 1987 +! 
+!--------------------------------------------------------------------
+    integer, parameter :: rk = r4 ! 4 byte real
+
+    ! argument
+    real(rk), intent(in) :: X
+
+    ! return value
+    real(rk) :: ERFCX_R4
+
+    ! local variables
+    INTEGER :: JINT = 2    ! JINT=2 selects the exp(x*x)*erfc(x) branch of CALERF
+    !------------------------------------------------------------------
+
+    CALL CALERF_r4(X, ERFCX_R4, JINT)
+END FUNCTION ERFCX_R4
+
+!------------------------------------------------------------------------------------------
+
+end module error_function
diff --git a/src/core_atmosphere/physics/physics_wrf/module_cu_gf.mpas.F b/src/core_atmosphere/physics/physics_wrf/module_cu_gf.mpas.F
new file mode 100644
index 0000000000..8e2d8309a5
--- /dev/null
+++ b/src/core_atmosphere/physics/physics_wrf/module_cu_gf.mpas.F
@@ -0,0 +1,5085 @@
+!WRF:MODEL_LAYER:PHYSICS
+!
+
+MODULE module_cu_gf

+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+! This convective parameterization is built to attempt     !
+! a smooth transition to cloud resolving scales as proposed!
+! by Arakawa et al (2011, ACP). It currently does not use  !
+! subsidence spreading as in G3. Differences and details   !
+! will be described in a forthcoming paper by              !
+! Grell and Freitas (2013). The parameterization also      !
+! offers options to couple with aerosols. Both the smooth  !
+! transition part as well as the aerosol coupling are      !
+! experimental. While the smooth transition part is turned !
+! on, and has been tested down to a resolution of about 3km!
+! the aerosol coupling is turned off.                      !
+! More clean-up as well as a direct coupling to chemistry  !
+! will follow for V3.5.1                                   !
+!                                                          !
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+CONTAINS
+
+!-----------------------------------------------------------------------------------------------------------------
+
+!modifications to sourcecode for MPAS:
+! * replaced variable dx with variables dxCell and areaCell, to include the impact of the mean
+!   distance between cells, and the area of individual cells when using variable-resolution meshes.
+! * replaced variable ichoice with variables ichoice_deep and ichoice_shallow to include separate
+!   closures for deep and shallow convection.
+!   Laura D. Fowler (laura@ucar.edu) / 2014-05-29.
+! * replaced .1 in the calculation of the initial radius with .2, prior to calculating frh.
+!   Laura D. Fowler (laura@ucar.edu) / 2014-06-04.
+! * added choice for closures in the shallow convection scheme.
+!   Laura D. Fowler (laura@ucar.edu) / 2014-06-19.
+! * renamed the original subroutine gfdrv to cu_grell_freitas, and cleaned up the initialization of
+!   local and inout variables before and after the calls to cup_gf and cup_gf_sh.
+!   Laura D. Fowler (laura@ucar.edu) / 2016-03-30.
+!   -> removed variables that are never computed from the argument list (for instance, all the apr_* arrays).
+!   -> removed the variable ktop_deep which is a duplicate of the variable htop (ktop_deep was not properly
+!      initialized in the original sourcecode).
+!   -> corrected the declarations of the local arrays gswi,edti_out,massi_flx, and corrected the initialization
+!      of those variables accordingly.
+!   -> added the initialization of the in-cloud cloud water (gdc) and in-cloud cloud ice (gdc2) mixing ratios.
+!      gdc and gdc2 were not initialized in the original sourcecode which led to non-zero values for gdc when
+!      temperatures are colder than tcrit.
+!   -> corrected the calculation of the moisture convergence. the conversion of w to omeg now uses the air
+!      density rho interpolated to w levels.
+!      Laura D. Fowler (laura@ucar.edu) / 2016-04-20.
+
+
+ subroutine cu_grell_freitas( &
+                itimestep,dt,dxcell,areacell                       &
+               ,u,v,t,w,q,p,pi,rho,dz8w,p8w                        &
+               ,xland,ht,hfx,qfx,gsw,rqvften,rthften               &
+               ,rthblten,rqvblten,rthraten,kpbl,xlv,cp,g,r_v       &
+               ,ichoice_deep,ichoice_shallow,ishallow_g3           &
+               ,htop,hbot,k22_shallow,kbcon_shallow,ktop_shallow   &
+               ,xmb_total,xmb_shallow,raincv,pratec,gdc,gdc2       &
+               ,rthcuten,rqvcuten,rqccuten,rqicuten                &
+               ,ims, ime, jms, jme, kms,kme                        &
+               ,ids, ide, jds, jde, kds,kde                        &
+               ,its, ite, jts, jte, kts,kte)
+
+ implicit none
+
+!-----------------------------------------------------------------------------------------------------------------
+
+!autoconv, 1=old c0, 2=berry c0
+ integer, parameter:: autoconv = 1
+!aeroevap, 1=old,2=?, 3=average
+ integer, parameter:: aeroevap = 1
+ integer, parameter:: training = 0      ! 0 = training-data branch disabled
+ integer, parameter:: use_excess = 0    ! >0 adds boundary-layer T/q excess (ztexec/zqexec) to deep parcels
+ integer, parameter:: use_excess_sh = 0 ! >0 adds boundary-layer T/q excess to shallow parcels
+ integer, parameter:: maxiens = 1
+ integer, parameter:: maxens = 1
+ integer, parameter:: maxens2 = 1
+ integer, parameter:: maxens3 = 16
+ integer, parameter:: ensdim = 16       ! = maxiens*maxens*maxens2*maxens3
+ integer, parameter:: iens = 1
+
+ integer, parameter:: ens4_spread = 3 ! max(3,cugd_avedx)
+ integer, parameter:: ens4 = ens4_spread*ens4_spread
+
+ real, parameter:: ccnclean = 250.    ! passed to cup_gf (aerosol/CCN option)
+ real, parameter:: aodccn = 0.1
+ real, parameter:: beta = 0.02
+ real, parameter:: tcrit = 258.       ! temperature (K) separating cloud-ice from cloud-water tendencies
+
+!-----------------------------------------------------------------------------------------------------------------
+
+!intent arguments:
+ integer,intent(in):: ids,ide,jds,jde,kds,kde, &
+                      ims,ime,jms,jme,kms,kme, &
+                      its,ite,jts,jte,kts,kte
+
+ integer,intent(in):: ichoice_deep,ichoice_shallow,ishallow_g3,itimestep
+
+ integer,dimension(ims:ime,jms:jme ),intent(in):: kpbl
+
+ real,intent(in):: dt
+ real,intent(in):: xlv,r_v,cp,g
+ real,dimension(ims:ime,jms:jme),intent(in):: areaCell,dxCell
+ real,dimension(ims:ime,jms:jme),intent(in):: hfx,qfx,gsw,ht,xland
+
+ real,dimension(ims:ime,kms:kme,jms:jme),intent(in):: u,v,w,p,pi,q,rho,t
+ real,dimension(ims:ime,kms:kme,jms:jme),intent(in):: dz8w,p8w
+ real,dimension(ims:ime,kms:kme,jms:jme),intent(in):: rqvblten,rthblten,rthraten
+ real,dimension(ims:ime,kms:kme,jms:jme),intent(in),optional:: rthften,rqvften
+
+
+!inout arguments:
+ integer,dimension(ims:ime,jms:jme),intent(inout):: k22_shallow,kbcon_shallow,ktop_shallow
+
+ real,dimension(ims:ime,jms:jme),intent(inout):: hbot,htop,raincv,pratec,xmb_total,xmb_shallow
+ real,dimension(ims:ime,kms:kme,jms:jme),intent(inout):: rthcuten,rqvcuten,rqccuten,rqicuten
+ real,dimension(ims:ime,kms:kme,jms:jme),intent(inout),optional:: gdc,gdc2
+
+!local variables:
+ character(len=50),dimension(its:ite):: ierrc
+ character(len=50),dimension(its:ite):: ierrcs
+
+ integer:: i,j,k,n
+ integer:: ipr,jpr
+ integer:: itf,jtf,ktf
+
+ integer,dimension(its:ite):: ierr,ierrs
+ integer,dimension(its:ite):: kpbli
+ integer,dimension(its:ite):: kbcon,ktop,k22s,kbcons,ktops
+
+ real:: dp,dq,pahfs,pgeoh,pqhfl,zkhvfl,zrho,zws
+ real,dimension(its:ite):: area_loc,dx_loc,tscl_kf
+ real,dimension(its:ite):: xlandi
+ real,dimension(its:ite):: xmb,xmbs
+ real,dimension(its:ite):: ccn
+ real,dimension(its:ite):: aaeq,cuten,psur,pret,ter11,zqexec,ztexec,pmean,umean,vmean
+ real,dimension(its:ite,kts:kte):: zo,t2d,q2d,po,p2d,us,vs,rhoi,tn,qo,tshall,qshall
+ real,dimension(its:ite,kts:kte):: subt,subq,outt,outq,outqc,outts,outqs,outqcs,phh, &
+                                   subm,cupclw,cupclws,dhdt
+
+ real,dimension(its:ite,jts:jte):: apri_gr,apri_w,apri_mc,apri_st,apri_as,apri_capma, &
+                                   apri_capme,apri_capmi
+ real,dimension(its:ite,jts:jte):: gswi,edti_out,massi_flx ! NOTE(review): massi_flx never set/used here - confirm
+
+ real,dimension(its:ite,jts:jte,1:ensdim):: pri_ens,xfi_ens
+ real,dimension (its:ite,1:ens4)         :: mconv
+ real,dimension(its:ite,kts:kte,1:ens4)  :: omeg
+
+!physics tile limits (exclude the extra dynamics column/level):
+ itf = min(ite,ide-1)
+ ktf = min(kte,kde-1)
+ jtf = min(jte,jde-1)
+!write(0,*) '--- its = ', its
+!write(0,*) '--- kts = ', kts
+!write(0,*) '--- jts = ', jts
+!write(0,*) '--- itf = ', itf
+!write(0,*) '--- ktf = ', ktf
+!write(0,*) '--- jtf = ', jtf
+
+ ipr = ite
+ jpr = jte
+
+!initialize diagnostics over the full tile so every point is defined:
+ do j = jts, jte
+ do i = its, ite
+    hbot(i,j) = real(kte)
+    htop(i,j) = real(kts)
+    xmb_total(i,j) = 0.
+    raincv(i,j) = 0.
+    pratec(i,j) = 0.
+
+    !shallow convection:
+    k22_shallow(i,j) = 0
+    kbcon_shallow(i,j) = 0
+    ktop_shallow(i,j) = 0
+    xmb_shallow(i,j) = 0.
+ enddo
+ enddo
+
+!in-cloud cloud water and cloud ice mixing ratios:
+ if(present(gdc)) then
+    do j = jts, jte
+    do k = kts, kte ! bug fix: vertical loop started at jts (j-tile index), leaving levels uninitialized when kts<jts
+    do i = its, ite
+       gdc(i,k,j) = 0.
+    enddo
+    enddo
+    enddo
+ endif
+ if(present(gdc2)) then
+    do j = jts, jte
+    do k = kts, kte ! bug fix: vertical loop started at jts (see gdc above)
+    do i = its, ite
+       gdc2(i,k,j) = 0.
+    enddo
+    enddo
+    enddo
+ endif
+
+ j_loop: do j = jts, jtf
+
+!initialization of local variables:
+ do i = its, itf
+    apri_gr(i,j) = 0.
+    apri_w(i,j) = 0.
+    apri_mc(i,j) = 0.
+    apri_st(i,j) = 0.
+    apri_as(i,j) = 0.
+    apri_capma(i,j) = 0.
+    apri_capme(i,j) = 0.
+    apri_capmi(i,j) = 0.
+    edti_out(i,j) = 0.
+    gswi(i,j) = gsw(i,j)
+ enddo
+
+ do n = 1, ensdim
+    do i = its, itf
+       pri_ens(i,j,n) = 0.
+       xfi_ens(i,j,n) = 0.
+    enddo
+ enddo
+
+ do n = 1, ens4
+    do k = kts, ktf
+    do i = its, ite
+       omeg(i,k,n) = 0.
+    enddo
+    enddo
+    do i = its, itf
+       mconv(i,n) = 0.
+    enddo
+ enddo
+
+ do i = its, itf
+    ierrc(i) = " "
+    ierr(i) = 0
+    kbcon(i) = 0
+    ktop(i) = 0
+    xmb(i) = 0.
+
+    !shallow convection:
+    ierrcs(i) = " "
+    ierrs(i) = 0
+    kbcons(i) = 0
+    ktops(i) = 0
+    k22s(i) = 0
+    xmbs(i) = 0.
+    tscl_kf(i) = dxCell(i,j) / 25. ! advective time scale passed to the shallow scheme
+ enddo
+
+ do i = its, itf
+    dx_loc(i) = dxCell(i,j)
+    area_loc(i) = areaCell(i,j)
+    ter11(i) = max(0.,ht(i,j))
+    zo(i,kts) = ter11(i) + 0.5*dz8w(i,1,j)
+    do k = kts+1, ktf
+       zo(i,k) = zo(i,k-1) + 0.5*(dz8w(i,k-1,j)+dz8w(i,k,j)) ! mid-level heights (m) above sea level
+    enddo
+    psur(i) = p8w(i,1,j)*.01 ! surface pressure, Pa -> hPa
+    kpbli(i) = kpbl(i,j)
+    xlandi(i) = xland(i,j)
+
+    ccn(i) = 1500.
+
+    cuten(i) = 0.
+    umean(i) = 0.
+    vmean(i) = 0.
+    pmean(i) = 0.
+    pret(i) = 0.
+    aaeq(i) = 0.
+    zqexec(i) = 0.
+    ztexec(i) = 0.
+ enddo
+
+ do k = kts, ktf
+ do i = its, itf
+    us(i,k) = u(i,k,j)
+    vs(i,k) = v(i,k,j)
+    rhoi(i,k) = rho(i,k,j)
+    t2d(i,k) = t(i,k,j)
+    q2d(i,k) = q(i,k,j)
+    if(q2d(i,k) .lt. 1.e-08) q2d(i,k) = 1.e-08 ! floor qv to avoid zero/negative moisture
+
+    tn(i,k) = t2d(i,k) + (rthften(i,k,j)+rthraten(i,k,j)+rthblten(i,k,j))*pi(i,k,j)*dt ! NOTE(review): optional rthften used without present() - assumes caller passes it
+    qo(i,k) = q2d(i,k) + (rqvften(i,k,j)+rqvblten(i,k,j))*dt ! NOTE(review): optional rqvften used without present()
+    if(tn(i,k) .lt. 200.) tn(i,k) = t2d(i,k) ! discard unphysical forced temperature
+    if(qo(i,k) .lt. 1.e-08) qo(i,k) = 1.e-08
+
+    phh(i,k) = p(i,k,j)
+    po(i,k) = phh(i,k)*.01 ! pressure, Pa -> hPa
+    p2d(i,k) = po(i,k)
+
+    cupclw(i,k) = 0.
+    outq(i,k) = 0.
+    outqc(i,k) = 0.
+    outt(i,k) = 0.
+    subm(i,k) = 0.
+    subq(i,k) = 0.
+    subt(i,k) = 0.
+
+    !shallow convection: forced only by the boundary-layer tendencies
+    tshall(i,k) = t2d(i,k) + rthblten(i,k,j)*pi(i,k,j)*dt
+    qshall(i,k) = q2d(i,k) + rqvblten(i,k,j)*dt
+    dhdt(i,k) = cp*rthblten(i,k,j)*pi(i,k,j) + xlv*rqvblten(i,k,j) ! PBL moist static energy tendency
+
+    cupclws(i,k) = 0.
+    outqcs(i,k) = 0.
+    outqs(i,k) = 0.
+    outts(i,k) = 0.
+ enddo
+ enddo
+
+ !calculation of the moisture convergence: omeg = -g*rho(at w levels)*w
+ do n = 1, ens4
+    do k = kts+1, ktf
+    do i = its, itf
+       omeg(i,k,n) = -g*0.5*(rho(i,k,j)+rho(i,k-1,j))*w(i,k,j)
+    enddo
+    enddo
+
+    do k = kts+1, ktf
+    do i = its, itf
+       dq = (q2d(i,k)-q2d(i,k-1))
+       mconv(i,n) = mconv(i,n) + omeg(i,k,n)*dq/g ! column-integrated moisture convergence
+    enddo
+    enddo
+    do i = its, itf
+       if(mconv(i,n) .lt. 0.) mconv(i,n) = 0.
+    enddo
+ enddo
+ !calculation of the moisture convergence as Georg Grell (subroutine gfdrv):
+ !do n = 1, ens4
+ !   do k = kts, ktf
+ !      do i = its,itf
+ !         omeg(i,k,n)= -g*rho(i,k,j)*w(i,k,j)
+ !      enddo
+ !   enddo
+
+ !   do k = kts, ktf-1
+ !      do i = its, itf
+ !         dq = (q2d(i,k+1)-q2d(i,k))
+ !         mconv(i,n) = mconv(i,n) + omeg(i,k,n)*dq/g
+ !      enddo
+ !   enddo
+ !   do i = its, itf
+ !      if(mconv(i,n) .lt. 0.) mconv(i,n) = 0.
+ !   enddo
+ !enddo
+
+ if(use_excess.gt.0 .or. use_excess_sh.gt.0)then
+    do i = its, itf
+       zrho = 100.*psur(i)/(287.04*(t2d(i,1)*(1.+0.608*q2d(i,1)))) ! surface air density from virtual temperature
+
+       !- le and h fluxes
+       pahfs = -hfx(i,j)
+       pqhfl = -qfx(i,j)/xlv
+       !- buoyancy flux (h+le)
+       zkhvfl = (pahfs/1004.64+0.608*t2d(i,1)*pqhfl)/zrho
+       !- height of the 1st level
+       pgeoh = zo(i,1)-ht(i,j)
+       !-convective-scale velocity w*
+       zws = max(0.,0.001-1.5*0.41*zkhvfl*pgeoh/t2d(i,1))
+
+       if(zws > tiny(pgeoh)) then
+          !-convective-scale velocity w*
+          zws = 1.2*zws**.3333
+          !- temperature excess
+          ztexec(i) = max(-1.5*pahfs/(zrho*zws*1004.64),0.0)
+          !- moisture excess
+          zqexec(i) = max(-1.5*pqhfl/(zrho*zws),0.)
+       endif
+    enddo
+ endif ! use_excess
+
+ do k = kts+1, kte-1 ! pressure-weighted mean wind over the lowest 150 hPa (above 300 hPa)
+ do i = its, itf
+    if((p2d(i,1)-p2d(i,k)).gt.150. .and. p2d(i,k).gt.300.) then
+       dp = -.5*(p2d(i,k+1)-p2d(i,k-1))
+       umean(i) = umean(i) + us(i,k)*dp
+       vmean(i) = vmean(i) + vs(i,k)*dp
+       pmean(i) = pmean(i) + dp
+    endif
+ enddo
+ enddo
+
+!
+!---- CALL CUMULUS PARAMETERIZATION:
+!
+ call cup_gf(zo,outqc,j,aaeq,t2d,q2d,ter11,subm,tn,qo,po,pret, &
+             p2d,outt,outq,dt,itimestep,psur,us,vs,tcrit,iens, &
+             ztexec,zqexec,ccn,ccnclean,rhoi,dx_loc,area_loc, &
+             mconv,omeg,maxiens,maxens,maxens2,maxens3,ensdim, &
+             apri_gr,apri_w,apri_mc,apri_st,apri_as,apri_capma, &
+             apri_capme,apri_capmi,kbcon,ktop,cupclw,xfi_ens, &
+             pri_ens,xlandi,gswi,subt,subq,xlv,r_v,cp,g, &
+             ichoice_deep,ipr,jpr,ierrc,ens4,beta,autoconv, &
+             aeroevap,itf,jtf,ktf,training,xmb,use_excess,its, &
+             ite,jts,jte,kts,kte)
+
+ call neg_check(j,subt,subq,dt,q2d,outq,outt,outqc,pret,its,ite,kts,kte,itf,ktf) ! prevent negative moisture from deep tendencies
+
+ !... shallow convection:
+ if(ishallow_g3 == 1 )then
+    call cup_gf_sh( &
+               xmbs,zo,outqcs,j,aaeq,t2d,q2d,ter11,tshall,qshall,p2d, &
+               pret,p2d,outts,outqs,dt,itimestep,psur,us,vs,tcrit,ztexec, &
+               zqexec,ccn,ccnclean,rhoi,dx_loc,area_loc,dhdt,kpbli,kbcons, &
+               ktops,cupclws,k22s,xlandi,gswi,tscl_kf,xlv,r_v,cp,g, &
+               ichoice_shallow,0,0,ierrs,ierrcs,autoconv,itf,jtf,ktf, &
+               use_excess_sh,its,ite,jts,jte,kts,kte)
+
+    do i = its, ite
+       xmb_shallow(i,j) = xmbs(i)
+       k22_shallow(i,j) = k22s(i)
+       kbcon_shallow(i,j) = kbcons(i)
+       ktop_shallow(i,j) = ktops(i)
+    enddo
+ endif
+
+ do i = its, ite
+    xmb_total(i,j) = xmb(i)
+    if(pret(i) .gt. 0.) then ! deep convection active at this point
+       cuten(i) = 1.
+       pratec(i,j) = pret(i)
+       raincv(i,j) = pret(i)*dt
+
+       if(ktop(i) > htop(i,j) ) htop(i,j) = ktop(i) + .001
+       if(kbcon(i) < hbot(i,j)) hbot(i,j) = kbcon(i) + .001
+    endif
+ enddo
+
+ !... always save the tendencies of potential temperature, water vapor, cloud water, and cloud ice:
+ do k = kts, kte
+ do i = its, ite
+    rthcuten(i,k,j) = (outts(i,k) + (subt(i,k) + outt(i,k))*cuten(i))/pi(i,k,j) ! shallow always applied; deep gated by cuten
+    rqvcuten(i,k,j) = outqs(i,k) + (subq(i,k) + outq(i,k))*cuten(i)
+
+    if(t2d(i,k) .lt. tcrit) then ! colder than tcrit: detrained condensate treated as ice
+       rqccuten(i,k,j) = 0.
+       rqicuten(i,k,j) = outqcs(i,k) + outqc(i,k)*cuten(i)
+       if(present(gdc2)) gdc2(i,k,j) = cupclws(i,k) + cupclw(i,k)*cuten(i)
+    else
+       rqicuten(i,k,j) = 0.
+ rqccuten(i,k,j) = outqcs(i,k) + outqc(i,k)*cuten(i) + if(present(gdc)) gdc(i,k,j) = cupclws(i,k) + cupclw(i,k)*cuten(i) + endif + enddo + enddo + + enddo j_loop + + end subroutine cu_grell_freitas + +!----------------------------------------------------------------------------------------------------------------- + +#if defined(mpas) + SUBROUTINE CUP_gf(zo,OUTQC,J,AAEQ,T,Q,Z1,sub_mas, & + TN,QO,PO,PRE,P,OUTT,OUTQ,DTIME,ktau,PSUR,US,VS, & + TCRIT,iens, & + ztexec,zqexec,ccn,ccnclean,rho,dxCell,areaCell,mconv, & + omeg,maxiens, & + maxens,maxens2,maxens3,ensdim, & + APR_GR,APR_W,APR_MC,APR_ST,APR_AS, & + APR_CAPMA,APR_CAPME,APR_CAPMI,kbcon,ktop,cupclw, & + xf_ens,pr_ens,xland,gsw,subt,subq, & + xl,rv,cp,g,ichoice,ipr,jpr,ierrc,ens4, & + beta,autoconv,aeroevap,itf,jtf,ktf,training, & + xmb, & +#if ( WRF_DFI_RADAR == 1 ) + do_capsuppress,cap_suppress_j, & +#endif + use_excess,its,ite, jts,jte, kts,kte & + ) +#else + SUBROUTINE CUP_gf(zo,OUTQC,J,AAEQ,T,Q,Z1,sub_mas, & + TN,QO,PO,PRE,P,OUTT,OUTQ,DTIME,ktau,PSUR,US,VS, & + TCRIT,iens, & + ztexec,zqexec,ccn,ccnclean,rho,dx,mconv, & + omeg,maxiens, & + maxens,maxens2,maxens3,ensdim, & + APR_GR,APR_W,APR_MC,APR_ST,APR_AS, & + APR_CAPMA,APR_CAPME,APR_CAPMI,kbcon,ktop,cupclw, & !-lxz + xf_ens,pr_ens,xland,gsw,subt,subq, & + xl,rv,cp,g,ichoice,ipr,jpr,ierrc,ens4, & + beta,autoconv,aeroevap,itf,jtf,ktf,training, & +#if ( WRF_DFI_RADAR == 1 ) + do_capsuppress,cap_suppress_j, & +#endif + use_excess,its,ite, jts,jte, kts,kte & + ) +#endif + + IMPLICIT NONE + + integer & + ,intent (in ) :: & + autoconv,aeroevap,itf,jtf,ktf,ktau,training,use_excess, & + its,ite, jts,jte, kts,kte,ipr,jpr,ens4 + integer, intent (in ) :: & + j,ensdim,maxiens,maxens,maxens2,maxens3,ichoice,iens + ! + ! + ! 
+ real, dimension (its:ite,jts:jte,1:ensdim) & + ,intent (inout) :: & + xf_ens,pr_ens + real, dimension (its:ite,jts:jte) & + ,intent (inout ) :: & + APR_GR,APR_W,APR_MC,APR_ST,APR_AS,APR_CAPMA, & + APR_CAPME,APR_CAPMI + real, dimension( its:ite , jts:jte ) & + :: weight_GR,weight_W,weight_MC,weight_ST,weight_AS + real, dimension (its:ite,jts:jte) & + ,intent (in ) :: & + gsw + +#if ( WRF_DFI_RADAR == 1 ) + INTEGER, INTENT(IN ) ,OPTIONAL :: do_capsuppress + REAL, DIMENSION( its:ite ) :: cap_suppress_j +#endif + + ! outtem = output temp tendency (per s) + ! outq = output q tendency (per s) + ! outqc = output qc tendency (per s) + ! pre = output precip + real, dimension (its:ite,kts:kte) & + ,intent (inout ) :: & + OUTT,OUTQ,OUTQC,subt,subq,sub_mas,cupclw + real, dimension (its:ite) & + ,intent (out ) :: & + pre + integer, dimension (its:ite) & + ,intent (out ) :: & + kbcon,ktop +! integer, dimension (its:ite) & +! ,intent (in ) :: & +! kpbl + ! + ! basic environmental input includes moisture convergence (mconv) + ! omega (omeg), windspeed (us,vs), and a flag (aaeq) to turn off + ! convection for this call only and at that particular gridpoint + ! + real, dimension (its:ite,kts:kte) & + ,intent (in ) :: & + rho,T,PO,P,US,VS,tn + real, dimension (its:ite,kts:kte,1:ens4) & + ,intent (inout ) :: & + omeg + real, dimension (its:ite,kts:kte) & + ,intent (inout) :: & + Q,QO + real, dimension (its:ite) & + ,intent (in ) :: & + ztexec,zqexec,ccn,Z1,PSUR,AAEQ,xland + real, dimension (its:ite,1:ens4) & + ,intent (in ) :: & + mconv + +#if defined(mpas) + real & + ,intent (in ) :: & + beta,ccnclean,dtime,tcrit,xl,cp,rv,g + real,intent(in),dimension(its:ite):: dxCell,areaCell +#else + real & + ,intent (in ) :: & + beta,dx,ccnclean,dtime,tcrit,xl,cp,rv,g +#endif + +! +! local ensemble dependent variables in this routine +! 
+ real, dimension (its:ite,1:maxens) :: & + xaa0_ens + real, dimension (1:maxens) :: & + mbdt_ens + real, dimension (1:maxens2) :: & + edt_ens + real, dimension (its:ite,1:maxens2) :: & + edtc + real, dimension (its:ite,kts:kte,1:maxens2) :: & + dellat_ens,dellaqc_ens,dellaq_ens,pwo_ens,subt_ens,subq_ens +! +! +! +!***************** the following are your basic environmental +! variables. They carry a "_cup" if they are +! on model cloud levels (staggered). They carry +! an "o"-ending (z becomes zo), if they are the forced +! variables. They are preceded by x (z becomes xz) +! to indicate modification by some typ of cloud +! + ! z = heights of model levels + ! q = environmental mixing ratio + ! qes = environmental saturation mixing ratio + ! t = environmental temp + ! p = environmental pressure + ! he = environmental moist static energy + ! hes = environmental saturation moist static energy + ! z_cup = heights of model cloud levels + ! q_cup = environmental q on model cloud levels + ! qes_cup = saturation q on model cloud levels + ! t_cup = temperature (Kelvin) on model cloud levels + ! p_cup = environmental pressure + ! he_cup = moist static energy on model cloud levels + ! hes_cup = saturation moist static energy on model cloud levels + ! gamma_cup = gamma on model cloud levels +! +! + ! hcd = moist static energy in downdraft + ! zd normalized downdraft mass flux + ! dby = buoancy term + ! entr = entrainment rate + ! zd = downdraft normalized mass flux + ! entr= entrainment rate + ! hcd = h in model cloud + ! bu = buoancy term + ! zd = normalized downdraft mass flux + ! gamma_cup = gamma on model cloud levels + ! qcd = cloud q (including liquid water) after entrainment + ! qrch = saturation q in cloud + ! pwd = evaporate at that level + ! pwev = total normalized integrated evaoprate (I2) + ! entr= entrainment rate + ! z1 = terrain elevation + ! entr = downdraft entrainment rate + ! jmin = downdraft originating level + ! 
kdet = level above ground where downdraft start detraining + ! psur = surface pressure + ! z1 = terrain elevation + ! pr_ens = precipitation ensemble + ! xf_ens = mass flux ensembles + ! massfln = downdraft mass flux ensembles used in next timestep + ! omeg = omega from large scale model + ! mconv = moisture convergence from large scale model + ! zd = downdraft normalized mass flux + ! zu = updraft normalized mass flux + ! dir = "storm motion" + ! mbdt = arbitrary numerical parameter + ! dtime = dt over which forcing is applied + ! iact_gr_old = flag to tell where convection was active + ! kbcon = LFC of parcel from k22 + ! k22 = updraft originating level + ! icoic = flag if only want one closure (usually set to zero!) + ! dby = buoancy term + ! ktop = cloud top (output) + ! xmb = total base mass flux + ! hc = cloud moist static energy + ! hkb = moist static energy at originating level + + real, dimension (its:ite,kts:kte) :: & + entr_rate_2d,mentrd_rate_2d,he,hes,qes,z, & + heo,heso,qeso,zo, & + xhe,xhes,xqes,xz,xt,xq, & + + qes_cup,q_cup,he_cup,hes_cup,z_cup,p_cup,gamma_cup,t_cup, & + qeso_cup,qo_cup,heo_cup,heso_cup,zo_cup,po_cup,gammao_cup, & + tn_cup, & + xqes_cup,xq_cup,xhe_cup,xhes_cup,xz_cup,xp_cup,xgamma_cup, & + xt_cup, & + + xlamue,dby,qc,qrcd,pwd,pw,hcd,qcd,dbyd,hc,qrc,zu,zd,clw_all, & + dbyo,qco,qrcdo,pwdo,pwo,hcdo,qcdo,dbydo,hco,qrco,zuo,zdo, & + xdby,xqc,xqrcd,xpwd,xpw,xhcd,xqcd,xhc,xqrc,xzu,xzd, & + + ! cd = detrainment function for updraft + ! cdd = detrainment function for downdraft + ! dellat = change of temperature per unit mass flux of cloud ensemble + ! dellaq = change of q per unit mass flux of cloud ensemble + ! dellaqc = change of qc per unit mass flux of cloud ensemble + + cd,cdd,DELLAH,DELLAQ,DELLAT,DELLAQC,dsubt,dsubh,dsubq + + ! aa0 cloud work function for downdraft + ! edt = epsilon + ! aa0 = cloud work function without forcing effects + ! aa1 = cloud work function with forcing effects + ! 
xaa0 = cloud work function with cloud effects (ensemble dependent) + ! edt = epsilon + + real, dimension (its:ite,1:ens4) :: & + axx + integer, dimension (its:ite) :: & + kzdown,KDET,K22,KB,JMIN,kstabi,kstabm,K22x, & !-lxz + KBCONx,KBx,KTOPx,ierr,ierr2,ierr3,KBMAX + + integer :: & + nall,iedt,nens,nens3,ki,I,K,KK,iresult +#if defined(mpas) + real,intent(inout),dimension(its:ite):: xmb + + real,dimension(its:ite):: & + edt,edto,edtx,aa1,aa0,xaa0,hkb, & + hkbo,xhkb,qkb,qkbo, & + xpwav,xpwev,pwav,pwev,pwavo, & + pwevo,bu,bud,buo,cap_max,xland1, & + cap_max_increment,closure_n,psum,psumh,sig,zuhe + real:: & + day,dz,dzo,mbdt,entrd_rate, & + zcutdown,edtmax,edtmin,depth_min,zkbmax,z_detr,zktop, & + massfld,dh,cap_maxs,trash + real:: & + detdo1,detdo2,entdo,dp,subin,detdo,entup, & + detup,subdown,entdoj,entupk,detupk,totmas + real,dimension(its:ite):: entr_rate,mentrd_rate,radius,frh,xlamdd +#else + real, dimension (its:ite) :: & + edt,edto,edtx,AA1,AA0,XAA0,HKB, & + HKBO,XHKB,QKB,QKBO, & + XMB,XPWAV,XPWEV,PWAV,PWEV,PWAVO, & + PWEVO,BU,BUD,BUO,cap_max,xland1, & + cap_max_increment,closure_n,psum,psumh,sig,zuhe + real :: & + day,dz,dzo,mbdt,entr_rate,radius,entrd_rate,mentrd_rate, & + zcutdown,edtmax,edtmin,depth_min,zkbmax,z_detr,zktop, & + massfld,dh,cap_maxs,trash,frh,xlamdd + real detdo1,detdo2,entdo,dp,subin,detdo,entup, & + detup,subdown,entdoj,entupk,detupk,totmas +#endif + real :: power_entr,zustart,zufinal,dzm1,dzp1 + + + integer :: k1,k2,kbegzu,kfinalzu,kstart,jmini,levadj + logical :: keep_going + real xff_shal(9),blqe,xkshal + character*50 :: ierrc(its:ite) + real, dimension (its:ite,kts:kte) :: & + up_massentr,up_massdetr,dd_massentr,dd_massdetr & + ,up_massentro,up_massdetro,dd_massentro,dd_massdetro + real, dimension (kts:kte) :: smth + + levadj=5 + power_entr=2. ! 1.2 + zustart=.1 + zufinal=1. + day=86400. + do i=its,itf + closure_n(i)=16. + xland1(i)=1. + if(xland(i).gt.1.5)xland1(i)=0. + cap_max_increment(i)=25. + ierrc(i)=" " +! cap_max_increment(i)=1. 
+ enddo +! +!--- specify entrainmentrate and detrainmentrate +!--- highly tuneable ! +! +#if defined(mpas) + + do i = its, ite + entr_rate(i) = 7.e-5 + radius(i) = .2 / entr_rate(i) + frh(i) = 3.14*(radius(i)*radius(i))/dxCell(i)/dxCell(i) + if(frh(i) .gt. 0.7)then + frh(i) = .7 + radius(i) = sqrt(frh(i)*dxCell(i)*dxCell(i)/3.14) + entr_rate(i) = .2/radius(i) + endif + sig(i)=(1.-frh(i))**2 + +! +!--- entrainment of mass +! + mentrd_rate(i) = entr_rate(i) + xlamdd(i) = mentrd_rate(i) + enddo + +! +!--- initial detrainmentrates +! + do k=kts,ktf + do i=its,itf + z(i,k) = zo(i,k) + xz(i,k) = zo(i,k) + cupclw(i,k) = 0. + cd(i,k) = 1.*entr_rate(i) + cdd(i,k) = xlamdd(i) + hcdo(i,k) = 0. + qrcdo(i,k) = 0. + dellaqc(i,k) = 0. + enddo + enddo + +#else + + entr_rate=7.e-5 + radius=.2/entr_rate + frh=3.14*(radius*radius)/dx/dx + if(frh .gt. 0.7)then + frh=.7 + radius=sqrt(frh*dx*dx/3.14) + entr_rate=.2/radius + endif + do i=its,itf + sig(i)=(1.-frh)**2 + enddo +! sig(:)=1. + +! +!--- entrainment of mass +! + mentrd_rate=entr_rate ! 0. + xlamdd=mentrd_rate +! +!--- initial detrainmentrates +! + do k=kts,ktf + do i=its,itf + z(i,k)=zo(i,k) + xz(i,k)=zo(i,k) + cupclw(i,k)=0. + cd(i,k)=1.*entr_rate + cdd(i,k)=xlamdd(i) + hcdo(i,k)=0. + qrcdo(i,k)=0. + dellaqc(i,k)=0. + enddo + enddo + +#endif + +! +!--- max/min allowed value for epsilon (ratio downdraft base mass flux/updraft +! base mass flux +! + edtmax=1. + edtmin=.1 +! +!--- minimum depth (m), clouds must have +! + depth_min=1000. ! gg 500 +! +!--- maximum depth (mb) of capping +!--- inversion (larger cap = no convection) +! + cap_maxs=75. + DO i=its,itf + kbmax(i)=1 + aa0(i)=0. + aa1(i)=0. + edt(i)=0. + kstabm(i)=ktf-1 + IERR(i)=0 + IERR2(i)=0 + IERR3(i)=0 + enddo + do i=its,itf + cap_max(i)=cap_maxs + iresult=0 + + enddo +! +!--- max height(m) above ground where updraft air can originate +! + zkbmax=4000. +! +!--- height(m) above which no downdrafts are allowed to originate +! + zcutdown=3000. +! 
+!--- depth(m) over which downdraft detrains all its mass +! + z_detr=1250. !1000 +! + do nens=1,maxens + mbdt_ens(nens)=(float(nens)-3.)*dtime*1.e-3+dtime*5.E-03 + enddo + do nens=1,maxens2 + edt_ens(nens)=.95-float(nens)*.01 + enddo +! +!--- environmental conditions, FIRST HEIGHTS +! + do i=its,itf + if(ierr(i).ne.20)then + do k=1,maxens*maxens2*maxens3 + xf_ens(i,j,(iens-1)*maxens*maxens2*maxens3+k)=0. + pr_ens(i,j,(iens-1)*maxens*maxens2*maxens3+k)=0. + enddo + endif + enddo +! +!--- calculate moist static energy, heights, qes +! + call cup_env(z,qes,he,hes,t,q,p,z1, & + psur,ierr,tcrit,-1,xl,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + call cup_env(zo,qeso,heo,heso,tn,qo,po,z1, & + psur,ierr,tcrit,-1,xl,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + +! +!--- environmental values on cloud levels +! + call cup_env_clev(t,qes,q,he,hes,z,p,qes_cup,q_cup,he_cup, & + hes_cup,z_cup,p_cup,gamma_cup,t_cup,psur, & + ierr,z1,xl,rv,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + call cup_env_clev(tn,qeso,qo,heo,heso,zo,po,qeso_cup,qo_cup, & + heo_cup,heso_cup,zo_cup,po_cup,gammao_cup,tn_cup,psur, & + ierr,z1,xl,rv,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + do i=its,itf + if(ierr(i).eq.0)then + if(aaeq(i).lt.-0.1)then + ierr(i)=20 + endif +! + do k=kts,ktf + if(zo_cup(i,k).gt.zkbmax+z1(i))then + kbmax(i)=k + go to 25 + endif + enddo + 25 continue +! +!--- level where detrainment for downdraft starts +! + do k=kts,ktf + if(zo_cup(i,k).gt.z_detr+z1(i))then + kdet(i)=k + go to 26 + endif + enddo + 26 continue +! + endif + enddo + +! +! +! +!------- DETERMINE LEVEL WITH HIGHEST MOIST STATIC ENERGY CONTENT - K22 +! + CALL cup_MAXIMI(HEO_CUP,3,KBMAX,K22,ierr, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) +#if defined(mpas) + DO 36 i=its,itf + IF(ierr(I).eq.0)THEN + frh(i)=q_cup(i,k22(i))/qes_cup(i,k22(i)) + IF(omeg(i,k22(i),1).lt.0. .and. frh(i).ge.0.99 .and. 
sig(i).lt.0.091)ierr(i)=1200 + IF(K22(I).GE.KBMAX(i))THEN + ierr(i)=2 + ierrc(i)="could not find k22" + ENDIF + ENDIF + 36 CONTINUE +#else + DO 36 i=its,itf + IF(ierr(I).eq.0)THEN + frh=q_cup(i,k22(i))/qes_cup(i,k22(i)) + IF(omeg(i,k22(i),1).lt.0. .and. frh.ge.0.99 .and. sig(i).lt.0.091)ierr(i)=1200 + IF(K22(I).GE.KBMAX(i))THEN + ierr(i)=2 + ierrc(i)="could not find k22" + ENDIF + ENDIF + 36 CONTINUE +#endif +! +!--- DETERMINE THE LEVEL OF CONVECTIVE CLOUD BASE - KBCON +! + do i=its,itf + IF(ierr(I).eq.0)THEN + if(use_excess == 2) then + k1=max(1,k22(i)-1) + k2=k22(i)+1 + hkb(i) =he_cup(i,k22(i)) ! sum(he_cup(i,k1:k2))/float(k2-k1+1)+xl*zqexec(i)+cp*ztexec(i) + hkbo(i)=sum(heo_cup(i,k1:k2))/float(k2-k1+1)+xl*zqexec(i)+cp*ztexec(i) + else if(use_excess <= 1)then + hkb(i)=he_cup(i,k22(i)) ! +float(use_excess)*(xl*zqexec(i)+cp*ztexec(i)) + hkbo(i)=heo_cup(i,k22(i))+float(use_excess)*(xl*zqexec(i)+cp*ztexec(i)) + endif ! excess + endif ! ierr + enddo + + + call cup_kbcon(ierrc,cap_max_increment,1,k22,kbcon,heo_cup,heso_cup, & + hkbo,ierr,kbmax,po_cup,cap_max, & + xl,cp,ztexec,zqexec,use_excess, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) +! +!--- increase detrainment in stable layers +! + CALL cup_minimi(HEso_cup,Kbcon,kstabm,kstabi,ierr, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) +#if defined(mpas) + DO i=its,itf + IF(ierr(I).eq.0)THEN + do k=k22(i),kbcon(i) + frh(i)=q_cup(i,k)/qes_cup(i,k) + if(omeg(i,k,1).lt.-1.e-6 .and. frh(i).ge.0.99 .and. sig(i).lt.0.091)ierr(i)=1200 + enddo + endif + enddo +#else + DO i=its,itf + IF(ierr(I).eq.0)THEN + do k=k22(i),kbcon(i) + frh=q_cup(i,k)/qes_cup(i,k) + if(omeg(i,k,1).lt.-1.e-6 .and. frh.ge.0.99 .and. sig(i).lt.0.091)ierr(i)=1200 + enddo + endif + enddo +#endif +! +! the following section insures a smooth normalized mass flux profile. See Grell +! and Freitas (2013) for a description +! +#if defined(mpas) + + DO i=its,itf + IF(ierr(I).eq.0)THEN + do k=kts,ktf + frh(i) = min(qo_cup(i,k)/qeso_cup(i,k),1.) 
+ entr_rate_2d(i,k)=entr_rate(i)*(1.3-frh(i)) + enddo + zuhe(i)=zustart + kstart=1 + frh(i)=(zufinal-zustart)/((float(kbcon(i)*kbcon(i)))-(float(kstart*kstart))) + dh=zuhe(i)-frh(i)*(float(kstart*kstart)) + do k=kstart,kbcon(i)-1 + dz=z_cup(i,k+1)-z_cup(i,k) +! cd(i,k)=entr_rate_2d(i,kbcon(i)) + if(p_cup(i,k).gt. p_cup(i,kstabi(i)))cd(i,k)=1.e-6 + entr_rate_2d(i,k)=((frh(i)*(float((k+1)*(k+1)))+dh)/zuhe(i)-1.+cd(i,k)*dz)/dz + zuhe(i)=zuhe(i)+entr_rate_2d(i,k)*dz*zuhe(i)-cd(i,k)*dz*zuhe(i) + enddo + kbegzu=kstabi(i)+4 + kbegzu=min(kbegzu,ktf-1) + kfinalzu=kbegzu+1 + do k=kts,ktf + cd(i,k)=entr_rate_2d(i,kbcon(i)) + enddo + do k=kbcon(i),kbegzu + cd(i,k)=entr_rate_2d(i,kbcon(i)) + if(p_cup(i,k).gt. p_cup(i,kstabi(i)))cd(i,k)=1.e-6 + dz=z_cup(i,k+1)-z_cup(i,k) + zuhe(i)=zuhe(i)+entr_rate_2d(i,k)*dz*zuhe(i)-cd(i,k)*dz*zuhe(i) + enddo + do k=kstabi(i),ktf-2 + if((hkb(i)-hes_cup(i,k)).lt.0)then + kfinalzu=k-3 + go to 411 + endif + enddo +411 continue + kfinalzu=max(kfinalzu,kbegzu+1) + kfinalzu=min(kfinalzu,ktf-1) + frh(i)=-(0.2-zuhe(i))/((float(kfinalzu*kfinalzu))-(float(kbegzu*kbegzu))) + dh=zuhe(i)+frh(i)*(float(kbegzu*kbegzu)) + do k=kbegzu+1,kfinalzu + dz=z_cup(i,k+1)-z_cup(i,k) + cd(i,k)=-((-frh(i)*(float((k+1)*(k+1)))+dh)/zuhe(i)-1.-entr_rate_2d(i,k)*dz)/dz + zuhe(i)=zuhe(i)+entr_rate_2d(i,k)*dz*zuhe(i)-cd(i,k)*dz*zuhe(i) + enddo + do k=kfinalzu+1,ktf + cd(i,k)=entr_rate_2d(i,k) + enddo + do k=kts+1,ktf-2 + dzm1=z_cup(i,k)-z_cup(i,k-1) + dz=z_cup(i,k+1)-z_cup(i,k) + dzp1=z_cup(i,k+2)-z_cup(i,k+1) + smth(k)=.25*(dzm1*cd(i,k-1)+2.*dz*cd(i,k)+dzp1*cd(i,k+1)) + enddo + do k=kts+1,ktf-2 + dzm1=z_cup(i,k)-z_cup(i,k-1) + dz=z_cup(i,k+1)-z_cup(i,k) + dzp1=z_cup(i,k+2)-z_cup(i,k+1) + cd(i,k)=smth(k)/dz ! (.25*(dzm1+2.*dz+dzp1)) + enddo + + smth(:)=0. 
+ do k=2,ktf-2 + dzm1=z_cup(i,k)-z_cup(i,k-1) + dz=z_cup(i,k+1)-z_cup(i,k) + dzp1=z_cup(i,k+2)-z_cup(i,k+1) + smth(k)=.25*(dzm1*entr_rate_2d(i,k-1)+2.*dz*entr_rate_2d(i,k)+dzp1*entr_rate_2d(i,k+1)) + enddo + do k=2,ktf-2 + dz=z_cup(i,k+1)-z_cup(i,k) + entr_rate_2d(i,k)=smth(k)/dz + enddo + zuhe(i)=zustart + do k=2,kbegzu + dz=z_cup(i,k+1)-z_cup(i,k) + frh(i)=zuhe(i) + zuhe(i)=zuhe(i)+entr_rate_2d(i,k)*dz*zuhe(i)-cd(i,k)*dz*zuhe(i) + enddo + ENDIF + enddo + +#else + + DO i=its,itf + IF(ierr(I).eq.0)THEN + do k=kts,ktf + frh = min(qo_cup(i,k)/qeso_cup(i,k),1.) + entr_rate_2d(i,k)=entr_rate*(1.3-frh) + enddo + zuhe(i)=zustart + kstart=1 + frh=(zufinal-zustart)/((float(kbcon(i)*kbcon(i)))-(float(kstart*kstart))) + dh=zuhe(i)-frh*(float(kstart*kstart)) + do k=kstart,kbcon(i)-1 + dz=z_cup(i,k+1)-z_cup(i,k) +! cd(i,k)=entr_rate_2d(i,kbcon(i)) + if(p_cup(i,k).gt. p_cup(i,kstabi(i)))cd(i,k)=1.e-6 + entr_rate_2d(i,k)=((frh*(float((k+1)*(k+1)))+dh)/zuhe(i)-1.+cd(i,k)*dz)/dz + zuhe(i)=zuhe(i)+entr_rate_2d(i,k)*dz*zuhe(i)-cd(i,k)*dz*zuhe(i) + enddo + kbegzu=kstabi(i)+4 + kbegzu=min(kbegzu,ktf-1) + kfinalzu=kbegzu+1 + do k=kts,ktf + cd(i,k)=entr_rate_2d(i,kbcon(i)) + enddo + do k=kbcon(i),kbegzu + cd(i,k)=entr_rate_2d(i,kbcon(i)) + if(p_cup(i,k).gt. 
p_cup(i,kstabi(i)))cd(i,k)=1.e-6 + dz=z_cup(i,k+1)-z_cup(i,k) + zuhe(i)=zuhe(i)+entr_rate_2d(i,k)*dz*zuhe(i)-cd(i,k)*dz*zuhe(i) + enddo + do k=kstabi(i),ktf-2 + if((hkb(i)-hes_cup(i,k)).lt.0)then + kfinalzu=k-3 + go to 411 + endif + enddo +411 continue + kfinalzu=max(kfinalzu,kbegzu+1) + kfinalzu=min(kfinalzu,ktf-1) + frh=-(0.2-zuhe(i))/((float(kfinalzu*kfinalzu))-(float(kbegzu*kbegzu))) + dh=zuhe(i)+frh*(float(kbegzu*kbegzu)) + do k=kbegzu+1,kfinalzu + dz=z_cup(i,k+1)-z_cup(i,k) + cd(i,k)=-((-frh*(float((k+1)*(k+1)))+dh)/zuhe(i)-1.-entr_rate_2d(i,k)*dz)/dz + zuhe(i)=zuhe(i)+entr_rate_2d(i,k)*dz*zuhe(i)-cd(i,k)*dz*zuhe(i) + enddo + do k=kfinalzu+1,ktf + cd(i,k)=entr_rate_2d(i,k) + enddo + do k=kts+1,ktf-2 + dzm1=z_cup(i,k)-z_cup(i,k-1) + dz=z_cup(i,k+1)-z_cup(i,k) + dzp1=z_cup(i,k+2)-z_cup(i,k+1) + smth(k)=.25*(dzm1*cd(i,k-1)+2.*dz*cd(i,k)+dzp1*cd(i,k+1)) + enddo + do k=kts+1,ktf-2 + dzm1=z_cup(i,k)-z_cup(i,k-1) + dz=z_cup(i,k+1)-z_cup(i,k) + dzp1=z_cup(i,k+2)-z_cup(i,k+1) + cd(i,k)=smth(k)/dz ! (.25*(dzm1+2.*dz+dzp1)) + enddo + + smth(:)=0. + do k=2,ktf-2 + dzm1=z_cup(i,k)-z_cup(i,k-1) + dz=z_cup(i,k+1)-z_cup(i,k) + dzp1=z_cup(i,k+2)-z_cup(i,k+1) + smth(k)=.25*(dzm1*entr_rate_2d(i,k-1)+2.*dz*entr_rate_2d(i,k)+dzp1*entr_rate_2d(i,k+1)) + enddo + do k=2,ktf-2 + dz=z_cup(i,k+1)-z_cup(i,k) + entr_rate_2d(i,k)=smth(k)/dz + enddo + zuhe(i)=zustart + do k=2,kbegzu + dz=z_cup(i,k+1)-z_cup(i,k) + frh=zuhe(i) + zuhe(i)=zuhe(i)+entr_rate_2d(i,k)*dz*zuhe(i)-cd(i,k)*dz*zuhe(i) + enddo + ENDIF + enddo + +#endif + +! +! calculate mass entrainment and detrainment +! + do k=kts,ktf + do i=its,itf + hc(i,k)=0. + DBY(I,K)=0. + hco(i,k)=0. + DBYo(I,K)=0. + enddo + enddo + do i=its,itf + IF(ierr(I).eq.0)THEN + do k=1,kbcon(i)-1 + hc(i,k)=hkb(i) + hco(i,k)=hkbo(i) + enddo + k=kbcon(i) + hc(i,k)=hkb(i) + DBY(I,Kbcon(i))=Hkb(I)-HES_cup(I,K) + hco(i,k)=hkbo(i) + DBYo(I,Kbcon(i))=Hkbo(I)-HESo_cup(I,K) + endif ! ierr + enddo +! +! 
+ do i=its,itf + if(ierr(i).eq.0)then + zu(i,1)=zustart + zuo(i,1)=zustart +! mass entrainment and detrinament is defined on model levels + do k=2,ktf-1 + dz=zo_cup(i,k)-zo_cup(i,k-1) + up_massentro(i,k-1)=entr_rate_2d(i,k-1)*dz*zuo(i,k-1) + up_massdetro(i,k-1)=cd(i,k-1)*dz*zuo(i,k-1) + zuo(i,k)=zuo(i,k-1)+up_massentro(i,k-1)-up_massdetro(i,k-1) + if(zuo(i,k).lt.0.05)then + zuo(i,k)=.05 + up_massdetro(i,k-1)=zuo(i,k-1)-.05 + up_massentro(i,k-1) + cd(i,k-1)=up_massdetro(i,k-1)/dz/zuo(i,k-1) + endif + zu(i,k)=zuo(i,k) + up_massentr(i,k-1)=up_massentro(i,k-1) + up_massdetr(i,k-1)=up_massdetro(i,k-1) + enddo + do k=kbcon(i)+1,ktf-1 + hc(i,k)=(hc(i,k-1)*zu(i,k-1)-.5*up_massdetr(i,k-1)*hc(i,k-1)+ & + up_massentr(i,k-1)*he(i,k-1)) / & + (zu(i,k-1)-.5*up_massdetr(i,k-1)+up_massentr(i,k-1)) + dby(i,k)=hc(i,k)-hes_cup(i,k) + hco(i,k)=(hco(i,k-1)*zuo(i,k-1)-.5*up_massdetro(i,k-1)*hco(i,k-1)+ & + up_massentro(i,k-1)*heo(i,k-1)) / & + (zuo(i,k-1)-.5*up_massdetro(i,k-1)+up_massentro(i,k-1)) + dbyo(i,k)=hco(i,k)-heso_cup(i,k) + enddo + do k=kbcon(i)+1,ktf + if(dbyo(i,k).lt.0)then + ktop(i)=k-1 + go to 41 + endif + enddo +41 continue + if(ktop(i).lt.kbcon(i)+2)ierr(i)=5 + do k=ktop(i)+1,ktf + HC(i,K)=hes_cup(i,k) + HCo(i,K)=heso_cup(i,k) + DBY(I,K)=0. + DBYo(I,K)=0. + zu(i,k)=0. + zuo(i,k)=0. + cd(i,k)=0. + entr_rate_2d(i,k)=0. + up_massentr(i,k)=0. + up_massdetr(i,k)=0. + up_massentro(i,k)=0. + up_massdetro(i,k)=0. + enddo + endif + enddo +! + DO 37 i=its,itf + kzdown(i)=0 + if(ierr(i).eq.0)then + zktop=(zo_cup(i,ktop(i))-z1(i))*.6 + zktop=min(zktop+z1(i),zcutdown+z1(i)) + do k=kts,ktf + if(zo_cup(i,k).gt.zktop)then + kzdown(i)=k + go to 37 + endif + enddo + endif + 37 CONTINUE +! +!--- DOWNDRAFT ORIGINATING LEVEL - JMIN +! + call cup_minimi(HEso_cup,K22,kzdown,JMIN,ierr, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + DO 100 i=its,itf + IF(ierr(I).eq.0)THEN +! +!--- check whether it would have buoyancy, if there where +!--- no entrainment/detrainment +! 
+ jmini = jmin(i) + keep_going = .TRUE. + do while ( keep_going ) + keep_going = .FALSE. + if ( jmini - 1 .lt. kdet(i) ) kdet(i) = jmini-1 + if ( jmini .ge. ktop(i)-1 ) jmini = ktop(i) - 2 + ki = jmini + hcdo(i,ki)=heso_cup(i,ki) + DZ=Zo_cup(i,Ki+1)-Zo_cup(i,Ki) + dh=0. + do k=ki-1,1,-1 + hcdo(i,k)=heso_cup(i,jmini) + DZ=Zo_cup(i,K+1)-Zo_cup(i,K) + dh=dh+dz*(HCDo(i,K)-heso_cup(i,k)) + if(dh.gt.0.)then + jmini=jmini-1 + if ( jmini .gt. 5 ) then + keep_going = .TRUE. + else + ierr(i) = 9 + ierrc(i) = "could not find jmini9" + exit + endif + endif + enddo + enddo + jmin(i) = jmini + if ( jmini .le. 5 ) then + ierr(i)=4 + ierrc(i) = "could not find jmini4" + endif + ENDIF +100 continue +! +! - Must have at least depth_min m between cloud convective base +! and cloud top. +! + do i=its,itf + IF(ierr(I).eq.0)THEN + IF(-zo_cup(I,KBCON(I))+zo_cup(I,KTOP(I)).LT.depth_min)then + ierr(i)=6 + ierrc(i)="cloud depth very shallow" + endif + endif + enddo + +! +!--- normalized downdraft mass flux profile,also work on bottom detrainment +!--- in this routine +! + do k=kts,ktf + do i=its,itf + zd(i,k)=0. + zdo(i,k)=0. + cdd(i,k)=0. + dd_massentr(i,k)=0. + dd_massdetr(i,k)=0. + dd_massentro(i,k)=0. + dd_massdetro(i,k)=0. + hcdo(i,k)=heso_cup(i,k) + dbydo(i,k)=0. + enddo + enddo + +#if defined(mpas) + + do i=its,itf + bud(i)=0. + IF(ierr(I).eq.0)then + mentrd_rate_2d(i,:)=mentrd_rate(i) + cdd(i,1:jmin(i))=xlamdd(i) + cdd(i,jmin(i))=0. +! start from dd origin + zd(i,jmin(i))=0.2 + zdo(i,jmin(i))=0.2 + frh(i)=(zdo(i,jmin(i))-1.)/(-float((jmin(i)-levadj)*(jmin(i)-levadj)) & + +float(jmin(i)*jmin(i))) + dh=zdo(i,jmin(i))-frh(i)*float(jmin(i)*jmin(i)) + zuhe(i)=zdo(i,jmin(i)) + do ki=jmin(i)-1,jmin(i)-levadj,-1 + cdd(i,ki)=0. + dz=z_cup(i,ki+1)-z_cup(i,ki) + mentrd_rate_2d(i,ki)=((frh(i)*float(ki*ki)+dh)/zuhe(i)-1.)/dz + zuhe(i)=zuhe(i)+mentrd_rate_2d(i,ki)*dz*zuhe(i) + enddo +! 
now we know the max zd, for detrainment we will go back to beta at level 1 + kstart=max(kbcon(i),kdet(i))-1 + kstart=min(jmin(i)-levadj,kstart) + kstart=max(2,kstart) + if(kstart.lt.jmin(i)-levadj-1)then + do ki=jmin(i)-levadj-1,kstart,-1 + dz=z_cup(i,ki+1)-z_cup(i,ki) + mentrd_rate_2d(i,ki)=mentrd_rate(i) + cdd(i,ki)=xlamdd(i) + zuhe(i)=zuhe(i)-cdd(i,ki)*dz*zuhe(i)+mentrd_rate_2d(i,ki)*dz*zuhe(i) + enddo + endif + frh(i)=(zuhe(i)-beta)/(float(kstart*kstart)-1.) + dh=beta-frh(i) + mentrd_rate_2d(i,kstart)=0. + do ki=kstart+1,1,-1 + mentrd_rate_2d(i,ki)=0. + dz=z_cup(i,ki+1)-z_cup(i,ki) + cdd(i,ki)=max(0.,(1.-(frh(i)*float(ki*ki)+dh)/zuhe(i))/dz) + zuhe(i)=zuhe(i)-cdd(i,ki)*dz*zuhe(i) +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'low cd ',ki,zuhe(i),cdd(i,ki) + enddo + +! now that we have entrainment and detrainment rates, +! calculate downdraft mass terms +! + do ki=jmin(i)-1,1,-1 + mentrd_rate(i)=mentrd_rate_2d(i,ki) + dzo=zo_cup(i,ki+1)-zo_cup(i,ki) + dd_massentro(i,ki)=mentrd_rate(i)*dzo*zdo(i,ki+1) + dd_massdetro(i,ki)=cdd(i,ki)*dzo*zdo(i,ki+1) + zdo(i,ki)=zdo(i,ki+1)+dd_massentro(i,ki)-dd_massdetro(i,ki) + enddo +! downdraft moist static energy + moisture budget + dbydo(i,jmin(i))=hcdo(i,jmin(i))-heso_cup(i,jmin(i)) + bud(i)=dbydo(i,jmin(i))*(zo_cup(i,jmin(i)+1)-zo_cup(i,jmin(i))) + do ki=jmin(i)-1,1,-1 + dzo=zo_cup(i,ki+1)-zo_cup(i,ki) + hcdo(i,ki)=(hcdo(i,ki+1)*zdo(i,ki+1) & + -.5*dd_massdetro(i,ki)*hcdo(i,ki+1)+ & + dd_massentro(i,ki)*heo(i,ki)) / & + (zdo(i,ki+1)-.5*dd_massdetro(i,ki)+dd_massentro(i,ki)) + dbydo(i,ki)=hcdo(i,ki)-heso_cup(i,ki) +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'ki,bud = ',ki,bud(i),hcdo(i,ki) + bud(i)=bud(i)+dbydo(i,ki)*dzo + enddo + endif + + if(bud(i).gt.0)then + ierr(i)=7 + ierrc(i)='downdraft is not negatively buoyant ' + endif + enddo + +#else + + do i=its,itf + bud(i)=0. + IF(ierr(I).eq.0)then + mentrd_rate_2d(i,:)=mentrd_rate + cdd(i,1:jmin(i))=xlamdd + cdd(i,jmin(i))=0. +! 
start from dd origin + zd(i,jmin(i))=0.2 + zdo(i,jmin(i))=0.2 + frh=(zdo(i,jmin(i))-1.)/(-float((jmin(i)-levadj)*(jmin(i)-levadj)) & + +float(jmin(i)*jmin(i))) + dh=zdo(i,jmin(i))-frh*float(jmin(i)*jmin(i)) + zuhe(i)=zdo(i,jmin(i)) + do ki=jmin(i)-1,jmin(i)-levadj,-1 + cdd(i,ki)=0. + dz=z_cup(i,ki+1)-z_cup(i,ki) + mentrd_rate_2d(i,ki)=((frh*float(ki*ki)+dh)/zuhe(i)-1.)/dz + zuhe(i)=zuhe(i)+mentrd_rate_2d(i,ki)*dz*zuhe(i) + enddo +! now we know the max zd, for detrainment we will go back to beta at level 1 + kstart=max(kbcon(i),kdet(i))-1 + kstart=min(jmin(i)-levadj,kstart) + kstart=max(2,kstart) + if(kstart.lt.jmin(i)-levadj-1)then + do ki=jmin(i)-levadj-1,kstart,-1 + dz=z_cup(i,ki+1)-z_cup(i,ki) + mentrd_rate_2d(i,ki)=mentrd_rate + cdd(i,ki)=xlamdd + zuhe(i)=zuhe(i)-cdd(i,ki)*dz*zuhe(i)+mentrd_rate_2d(i,ki)*dz*zuhe(i) + enddo + endif + frh=(zuhe(i)-beta)/(float(kstart*kstart)-1.) + dh=beta-frh + mentrd_rate_2d(i,kstart)=0. + do ki=kstart+1,1,-1 + mentrd_rate_2d(i,ki)=0. + dz=z_cup(i,ki+1)-z_cup(i,ki) + cdd(i,ki)=max(0.,(1.-(frh*float(ki*ki)+dh)/zuhe(i))/dz) + zuhe(i)=zuhe(i)-cdd(i,ki)*dz*zuhe(i) +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'low cd ',ki,zuhe(i),cdd(i,ki) + enddo + +! now that we have entrainment and detrainment rates, +! calculate downdraft mass terms +! + do ki=jmin(i)-1,1,-1 + mentrd_rate=mentrd_rate_2d(i,ki) + dzo=zo_cup(i,ki+1)-zo_cup(i,ki) + dd_massentro(i,ki)=mentrd_rate*dzo*zdo(i,ki+1) + dd_massdetro(i,ki)=cdd(i,ki)*dzo*zdo(i,ki+1) + zdo(i,ki)=zdo(i,ki+1)+dd_massentro(i,ki)-dd_massdetro(i,ki) + enddo +! downdraft moist static energy + moisture budget + dbydo(i,jmin(i))=hcdo(i,jmin(i))-heso_cup(i,jmin(i)) + bud(i)=dbydo(i,jmin(i))*(zo_cup(i,jmin(i)+1)-zo_cup(i,jmin(i))) + do ki=jmin(i)-1,1,-1 + dzo=zo_cup(i,ki+1)-zo_cup(i,ki) + hcdo(i,ki)=(hcdo(i,ki+1)*zdo(i,ki+1) & + -.5*dd_massdetro(i,ki)*hcdo(i,ki+1)+ & + dd_massentro(i,ki)*heo(i,ki)) / & + (zdo(i,ki+1)-.5*dd_massdetro(i,ki)+dd_massentro(i,ki)) + dbydo(i,ki)=hcdo(i,ki)-heso_cup(i,ki) +! 
if(i.eq.ipr.and.j.eq.jpr)write(0,*)'ki,bud = ',ki,bud(i),hcdo(i,ki) + bud(i)=bud(i)+dbydo(i,ki)*dzo + enddo + endif + + if(bud(i).gt.0)then + ierr(i)=7 + ierrc(i)='downdraft is not negatively buoyant ' + endif + enddo + +#endif +! +!--- calculate moisture properties of downdraft +! + call cup_dd_moisture_new(ierrc,zdo,hcdo,heso_cup,qcdo,qeso_cup, & + pwdo,qo_cup,zo_cup,dd_massentro,dd_massdetro,jmin,ierr,gammao_cup, & + pwevo,bu,qrcdo,qo,heo,tn_cup,1,xl, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) +! +!--- calculate moisture properties of updraft +! + call cup_up_moisture('deep',ierr,zo_cup,qco,qrco,pwo,pwavo, & + ccnclean,p_cup,kbcon,ktop,cd,dbyo,clw_all, & + t_cup,qo,GAMMAo_cup,zuo,qeso_cup,k22,qo_cup,xl, & + ZQEXEC,use_excess,ccn,rho,up_massentr,up_massdetr,psum,psumh,& + autoconv,aeroevap,1,itf,jtf,ktf,j,ipr,jpr, & + its,ite, jts,jte, kts,kte) + do k=kts,ktf + do i=its,itf + cupclw(i,k)=qrco(i,k) + enddo + enddo +! +!--- calculate workfunctions for updrafts +! + call cup_up_aa0(aa0,z,zu,dby,GAMMA_CUP,t_cup, & + kbcon,ktop,ierr, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + call cup_up_aa0(aa1,zo,zuo,dbyo,GAMMAo_CUP,tn_cup, & + kbcon,ktop,ierr, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + do i=its,itf + if(ierr(i).eq.0)then + if(aa1(i).eq.0.)then + ierr(i)=17 + ierrc(i)="cloud work function zero" + endif + endif + enddo +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + + do i=1,ens4 + axx(:,i)=aa1(:) + enddo + +! +!--- DETERMINE DOWNDRAFT STRENGTH IN TERMS OF WINDSHEAR +! 
+ call cup_dd_edt(ierr,us,vs,zo,ktop,kbcon,edt,po,pwavo, & + pwo,ccn,pwevo,edtmax,edtmin,maxens2,edtc,psum,psumh, & + ccnclean,rho,aeroevap,itf,jtf,ktf,j,ipr,jpr, & + its,ite, jts,jte, kts,kte) + do 250 iedt=1,maxens2 + do i=its,itf + if(ierr(i).eq.0)then + edt(i)=edtc(i,iedt) + edto(i)=edtc(i,iedt) + edtx(i)=edtc(i,iedt) + if(maxens2.eq.3)then + edt(i)=edtc(i,3) + edto(i)=edtc(i,3) + edtx(i)=edtc(i,3) + endif + endif + enddo + do k=kts,ktf + do i=its,itf + subt_ens(i,k,iedt)=0. + subq_ens(i,k,iedt)=0. + dellat_ens(i,k,iedt)=0. + dellaq_ens(i,k,iedt)=0. + dellaqc_ens(i,k,iedt)=0. + pwo_ens(i,k,iedt)=0. + enddo + enddo +! +! +!--- change per unit mass that a model cloud would modify the environment +! +!--- 1. in bottom layer +! + do k=kts,ktf + do i=its,itf + dellah(i,k)=0. + dsubt(i,k)=0. + dsubh(i,k)=0. + dellaq(i,k)=0. + dsubq(i,k)=0. + enddo + enddo +! +!---------------------------------------------- cloud level ktop +! +!- - - - - - - - - - - - - - - - - - - - - - - - model level ktop-1 +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! +!---------------------------------------------- cloud level k+2 +! +!- - - - - - - - - - - - - - - - - - - - - - - - model level k+1 +! +!---------------------------------------------- cloud level k+1 +! +!- - - - - - - - - - - - - - - - - - - - - - - - model level k +! +!---------------------------------------------- cloud level k +! +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! +!---------------------------------------------- cloud level 3 +! +!- - - - - - - - - - - - - - - - - - - - - - - - model level 2 +! +!---------------------------------------------- cloud level 2 +! 
+!- - - - - - - - - - - - - - - - - - - - - - - - model level 1 + + do i=its,itf + if(ierr(i).eq.0)then + dp=100.*(po_cup(i,1)-po_cup(i,2)) + dellah(i,1)=(edto(i)*zdo(i,2)*hcdo(i,2) & + -edto(i)*zdo(i,2)*heo_cup(i,2))*g/dp + dellaq(i,1)=(edto(i)*zdo(i,2)*qrcdo(i,2) & + -edto(i)*zdo(i,2)*qo_cup(i,2))*g/dp + dsubt(i,1)=0. + dsubq(i,1)=0. + + do k=kts+1,ktop(i) +! these three are only used at or near mass detrainment and/or entrainment levels + entupk=0. + detupk=0. + entdoj=0. +! detrainment and entrainment for fowndrafts + detdo=edto(i)*dd_massdetro(i,k) + entdo=edto(i)*dd_massentro(i,k) +! entrainment/detrainment for updraft + entup=up_massentro(i,k) + detup=up_massdetro(i,k) +! subsidence by downdrafts only + subin=-zdo(i,k+1)*edto(i) + subdown=-zdo(i,k)*edto(i) +! +! SPECIAL LEVELS +! + if(k.eq.jmin(i))then + entdoj=edto(i)*zdo(i,k) + endif + if(k.eq.ktop(i))then + detupk=zuo(i,ktop(i)) + subin=0. + subdown=0. + detdo=0. + entdo=0. + entup=0. + detup=0. + endif + totmas=subin-subdown+detup-entup-entdo+ & + detdo-entupk-entdoj+detupk+zuo(i,k+1)-zuo(i,k) +! print *,'*********************',k,totmas +! write(0,123)k,subin+zuo(i,k+1),subdown-zuo(i,k),detup,entup, & +! detdo,entdo,entupk,detupk +! write(8,*)'totmas = ',k,totmas + if(abs(totmas).gt.1.e-6)then + write(0,*)'*********************',i,j,k,totmas + write(0,123)k,subin,subdown,detup,entup, & + detdo,entdo,entupk,detupk +123 formAT(1X,i2,8E12.4) +! 
call wrf_error_fatal ( 'totmas .gt.1.e-6' ) + endif + dp=100.*(po_cup(i,k)-po_cup(i,k+1)) + dellah(i,k)=(detup*.5*(HCo(i,K+1)+HCo(i,K)) & + +detdo*.5*(HCDo(i,K+1)+HCDo(i,K)) & + -entup*heo(i,k) & + -entdo*heo(i,k) & + +subin*heo_cup(i,k+1) & + -subdown*heo_cup(i,k) & + +detupk*(hco(i,ktop(i))-heo_cup(i,ktop(i))) & + -entupk*heo_cup(i,k22(i)) & + -entdoj*heo_cup(i,jmin(i)) & + )*g/dp + dellaq(i,k)=(detup*.5*(qco(i,K+1)+qco(i,K)-qrco(i,k+1)-qrco(i,k)) & + +detdo*.5*(qrcdo(i,K+1)+qrcdo(i,K)) & + -entup*qo(i,k) & + -entdo*qo(i,k) & + +subin*qo_cup(i,k+1) & + -subdown*qo_cup(i,k) & + +detupk*(qco(i,ktop(i))-qrco(i,ktop(i))-qo_cup(i,ktop(i))) & + -entupk*qo_cup(i,k22(i)) & + -entdoj*qo_cup(i,jmin(i)) & + )*g/dp +! +! updraft subsidence only +! + if(k.lt.ktop(i))then + dsubt(i,k)=(zuo(i,k+1)*heo_cup(i,k+1) & + -zuo(i,k)*heo_cup(i,k))*g/dp + dsubq(i,k)=(zuo(i,k+1)*qo_cup(i,k+1) & + -zuo(i,k)*qo_cup(i,k))*g/dp + endif +! + enddo ! k + + endif + enddo +! +!-- take out cloud liquid water for detrainment +! + do k=kts,ktf-1 + do i=its,itf + dellaqc(i,k)=0. + if(ierr(i).eq.0)then + if(k.eq.ktop(i)-0)dellaqc(i,k)= & + .01*zuo(i,ktop(i))*qrco(i,ktop(i))* & + 9.81/(po_cup(i,k)-po_cup(i,k+1)) + if(k.lt.ktop(i).and.k.gt.kbcon(i))then + dz=zo_cup(i,k+1)-zo_cup(i,k) + dellaqc(i,k)=.01*9.81*up_massdetro(i,k)*.5*(qrco(i,k)+qrco(i,k+1))/ & + (po_cup(i,k)-po_cup(i,k+1)) + endif + dellaqc(i,k)=max(0.,dellaqc(i,k)) + endif + enddo + enddo +! +!--- using dellas, calculate changed environmental profiles +! + mbdt=mbdt_ens(1) + do i=its,itf + xaa0_ens(i,:)=0. + enddo + + do k=kts,ktf + do i=its,itf + dellat(i,k)=0. + if(ierr(i).eq.0)then +! 
if(i.eq.ipr.and.j.eq.jpr.and.k.eq.kts)write(0,*)'mbdt = ',mbdt,mbdt_ens,dtime + dsubh(i,k)=dsubt(i,k) + XHE(I,K)=(dsubt(i,k)+DELLAH(I,K))*MBDT+HEO(I,K) + XQ(I,K)=(dsubq(i,k)+DELLAQ(I,K)+dellaqc(i,k))*MBDT+QO(I,K) + DELLAT(I,K)=(1./cp)*(DELLAH(I,K)-xl*DELLAQ(I,K)) + dSUBT(I,K)=(1./cp)*(dsubt(i,k)-xl*dsubq(i,k)) + XT(I,K)= (DELLAT(I,K)+dsubt(i,k)-dellaqc(i,k)*xl/cp)*MBDT+TN(I,K) + IF(XQ(I,K).LE.0.)XQ(I,K)=1.E-08 + ENDIF + enddo + enddo + do i=its,itf + if(ierr(i).eq.0)then + xhkb(i)=hkbo(i)+(dsubh(i,k22(i))+DELLAH(I,K22(i)))*MBDT + XHE(I,ktf)=HEO(I,ktf) + XQ(I,ktf)=QO(I,ktf) + XT(I,ktf)=TN(I,ktf) + IF(XQ(I,ktf).LE.0.)XQ(I,ktf)=1.E-08 + endif + enddo +! +!--- calculate moist static energy, heights, qes +! + call cup_env(xz,xqes,xhe,xhes,xt,xq,po,z1, & + psur,ierr,tcrit,-1,xl,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) +! +!--- environmental values on cloud levels +! + call cup_env_clev(xt,xqes,xq,xhe,xhes,xz,po,xqes_cup,xq_cup, & + xhe_cup,xhes_cup,xz_cup,po_cup,gamma_cup,xt_cup,psur, & + ierr,z1,xl,rv,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) +! +! +!**************************** static control +! +!--- moist static energy inside cloud +! +! do i=its,itf +! if(ierr(i).eq.0)then +! xhkb(i)=xhe(i,k22(i)) +! endif +! enddo + do k=kts,ktf + do i=its,itf + xhc(i,k)=0. + xDBY(I,K)=0. + enddo + enddo + do i=its,itf + if(ierr(i).eq.0)then +! if(use_excess == 2) then +! k1=max(1,k22(i)-1) +! k2=max(1,min(kbcon(i)-1,k22(i)+1)) +! k1=1 +! k2=k22(i)+1 +! xhkb(i) =sum(xhe_cup(i,k1:k2))/float(k2-k1+1)+xl*zqexec(i)+cp*ztexec(i) +! else if(use_excess <= 1) then +! xhkb(i)=xhe_cup(i,k22(i))+float(use_excess)*(xl*zqexec(i)+cp*ztexec(i)) + +! endif + do k=1,kbcon(i)-1 + xhc(i,k)=xhkb(i) + enddo + k=kbcon(i) + xhc(i,k)=xhkb(i) + xDBY(I,Kbcon(i))=xHkb(I)-xHES_cup(I,K) + endif !ierr + enddo +! +! 
+ do i=its,itf + if(ierr(i).eq.0)then + xzu(i,:)=zuo(i,:) + do k=kbcon(i)+1,ktop(i) + xhc(i,k)=(xhc(i,k-1)*xzu(i,k-1)-.5*up_massdetro(i,k-1)*xhc(i,k-1)+ & + up_massentro(i,k-1)*xhe(i,k-1)) / & + (xzu(i,k-1)-.5*up_massdetro(i,k-1)+up_massentro(i,k-1)) + xdby(i,k)=xhc(i,k)-xhes_cup(i,k) + enddo + do k=ktop(i)+1,ktf + xHC(i,K)=xhes_cup(i,k) + xDBY(I,K)=0. + xzu(i,k)=0. + enddo + endif + enddo + +! +!--- workfunctions for updraft +! + call cup_up_aa0(xaa0,xz,xzu,xdby,GAMMA_CUP,xt_cup, & + kbcon,ktop,ierr, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + do 200 nens=1,maxens + do i=its,itf + if(ierr(i).eq.0)then + xaa0_ens(i,nens)=xaa0(i) + nall=(iens-1)*maxens3*maxens*maxens2 & + +(iedt-1)*maxens*maxens3 & + +(nens-1)*maxens3 + do k=kts,ktf + if(k.le.ktop(i))then + do nens3=1,maxens3 + if(nens3.eq.7)then +!--- b=0 + pr_ens(i,j,nall+nens3)=pr_ens(i,j,nall+nens3) & +! +edto(i)*pwdo(i,k) & + +pwo(i,k) +!--- b=beta + else if(nens3.eq.8)then + pr_ens(i,j,nall+nens3)=pr_ens(i,j,nall+nens3)+ & + pwo(i,k) +!--- b=beta/2 + else if(nens3.eq.9)then + pr_ens(i,j,nall+nens3)=pr_ens(i,j,nall+nens3) & +! +.5*edto(i)*pwdo(i,k) & + + pwo(i,k) + else + pr_ens(i,j,nall+nens3)=pr_ens(i,j,nall+nens3)+ & + pwo(i,k) ! +edto(i)*pwdo(i,k) + endif + enddo + endif + enddo + if(pr_ens(i,j,nall+7).lt.1.e-6)then + ierr(i)=18 + ierrc(i)="total normalized condensate too small" +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)ierr(i),ierrc(i) + do nens3=1,maxens3 + pr_ens(i,j,nall+nens3)=0. + enddo + endif + do nens3=1,maxens3 + if(pr_ens(i,j,nall+nens3).lt.1.e-4)then + pr_ens(i,j,nall+nens3)=0. + endif + enddo + endif +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'ierrc = ',ierr(i),ierrc(i) + enddo + 200 continue +! +!--- LARGE SCALE FORCING +! +! +!------- CHECK wether aa0 should have been zero, assuming this +! ensemble is chosen +! +! + do i=its,itf + ierr2(i)=ierr(i) + ierr3(i)=ierr(i) + k22x(i)=k22(i) + enddo + if(maxens.gt.0)then +! CALL cup_MAXIMI(HEO_CUP,3,KBMAX,K22x,ierr, & +! itf,jtf,ktf, & +! 
its,ite, jts,jte, kts,kte) + call cup_kbcon(ierrc,cap_max_increment,2,k22x,kbconx,heo_cup, & + heso_cup,hkbo,ierr2,kbmax,po_cup,cap_max, & + xl,cp,ztexec,zqexec,use_excess, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + call cup_kbcon(ierrc,cap_max_increment,3,k22x,kbconx,heo_cup, & + heso_cup,hkbo,ierr3,kbmax,po_cup,cap_max, & + xl,cp,ztexec,zqexec,use_excess, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + endif +! +!--- calculate cloud base mass flux +! + + call cup_forcing_ens_3d(closure_n,xland1,aa0,aa1,xaa0_ens,mbdt_ens,dtime, & + ierr,ierr2,ierr3,xf_ens,j,'deeps',axx, & + maxens,iens,iedt,maxens2,maxens3,mconv, & + po_cup,ktop,omeg,zdo,k22,zuo,pr_ens,edto,kbcon, & + ensdim,ichoice, & + ipr,jpr,itf,jtf,ktf, & + its,ite, jts,jte, kts,kte,ens4,ktau) +! + do k=kts,ktf + do i=its,itf + if(ierr(i).eq.0)then + subt_ens(i,k,iedt)=dsubt(i,k) + subq_ens(i,k,iedt)=dsubq(i,k) + dellat_ens(i,k,iedt)=dellat(i,k) + dellaq_ens(i,k,iedt)=dellaq(i,k) + dellaqc_ens(i,k,iedt)=dellaqc(i,k) + pwo_ens(i,k,iedt)=pwo(i,k)+edt(i)*pwdo(i,k) + else + subt_ens(i,k,iedt)=0. + subq_ens(i,k,iedt)=0. + dellat_ens(i,k,iedt)=0. + dellaq_ens(i,k,iedt)=0. + dellaqc_ens(i,k,iedt)=0. + pwo_ens(i,k,iedt)=0. + endif + enddo + enddo + 250 continue +! +!--- FEEDBACK +! + call cup_output_ens_3d(xf_ens,ierr,dellat_ens,dellaq_ens, & + dellaqc_ens,subt_ens,subq_ens,subt,subq,outt, & + outq,outqc,zuo,sub_mas,pre,pwo_ens,xmb,ktop, & + j,'deep',maxens2,maxens,iens,ierr2,ierr3, & + pr_ens,maxens3,ensdim, & + sig,APR_GR,APR_W,APR_MC,APR_ST,APR_AS, & + APR_CAPMA,APR_CAPME,APR_CAPMI,closure_n,xland1, & + weight_GR,weight_W,weight_MC,weight_ST,weight_AS,training, & + ipr,jpr,itf,jtf,ktf, & + its,ite, jts,jte, kts,kte ) + k=1 + do i=its,itf + if(ierr(i).eq.0) PRE(I)=MAX(PRE(I),0.) + enddo +! +!---------------------------done------------------------------ +! 
+ + END SUBROUTINE CUP_gf + + + SUBROUTINE cup_dd_edt(ierr,us,vs,z,ktop,kbcon,edt,p,pwav, & + pw,ccn,pwev,edtmax,edtmin,maxens2,edtc,psum2,psumh, & + ccnclean,rho,aeroevap,itf,jtf,ktf,j,ipr,jpr, & + its,ite, jts,jte, kts,kte ) + + IMPLICIT NONE + + integer & + ,intent (in ) :: & + j,ipr,jpr,aeroevap,itf,jtf,ktf, & + its,ite, jts,jte, kts,kte + integer, intent (in ) :: & + maxens2 + ! + ! ierr error value, maybe modified in this routine + ! + real, dimension (its:ite,kts:kte) & + ,intent (in ) :: & + rho,us,vs,z,p,pw + real, dimension (its:ite,1:maxens2) & + ,intent (out ) :: & + edtc + real, dimension (its:ite) & + ,intent (out ) :: & + edt + real, dimension (its:ite) & + ,intent (in ) :: & + pwav,pwev,ccn,psum2,psumh + real & + ,intent (in ) :: & + ccnclean,edtmax,edtmin + integer, dimension (its:ite) & + ,intent (in ) :: & + ktop,kbcon + integer, dimension (its:ite) & + ,intent (inout) :: & + ierr +! +! local variables in this routine +! + + integer i,k,kk + real einc,pef,pefb,prezk,zkbc + real, dimension (its:ite) :: & + vshear,sdp,vws + real :: prop_c,pefc,aeroadd,alpha3,beta3,rhoc + prop_c=8. !10.386 + alpha3 = 1.9 + beta3 = -1.13 + pefc=0. + +! +!--- DETERMINE DOWNDRAFT STRENGTH IN TERMS OF WINDSHEAR +! +! */ calculate an average wind shear over the depth of the cloud +! + do i=its,itf + edt(i)=0. + vws(i)=0. + sdp(i)=0. + vshear(i)=0. + enddo + do k=1,maxens2 + do i=its,itf + edtc(i,k)=0. + enddo + enddo + do kk = kts,ktf-1 + do 62 i=its,itf + IF(ierr(i).ne.0)GO TO 62 + if (kk .le. min0(ktop(i),ktf) .and. kk .ge. kbcon(i)) then + vws(i) = vws(i)+ & + (abs((us(i,kk+1)-us(i,kk))/(z(i,kk+1)-z(i,kk))) & + + abs((vs(i,kk+1)-vs(i,kk))/(z(i,kk+1)-z(i,kk)))) * & + (p(i,kk) - p(i,kk+1)) + sdp(i) = sdp(i) + p(i,kk) - p(i,kk+1) + endif + if (kk .eq. 
ktf-1)vshear(i) = 1.e3 * vws(i) / sdp(i) + 62 continue + end do + do i=its,itf + IF(ierr(i).eq.0)then + pef=(1.591-.639*VSHEAR(I)+.0953*(VSHEAR(I)**2) & + -.00496*(VSHEAR(I)**3)) + if(pef.gt.0.9)pef=0.9 + if(pef.lt.0.1)pef=0.1 +! +!--- cloud base precip efficiency +! + zkbc=z(i,kbcon(i))*3.281e-3 + prezk=.02 + if(zkbc.gt.3.)then + prezk=.96729352+zkbc*(-.70034167+zkbc*(.162179896+zkbc & + *(- 1.2569798E-2+zkbc*(4.2772E-4-zkbc*5.44E-6)))) + endif + if(zkbc.gt.25)then + prezk=2.4 + endif + pefb=1./(1.+prezk) + if(pefb.gt.0.9)pefb=0.9 + if(pefb.lt.0.1)pefb=0.1 + EDT(I)=1.-.5*(pefb+pef) + if(aeroevap.gt.1)then + aeroadd=(ccnclean**beta3)*((psumh(i))**(alpha3-1)) !*1.e6 +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'edt',ccnclean,psumh(i),aeroadd +! prop_c=.9/aeroadd + prop_c=.5*(pefb+pef)/aeroadd + aeroadd=(ccn(i)**beta3)*((psum2(i))**(alpha3-1)) !*1.e6 +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'edt',ccn(i),psum2(i),aeroadd,prop_c + aeroadd=prop_c*aeroadd + pefc=aeroadd + if(pefc.gt.0.9)pefc=0.9 + if(pefc.lt.0.1)pefc=0.1 + EDT(I)=1.-pefc + if(aeroevap.eq.2)EDT(I)=1.-.25*(pefb+pef+2.*pefc) + endif + + +!--- edt here is 1-precipeff! + einc=.2*edt(i) + do k=1,maxens2 + edtc(i,k)=edt(i)+float(k-2)*einc + enddo + endif + enddo + do i=its,itf + IF(ierr(i).eq.0)then + do k=1,maxens2 + EDTC(I,K)=-EDTC(I,K)*PWAV(I)/PWEV(I) + IF(EDTC(I,K).GT.edtmax)EDTC(I,K)=edtmax + IF(EDTC(I,K).LT.edtmin)EDTC(I,K)=edtmin + enddo + endif + enddo + + END SUBROUTINE cup_dd_edt + + + SUBROUTINE cup_dd_moisture_new(ierrc,zd,hcd,hes_cup,qcd,qes_cup, & + pwd,q_cup,z_cup,dd_massentr,dd_massdetr,jmin,ierr, & + gamma_cup,pwev,bu,qrcd, & + q,he,t_cup,iloop,xl, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte ) + + IMPLICIT NONE + + integer & + ,intent (in ) :: & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte + ! cdd= detrainment function + ! q = environmental q on model levels + ! q_cup = environmental q on model cloud levels + ! qes_cup = saturation q on model cloud levels + ! 
hes_cup = saturation h on model cloud levels + ! hcd = h in model cloud + ! bu = buoancy term + ! zd = normalized downdraft mass flux + ! gamma_cup = gamma on model cloud levels + ! mentr_rate = entrainment rate + ! qcd = cloud q (including liquid water) after entrainment + ! qrch = saturation q in cloud + ! pwd = evaporate at that level + ! pwev = total normalized integrated evaoprate (I2) + ! entr= entrainment rate + ! + real, dimension (its:ite,kts:kte) & + ,intent (in ) :: & + zd,t_cup,hes_cup,hcd,qes_cup,q_cup,z_cup, & + dd_massentr,dd_massdetr,gamma_cup,q,he + real & + ,intent (in ) :: & + xl + integer & + ,intent (in ) :: & + iloop + integer, dimension (its:ite) & + ,intent (in ) :: & + jmin + integer, dimension (its:ite) & + ,intent (inout) :: & + ierr + real, dimension (its:ite,kts:kte) & + ,intent (out ) :: & + qcd,qrcd,pwd + real, dimension (its:ite) & + ,intent (out ) :: & + pwev,bu + character*50 :: ierrc(its:ite) +! +! local variables in this routine +! + + integer :: & + i,k,ki + real :: & + dh,dz,dqeva + + do i=its,itf + bu(i)=0. + pwev(i)=0. + enddo + do k=kts,ktf + do i=its,itf + qcd(i,k)=0. + qrcd(i,k)=0. + pwd(i,k)=0. + enddo + enddo +! +! +! + do 100 i=its,itf + IF(ierr(I).eq.0)then + k=jmin(i) + DZ=Z_cup(i,K+1)-Z_cup(i,K) + qcd(i,k)=q_cup(i,k) + DH=HCD(I,k)-HES_cup(I,K) + if(dh.lt.0)then + QRCD(I,K)=(qes_cup(i,k)+(1./XL)*(GAMMA_cup(i,k) & + /(1.+GAMMA_cup(i,k)))*DH) + else + qrcd(i,k)=qes_cup(i,k) + endif + pwd(i,jmin(i))=zd(i,jmin(i))*min(0.,qcd(i,k)-qrcd(i,k)) + qcd(i,k)=qrcd(i,k) + pwev(i)=pwev(i)+pwd(i,jmin(i)) +! + bu(i)=dz*dh + do ki=jmin(i)-1,1,-1 + DZ=Z_cup(i,Ki+1)-Z_cup(i,Ki) + qcd(i,ki)=(qcd(i,ki+1)*zd(i,ki+1) & + -.5*dd_massdetr(i,ki)*qcd(i,ki+1)+ & + dd_massentr(i,ki)*q(i,ki)) / & + (zd(i,ki+1)-.5*dd_massdetr(i,ki)+dd_massentr(i,ki)) +! write(0,*)'qcd in dd_moi = ',qcd(i,ki) + +! +!--- to be negatively buoyant, hcd should be smaller than hes! 
+!--- ideally, dh should be negative till dd hits ground, but that is not always +!--- the case +! + DH=HCD(I,ki)-HES_cup(I,Ki) + bu(i)=bu(i)+dz*dh + QRCD(I,Ki)=qes_cup(i,ki)+(1./XL)*(GAMMA_cup(i,ki) & + /(1.+GAMMA_cup(i,ki)))*DH + dqeva=qcd(i,ki)-qrcd(i,ki) + if(dqeva.gt.0.)then + dqeva=0. + qrcd(i,ki)=qcd(i,ki) + endif + pwd(i,ki)=zd(i,ki)*dqeva + qcd(i,ki)=qrcd(i,ki) + pwev(i)=pwev(i)+pwd(i,ki) +! if(iloop.eq.1.and.i.eq.102.and.j.eq.62)then +! print *,'in cup_dd_moi ', hcd(i,ki),HES_cup(I,Ki),dh,dqeva +! endif + enddo +! +!--- end loop over i + if(pwev(I).eq.0.and.iloop.eq.1)then +! print *,'problem with buoy in cup_dd_moisture',i + ierr(i)=7 + ierrc(i)="problem with buoy in cup_dd_moisture" + endif + if(BU(I).GE.0.and.iloop.eq.1)then +! print *,'problem with buoy in cup_dd_moisture',i + ierr(i)=7 + ierrc(i)="problem2 with buoy in cup_dd_moisture" + endif + endif +100 continue + + END SUBROUTINE cup_dd_moisture_new + + SUBROUTINE cup_env(z,qes,he,hes,t,q,p,z1, & + psur,ierr,tcrit,itest,xl,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte ) + + IMPLICIT NONE + + integer & + ,intent (in ) :: & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte + ! + ! ierr error value, maybe modified in this routine + ! q = environmental mixing ratio + ! qes = environmental saturation mixing ratio + ! t = environmental temp + ! tv = environmental virtual temp + ! p = environmental pressure + ! z = environmental heights + ! he = environmental moist static energy + ! hes = environmental saturation moist static energy + ! psur = surface pressure + ! z1 = terrain elevation + ! + ! + real, dimension (its:ite,kts:kte) & + ,intent (in ) :: & + p,t,q + real, dimension (its:ite,kts:kte) & + ,intent (out ) :: & + he,hes,qes + real, dimension (its:ite,kts:kte) & + ,intent (inout) :: & + z + real, dimension (its:ite) & + ,intent (in ) :: & + psur,z1 + real & + ,intent (in ) :: & + xl,cp + integer, dimension (its:ite) & + ,intent (inout) :: & + ierr + integer & + ,intent (in ) :: & + itest +! +! 
local variables in this routine +! + + integer :: & + i,k,iph + real, dimension (1:2) :: AE,BE,HT + real, dimension (its:ite,kts:kte) :: tv + real :: tcrit,e,tvbar +! real, external :: satvap +! real :: satvap + + + HT(1)=XL/CP + HT(2)=2.834E6/CP + BE(1)=.622*HT(1)/.286 + AE(1)=BE(1)/273.+ALOG(610.71) + BE(2)=.622*HT(2)/.286 + AE(2)=BE(2)/273.+ALOG(610.71) +! print *, 'TCRIT = ', tcrit,its,ite + DO k=kts,ktf + do i=its,itf + if(ierr(i).eq.0)then +!Csgb - IPH is for phase, dependent on TCRIT (water or ice) + IPH=1 + IF(T(I,K).LE.TCRIT)IPH=2 +! print *, 'AE(IPH),BE(IPH) = ',AE(IPH),BE(IPH),AE(IPH)-BE(IPH),T(i,k),i,k +! E=EXP(AE(IPH)-BE(IPH)/T(I,K)) +! print *, 'P, E = ', P(I,K), E +! QES(I,K)=.622*E/(100.*P(I,K)-E) + e=satvap(t(i,k)) + qes(i,k)=0.622*e/max(1.e-8,(p(i,k)-e)) + IF(QES(I,K).LE.1.E-08)QES(I,K)=1.E-08 + IF(QES(I,K).LT.Q(I,K))QES(I,K)=Q(I,K) +! IF(Q(I,K).GT.QES(I,K))Q(I,K)=QES(I,K) + TV(I,K)=T(I,K)+.608*Q(I,K)*T(I,K) + endif + enddo + enddo +! +!--- z's are calculated with changed h's and q's and t's +!--- if itest=2 +! + if(itest.eq.1 .or. itest.eq.0)then + do i=its,itf + if(ierr(i).eq.0)then + Z(I,1)=max(0.,Z1(I))-(ALOG(P(I,1))- & + ALOG(PSUR(I)))*287.*TV(I,1)/9.81 + endif + enddo + +! --- calculate heights + DO K=kts+1,ktf + do i=its,itf + if(ierr(i).eq.0)then + TVBAR=.5*TV(I,K)+.5*TV(I,K-1) + Z(I,K)=Z(I,K-1)-(ALOG(P(I,K))- & + ALOG(P(I,K-1)))*287.*TVBAR/9.81 + endif + enddo + enddo + else if(itest.eq.2)then + do k=kts,ktf + do i=its,itf + if(ierr(i).eq.0)then + z(i,k)=(he(i,k)-1004.*t(i,k)-2.5e6*q(i,k))/9.81 + z(i,k)=max(1.e-3,z(i,k)) + endif + enddo + enddo + else if(itest.eq.-1)then + endif +! +!--- calculate moist static energy - HE +! saturated moist static energy - HES +! 
+ DO k=kts,ktf + do i=its,itf + if(ierr(i).eq.0)then + if(itest.le.0)HE(I,K)=9.81*Z(I,K)+1004.*T(I,K)+2.5E06*Q(I,K) + HES(I,K)=9.81*Z(I,K)+1004.*T(I,K)+2.5E06*QES(I,K) + IF(HE(I,K).GE.HES(I,K))HE(I,K)=HES(I,K) + endif + enddo + enddo + + END SUBROUTINE cup_env + + + SUBROUTINE cup_env_clev(t,qes,q,he,hes,z,p,qes_cup,q_cup, & + he_cup,hes_cup,z_cup,p_cup,gamma_cup,t_cup,psur, & + ierr,z1,xl,rv,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte ) + + IMPLICIT NONE + + integer & + ,intent (in ) :: & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte + ! + ! ierr error value, maybe modified in this routine + ! q = environmental mixing ratio + ! q_cup = environmental mixing ratio on cloud levels + ! qes = environmental saturation mixing ratio + ! qes_cup = environmental saturation mixing ratio on cloud levels + ! t = environmental temp + ! t_cup = environmental temp on cloud levels + ! p = environmental pressure + ! p_cup = environmental pressure on cloud levels + ! z = environmental heights + ! z_cup = environmental heights on cloud levels + ! he = environmental moist static energy + ! he_cup = environmental moist static energy on cloud levels + ! hes = environmental saturation moist static energy + ! hes_cup = environmental saturation moist static energy on cloud levels + ! gamma_cup = gamma on cloud levels + ! psur = surface pressure + ! z1 = terrain elevation + ! + ! + real, dimension (its:ite,kts:kte) & + ,intent (in ) :: & + qes,q,he,hes,z,p,t + real, dimension (its:ite,kts:kte) & + ,intent (out ) :: & + qes_cup,q_cup,he_cup,hes_cup,z_cup,p_cup,gamma_cup,t_cup + real, dimension (its:ite) & + ,intent (in ) :: & + psur,z1 + real & + ,intent (in ) :: & + xl,rv,cp + integer, dimension (its:ite) & + ,intent (inout) :: & + ierr +! +! local variables in this routine +! + + integer :: & + i,k + + + do k=kts,ktf + do i=its,itf + qes_cup(i,k)=0. + q_cup(i,k)=0. + hes_cup(i,k)=0. + he_cup(i,k)=0. + z_cup(i,k)=0. + p_cup(i,k)=0. + t_cup(i,k)=0. + gamma_cup(i,k)=0. 
+ enddo + enddo + do k=kts+1,ktf + do i=its,itf + if(ierr(i).eq.0)then + qes_cup(i,k)=.5*(qes(i,k-1)+qes(i,k)) + q_cup(i,k)=.5*(q(i,k-1)+q(i,k)) + hes_cup(i,k)=.5*(hes(i,k-1)+hes(i,k)) + he_cup(i,k)=.5*(he(i,k-1)+he(i,k)) + if(he_cup(i,k).gt.hes_cup(i,k))he_cup(i,k)=hes_cup(i,k) + z_cup(i,k)=.5*(z(i,k-1)+z(i,k)) + p_cup(i,k)=.5*(p(i,k-1)+p(i,k)) + t_cup(i,k)=.5*(t(i,k-1)+t(i,k)) + gamma_cup(i,k)=(xl/cp)*(xl/(rv*t_cup(i,k) & + *t_cup(i,k)))*qes_cup(i,k) + endif + enddo + enddo + do i=its,itf + if(ierr(i).eq.0)then + qes_cup(i,1)=qes(i,1) + q_cup(i,1)=q(i,1) +! hes_cup(i,1)=hes(i,1) +! he_cup(i,1)=he(i,1) + hes_cup(i,1)=9.81*z1(i)+1004.*t(i,1)+2.5e6*qes(i,1) + he_cup(i,1)=9.81*z1(i)+1004.*t(i,1)+2.5e6*q(i,1) + z_cup(i,1)=.5*(z(i,1)+z1(i)) + p_cup(i,1)=.5*(p(i,1)+psur(i)) + z_cup(i,1)=z1(i) + p_cup(i,1)=psur(i) + t_cup(i,1)=t(i,1) + gamma_cup(i,1)=xl/cp*(xl/(rv*t_cup(i,1) & + *t_cup(i,1)))*qes_cup(i,1) + endif + enddo + + END SUBROUTINE cup_env_clev + + + SUBROUTINE cup_forcing_ens_3d(closure_n,xland,aa0,aa1,xaa0,mbdt,dtime,ierr,ierr2,ierr3,& + xf_ens,j,name,axx,maxens,iens,iedt,maxens2,maxens3,mconv, & + p_cup,ktop,omeg,zd,k22,zu,pr_ens,edt,kbcon, & + ensdim,icoic, & + ipr,jpr,itf,jtf,ktf, & + its,ite, jts,jte, kts,kte,ens4,ktau ) + + IMPLICIT NONE + + integer & + ,intent (in ) :: & + ipr,jpr,itf,jtf,ktf, & + its,ite, jts,jte, kts,kte,ens4,ktau + integer, intent (in ) :: & + j,ensdim,maxens,iens,iedt,maxens2,maxens3 + ! + ! ierr error value, maybe modified in this routine + ! pr_ens = precipitation ensemble + ! xf_ens = mass flux ensembles + ! massfln = downdraft mass flux ensembles used in next timestep + ! omeg = omega from large scale model + ! mconv = moisture convergence from large scale model + ! zd = downdraft normalized mass flux + ! zu = updraft normalized mass flux + ! aa0 = cloud work function without forcing effects + ! aa1 = cloud work function with forcing effects + ! xaa0 = cloud work function with cloud effects (ensemble dependent) + ! 
edt = epsilon + ! dir = "storm motion" + ! mbdt = arbitrary numerical parameter + ! dtime = dt over which forcing is applied + ! iact_gr_old = flag to tell where convection was active + ! kbcon = LFC of parcel from k22 + ! k22 = updraft originating level + ! icoic = flag if only want one closure (usually set to zero!) + ! name = deep or shallow convection flag + ! + real, dimension (its:ite,jts:jte,1:ensdim) & + ,intent (inout) :: & + pr_ens + real, dimension (its:ite,jts:jte,1:ensdim) & + ,intent (out ) :: & + xf_ens + real, dimension (its:ite,kts:kte) & + ,intent (in ) :: & + zd,zu,p_cup + real, dimension (its:ite,kts:kte,1:ens4) & + ,intent (in ) :: & + omeg + real, dimension (its:ite,1:maxens) & + ,intent (in ) :: & + xaa0 + real, dimension (its:ite) & + ,intent (in ) :: & + aa1,edt,xland + real, dimension (its:ite,1:ens4) & + ,intent (in ) :: & + mconv,axx + real, dimension (its:ite) & + ,intent (inout) :: & + aa0,closure_n + real, dimension (1:maxens) & + ,intent (in ) :: & + mbdt + real & + ,intent (in ) :: & + dtime + integer, dimension (its:ite) & + ,intent (in ) :: & + k22,kbcon,ktop + integer, dimension (its:ite) & + ,intent (inout) :: & + ierr,ierr2,ierr3 + integer & + ,intent (in ) :: & + icoic + character *(*), intent (in) :: & + name +! +! local variables in this routine +! + + real, dimension (1:maxens3) :: & + xff_ens3 + real, dimension (1:maxens) :: & + xk + integer :: & + i,k,nall,n,ne,nens,nens3,iresult,iresultd,iresulte,mkxcrt,kclim + parameter (mkxcrt=15) + real :: & + fens4,a1,massfld,a_ave,xff0,xff00,xxx,xomg,aclim1,aclim2,aclim3,aclim4 + real, dimension(1:mkxcrt) :: & + pcrit,acrit,acritt + + integer :: nall2,ixxx,irandom + integer, dimension (8) :: seed + real, dimension (its:ite) :: ens_adj + + + DATA PCRIT/850.,800.,750.,700.,650.,600.,550.,500.,450.,400., & + 350.,300.,250.,200.,150./ + DATA ACRIT/.0633,.0445,.0553,.0664,.075,.1082,.1521,.2216, & + .3151,.3677,.41,.5255,.7663,1.1686,1.6851/ +! 
GDAS DERIVED ACRIT + DATA ACRITT/.203,.515,.521,.566,.625,.665,.659,.688, & + .743,.813,.886,.947,1.138,1.377,1.896/ + +! + ens_adj=1. + seed=0 + do i=its,itf + if(ierr(i).eq.0)then + seed(1)=int(aa0(i)) + seed(2)=int(aa1(i)) + exit + endif + enddo + + nens=0 + irandom=0 + fens4=float(ens4) + +!--- LARGE SCALE FORCING +! + DO 100 i=its,itf + if(name.eq.'deeps'.and.ierr(i).gt.995)then + aa0(i)=0. + ierr(i)=0 + endif + IF(ierr(i).eq.0)then + ens_adj(i)=1. + if(ierr2(i).gt.0.and.ierr3(i).eq.0)ens_adj(i)=0. ! 2./3. + if(ierr2(i).gt.0.and.ierr3(i).gt.0)ens_adj(i)=0. +! +!--- +! + if(name.eq.'deeps')then +! + a_ave=0. + do ne=1,ens4 + a_ave=a_ave+axx(i,ne) +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'in forcing, a_ave,axx(i,ne) = ',a_ave,axx(i,ne),maxens,xland(i) + enddo + a_ave=max(0.,a_ave/fens4) + a_ave=min(a_ave,aa1(i)) + a_ave=max(0.,a_ave) + do ne=1,16 + xff_ens3(ne)=0. + enddo + xff0= (AA1(I)-AA0(I))/DTIME + xff_ens3(1)=max(0.,(AA1(I)-AA0(I))/dtime) + xff_ens3(2)=max(0.,(a_ave-AA0(I))/dtime) + +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)AA1(I),AA0(I),xff_ens3(1),xff_ens3(2),dtime + if(irandom.eq.1)then + call random_number (xxx) + ixxx=min(ens4,max(1,int(fens4*xxx+1.e-8))) + xff_ens3(3)=max(0.,(axx(i,ixxx)-AA0(I))/dtime) + call random_number (xxx) + ixxx=min(ens4,max(1,int(fens4*xxx+1.e-8))) + xff_ens3(13)=max(0.,(axx(i,ixxx)-AA0(I))/dtime) + else + xff_ens3(3)=max(0.,(AA1(I)-AA0(I))/dtime) + xff_ens3(13)=max(0.,(AA1(I)-AA0(I))/dtime) + endif +! +!--- more original Arakawa-Schubert (climatologic value of aa0) +! +! +!--- omeg is in bar/s, mconv done with omeg in Pa/s +! more like Brown (1979), or Frank-Cohen (199?) +! + xff_ens3(14)=0. + do ne=1,ens4 + xff_ens3(14)=xff_ens3(14)-omeg(i,k22(i),ne)/(fens4*9.81) + enddo + if(xff_ens3(14).lt.0.)xff_ens3(14)=0. + xff_ens3(5)=0. + do ne=1,ens4 + xff_ens3(5)=xff_ens3(5)-omeg(i,kbcon(i),ne)/(fens4*9.81) + enddo + if(xff_ens3(5).lt.0.)xff_ens3(5)=0. +! +! minimum below kbcon +! 
+ xff_ens3(4)=-omeg(i,2,1)/9.81 + do k=2,kbcon(i)-1 + do ne=1,ens4 + xomg=-omeg(i,k,ne)/9.81 + if(xomg.lt.xff_ens3(4))xff_ens3(4)=xomg + enddo + enddo + if(xff_ens3(4).lt.0.)xff_ens3(4)=0. +! +! max below kbcon + xff_ens3(6)=-omeg(i,2,1)/9.81 + do k=2,kbcon(i)-1 + do ne=1,ens4 + xomg=-omeg(i,k,ne)/9.81 + if(xomg.gt.xff_ens3(6))xff_ens3(6)=xomg + enddo + enddo + if(xff_ens3(6).lt.0.)xff_ens3(6)=0. + xff_ens3(5)=xff_ens3(6) + xff_ens3(4)=xff_ens3(6) +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)xff_ens3(4),xff_ens3(5) +! +!--- more like Krishnamurti et al.; pick max and average values +! + xff_ens3(7)=mconv(i,1) + xff_ens3(8)=mconv(i,1) + xff_ens3(9)=mconv(i,1) + if(ens4.gt.1)then + do ne=2,ens4 + if (mconv(i,ne).gt.xff_ens3(7))xff_ens3(7)=mconv(i,ne) + enddo + do ne=2,ens4 + if (mconv(i,ne).lt.xff_ens3(8))xff_ens3(8)=mconv(i,ne) + enddo + do ne=2,ens4 + xff_ens3(9)=xff_ens3(9)+mconv(i,ne) + enddo + xff_ens3(9)=xff_ens3(9)/fens4 + endif +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)xff_ens3(7),xff_ens3(8) +! + if(irandom.eq.1)then + call random_number (xxx) + ixxx=min(ens4,max(1,int(fens4*xxx+1.e-8))) + xff_ens3(15)=mconv(i,ixxx) + else + xff_ens3(15)=mconv(i,1) + endif +! +!--- more like Fritsch Chappel or Kain Fritsch (plus triggers) +! + xff_ens3(10)=AA0(i)/(60.*20.) + xff_ens3(11)=AA0(I)/(60.*20.) + xff_ens3(16)=AA0(I)/(60.*20.) + if(irandom.eq.1)then + call random_number (xxx) + ixxx=min(ens4,max(1,int(fens4*xxx+1.e-8))) + xff_ens3(12)=AA0(I)/(60.*20.) + else + xff_ens3(12)=AA0(I)/(60.*20.) + endif +! +!--- more original Arakawa-Schubert (climatologic value of aa0) +! +!gtest + if(icoic.eq.0)then + if(xff0.lt.0.)then + xff_ens3(1)=0. + xff_ens3(2)=0. + xff_ens3(3)=0. + xff_ens3(13)=0. + xff_ens3(10)=0. + xff_ens3(11)=0. + xff_ens3(12)=0. + endif + if(xff0.lt.0 .and. xland(i).lt.0.1)then + xff_ens3(:)=0. + endif + endif + +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'xff_ens = ',i,j,ipr,jpr,xff_ens3 + + + do nens=1,maxens + XK(nens)=(XAA0(I,nens)-AA1(I))/MBDT(1) +! 
if(i.eq.ipr.and.j.eq.jpr)write(0,*)'xks = ',xk(nens),XAA0(I,nens),AA1(I),mbdt + if(xk(nens).le.0.and.xk(nens).gt.-1.e-2) & + xk(nens)=-1.e-2 + if(xk(nens).gt.0.and.xk(nens).lt.1.e-2) & + xk(nens)=1.e-2 + enddo +! +!--- add up all ensembles +! + do 350 ne=1,maxens +! +!--- for every xk, we have maxens3 xffs +!--- iens is from outermost ensemble (most expensive! +! +!--- iedt (maxens2 belongs to it) +!--- is from second, next outermost, not so expensive +! +!--- so, for every outermost loop, we have maxens*maxens2*3 +!--- ensembles!!! nall would be 0, if everything is on first +!--- loop index, then ne would start counting, then iedt, then iens.... +! + iresult=0 + iresultd=0 + iresulte=0 + nall=(iens-1)*maxens3*maxens*maxens2 & + +(iedt-1)*maxens*maxens3 & + +(ne-1)*maxens3 +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'maxens',ne,nall,iens,maxens3,maxens,maxens2,iedt +! +! over water, enfor!e small cap for some of the closures +! + if(maxens.gt.0 .and. xland(i).lt.0.1)then + if(ierr2(i).gt.0.or.ierr3(i).gt.0)then + xff_ens3(1) =ens_adj(i)*xff_ens3(1) + xff_ens3(2) =ens_adj(i)*xff_ens3(2) + xff_ens3(3) =ens_adj(i)*xff_ens3(3) + xff_ens3(13) =ens_adj(i)*xff_ens3(13) + xff_ens3(10) =ens_adj(i)*xff_ens3(10) + xff_ens3(11) =ens_adj(i)*xff_ens3(11) + xff_ens3(12) =ens_adj(i)*xff_ens3(12) + xff_ens3(16) =ens_adj(i)*xff_ens3(16) + xff_ens3(7) =ens_adj(i)*xff_ens3(7) + xff_ens3(8) =ens_adj(i)*xff_ens3(8) + xff_ens3(9) =ens_adj(i)*xff_ens3(9) + xff_ens3(15) =ens_adj(i)*xff_ens3(15) +! xff_ens3(7) =0. +! xff_ens3(8) =0. +! xff_ens3(9) =0. + endif + endif +! +! end water treatment +! +! +!--- check for upwind convection +! iresult=0 + massfld=0. + + IF(XK(ne).lt.0.and.xff0.gt.0.)iresultd=1 + iresulte=max(iresult,iresultd) + iresulte=1 + if(iresulte.eq.1)then +! +!--- special treatment for stability closures +! +! 
if(i.eq.ipr.and.j.eq.jpr)write(0,*)'xffs = ',xff_ens3(1:16) + + if(xff0.ge.0.)then + if(xff_ens3(1).gt.0)xf_ens(i,j,nall+1)=max(0.,-xff_ens3(1)/xk(ne)) + if(xff_ens3(2).gt.0)xf_ens(i,j,nall+2)=max(0.,-xff_ens3(2)/xk(ne)) + if(xff_ens3(3).gt.0)xf_ens(i,j,nall+3)=max(0.,-xff_ens3(3)/xk(ne)) + if(xff_ens3(13).gt.0)xf_ens(i,j,nall+13)=max(0.,-xff_ens3(13)/xk(ne)) +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'xf_ens(nall+1) ',i,j,nall,xf_ens(i,j,nall+1) + endif +! +!--- if iresult.eq.1, following independent of xff0 +! + xf_ens(i,j,nall+4)=max(0.,xff_ens3(4)) + xf_ens(i,j,nall+5)=max(0.,xff_ens3(5)) + xf_ens(i,j,nall+6)=max(0.,xff_ens3(6)) + xf_ens(i,j,nall+14)=max(0.,xff_ens3(14)) + a1=max(1.e-3,pr_ens(i,j,nall+7)) + xf_ens(i,j,nall+7)=max(0.,xff_ens3(7)/a1) +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'a1 = ',xff_ens3(7),a1,xf_ens(i,j,nall+7) + a1=max(1.e-3,pr_ens(i,j,nall+8)) + xf_ens(i,j,nall+8)=max(0.,xff_ens3(8)/a1) + a1=max(1.e-3,pr_ens(i,j,nall+9)) + xf_ens(i,j,nall+9)=max(0.,xff_ens3(9)/a1) + a1=max(1.e-3,pr_ens(i,j,nall+15)) + xf_ens(i,j,nall+15)=max(0.,xff_ens3(15)/a1) + if(XK(ne).lt.0.)then + xf_ens(i,j,nall+10)=max(0.,-xff_ens3(10)/xk(ne)) + xf_ens(i,j,nall+11)=max(0.,-xff_ens3(11)/xk(ne)) + xf_ens(i,j,nall+12)=max(0.,-xff_ens3(12)/xk(ne)) + xf_ens(i,j,nall+16)=max(0.,-xff_ens3(16)/xk(ne)) + endif + if(icoic.ge.1)then + closure_n(i)=0. 
+ xf_ens(i,j,nall+1)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+2)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+3)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+4)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+5)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+6)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+7)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+8)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+9)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+10)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+11)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+12)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+13)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+14)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+15)=xf_ens(i,j,nall+icoic) + xf_ens(i,j,nall+16)=xf_ens(i,j,nall+icoic) + endif +! +! 16 is a randon pick from the oher 15 +! + if(irandom.eq.1)then + call random_number (xxx) + ixxx=min(15,max(1,int(15.*xxx+1.e-8))) + xf_ens(i,j,nall+16)=xf_ens(i,j,nall+ixxx) +! else +! xf_ens(i,j,nall+16)=xf_ens(i,j,nall+1) + endif +! +! +!--- do some more on the caps!!! ne=1 for 175, ne=2 for 100,.... +! +! do not care for caps here for closure groups 1 and 5, +! they are fine, do not turn them off here +! +!!!! NOT USED FOR "NORMAL" APPLICATION (maxens=1) +! + if(maxens.gt.1)then + if(ne.eq.2.and.ierr2(i).gt.0)then + xf_ens(i,j,nall+1) =0. + xf_ens(i,j,nall+2) =0. + xf_ens(i,j,nall+3) =0. + xf_ens(i,j,nall+4) =0. + xf_ens(i,j,nall+5) =0. + xf_ens(i,j,nall+6) =0. + xf_ens(i,j,nall+7) =0. + xf_ens(i,j,nall+8) =0. + xf_ens(i,j,nall+9) =0. + xf_ens(i,j,nall+10)=0. + xf_ens(i,j,nall+11)=0. + xf_ens(i,j,nall+12)=0. + xf_ens(i,j,nall+13)=0. + xf_ens(i,j,nall+14)=0. + xf_ens(i,j,nall+15)=0. + xf_ens(i,j,nall+16)=0. + endif + if(ne.eq.3.and.ierr3(i).gt.0)then + xf_ens(i,j,nall+1) =0. + xf_ens(i,j,nall+2) =0. + xf_ens(i,j,nall+3) =0. + xf_ens(i,j,nall+4) =0. + xf_ens(i,j,nall+5) =0. + xf_ens(i,j,nall+6) =0. + xf_ens(i,j,nall+7) =0. + xf_ens(i,j,nall+8) =0. + xf_ens(i,j,nall+9) =0. + xf_ens(i,j,nall+10)=0. + xf_ens(i,j,nall+11)=0. + xf_ens(i,j,nall+12)=0. 
+ xf_ens(i,j,nall+13)=0. + xf_ens(i,j,nall+14)=0. + xf_ens(i,j,nall+15)=0. + xf_ens(i,j,nall+16)=0. + endif + endif + + endif + 350 continue + if(maxens.gt.1)then +! ne=1, cap=175 +! + nall=(iens-1)*maxens3*maxens*maxens2 & + +(iedt-1)*maxens*maxens3 +! ne=2, cap=100 +! + nall2=(iens-1)*maxens3*maxens*maxens2 & + +(iedt-1)*maxens*maxens3 & + +(2-1)*maxens3 + xf_ens(i,j,nall+4) = xf_ens(i,j,nall2+4) + xf_ens(i,j,nall+5) =xf_ens(i,j,nall2+5) + xf_ens(i,j,nall+6) =xf_ens(i,j,nall2+6) + xf_ens(i,j,nall+14) =xf_ens(i,j,nall2+14) + xf_ens(i,j,nall+7) =xf_ens(i,j,nall2+7) + xf_ens(i,j,nall+8) =xf_ens(i,j,nall2+8) + xf_ens(i,j,nall+9) =xf_ens(i,j,nall2+9) + xf_ens(i,j,nall+15) =xf_ens(i,j,nall2+15) + xf_ens(i,j,nall+10)=xf_ens(i,j,nall2+10) + xf_ens(i,j,nall+11)=xf_ens(i,j,nall2+11) + xf_ens(i,j,nall+12)=xf_ens(i,j,nall2+12) +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'should not be here' + endif +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'xff_ens3=',xff_ens3 + go to 100 + endif + elseif(ierr(i).ne.20.and.ierr(i).ne.0)then + do n=1,ensdim + xf_ens(i,j,n)=0. + enddo + endif +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'xff_ens3=',xff_ens3 + 100 continue + + END SUBROUTINE cup_forcing_ens_3d + + + SUBROUTINE cup_kbcon(ierrc,cap_inc,iloop,k22,kbcon,he_cup,hes_cup, & + hkb,ierr,kbmax,p_cup,cap_max, & + xl,cp,ztexec,zqexec,use_excess, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte ) + + IMPLICIT NONE +! + + ! only local wrf dimensions are need as of now in this routine + + integer & + ,intent (in ) :: & + use_excess,itf,jtf,ktf, & + its,ite, jts,jte, kts,kte + ! + ! + ! + ! ierr error value, maybe modified in this routine + ! 
+! (cup_kbcon, continued) argument declarations:
+! he_cup/hes_cup = moist static energy / saturation MSE on cloud levels
+! p_cup = pressure on cloud levels; ztexec/zqexec = excess T/q at the origin level
+! cap_max/cap_inc = maximum allowed cap strength and its per-iteration increment
+ real, dimension (its:ite,kts:kte) &
+ ,intent (in ) :: &
+ he_cup,hes_cup,p_cup
+ real, dimension (its:ite) &
+ ,intent (in ) :: &
+ ztexec,zqexec,cap_max,cap_inc
+ real,intent (in ) :: &
+ xl,cp
+ real, dimension (its:ite) &
+ ,intent (inout ) :: &
+ hkb
+ integer, dimension (its:ite) &
+ ,intent (in ) :: &
+ kbmax
+ integer, dimension (its:ite) &
+ ,intent (inout) :: &
+ kbcon,k22,ierr
+ integer &
+ ,intent (in ) :: &
+ iloop
+ character*50 :: ierrc(its:ite)
+!
+! local variables in this routine
+!
+
+ integer :: &
+ i,k,k1,k2
+ real :: &
+ pbcdif,plus,hetest
+!
+!--- DETERMINE THE LEVEL OF CONVECTIVE CLOUD BASE - KBCON
+!
+! For each column: start at the parcel origin level k22 and raise the trial
+! cloud base until the parcel moist static energy (hetest) reaches the
+! environmental saturation MSE at that level. Columns where no level is
+! found below kbmax+2 are flagged with ierr=3. iloop selects the calling
+! context (e.g. iloop=5 is the shallow-convection call).
+ DO 27 i=its,itf
+ kbcon(i)=1
+ IF(ierr(I).ne.0)GO TO 27
+ KBCON(I)=K22(I)+1
+ if(iloop.eq.5)KBCON(I)=K22(I)
+ GO TO 32
+ 31 CONTINUE
+! raise the trial cloud base one level; give up above kbmax+2
+ KBCON(I)=KBCON(I)+1
+ IF(KBCON(I).GT.KBMAX(i)+2)THEN
+ if(iloop.ne.4)then
+ ierr(i)=3
+ ierrc(i)="could not find reasonable kbcon in cup_kbcon"
+ endif
+ GO TO 27
+ ENDIF
+ 32 CONTINUE
+ hetest=hkb(i) ! HE_cup(I,K22(I))
+ if(iloop.eq.5)then
+ hetest=HKB(I)
+! do k=1,k22(i)
+! hetest=max(hetest,he_cup(i,k))
+! enddo
+ endif
+! parcel MSE still below saturation MSE here -> keep searching upward
+ IF(HETEST.LT.HES_cup(I,KBCON(I)))then
+! write(0,*)'htest',k22(i),kbcon(i),HETEST,-P_cup(I,KBCON(I))+P_cup(I,K22(I))
+ GO TO 31
+ endif
+
+! cloud base pressure and max moist static energy pressure
+! i.e., the depth (in mb) of the layer of negative buoyancy
+ if(KBCON(I)-K22(I).eq.1)go to 27
+ if(iloop.eq.5 .and. (KBCON(I)-K22(I)).eq.0)go to 27
+ PBCDIF=-P_cup(I,KBCON(I))+P_cup(I,K22(I))
+ plus=max(25.,cap_max(i)-float(iloop-1)*cap_inc(i))
+ if(iloop.eq.4)plus=cap_max(i)
+!
+! for shallow convection, if cap_max is greater than 25, it is the pressure at pbltop
+ if(iloop.eq.5)plus=25.
+ if(iloop.eq.5.and.cap_max(i).gt.25)pbcdif=-P_cup(I,KBCON(I))+cap_max(i)
+! cap too strong: move the origin level k22 up by one and restart the
+! search, recomputing hkb with the excess treatment chosen by use_excess
+ IF(PBCDIF.GT.plus)THEN
+! write(0,*)'htest',k22(i),kbcon(i),plus,-P_cup(I,KBCON(I))+P_cup(I,K22(I))
+ K22(I)=K22(I)+1
+ KBCON(I)=K22(I)+1
+ if(use_excess == 2) then
+ k1=max(1,k22(i)-1)
+ k2=max(1,min(kbcon(i)-1,k22(i)+1)) !kbcon(i)-1
+! NOTE(review): the assignment below overwrites the min()-bounded k2 just
+! computed above with k22+1 -- confirm the bounded value is meant to be dead
+ k2=k22(i)+1
+ hkb(i)=sum(he_cup(i,k1:k2))/float(k2-k1+1)+(xl*zqexec(i)+cp*ztexec(i))/float(k2-k1+1)
+ else if(use_excess <= 1)then
+ hkb(i)=he_cup(i,k22(i))+float(use_excess)*(xl*zqexec(i)+cp*ztexec(i))
+ endif ! excess
+
+ if(iloop.eq.5)KBCON(I)=K22(I)
+ IF(KBCON(I).GT.KBMAX(i)+2)THEN
+ if(iloop.ne.4)then
+ ierr(i)=3
+ ierrc(i)="could not find reasonable kbcon in cup_kbcon"
+ endif
+ GO TO 27
+ ENDIF
+ GO TO 32
+ ENDIF
+ 27 CONTINUE
+
+ END SUBROUTINE cup_kbcon
+
+
+! cup_ktop: diagnose cloud top KTOP as the level just below where the
+! buoyancy term dby first becomes non-positive above kbcon, and zero
+! dby above the diagnosed top.
+ SUBROUTINE cup_ktop(ierrc,ilo,dby,kbcon,ktop,ierr, &
+ itf,jtf,ktf, &
+ its,ite, jts,jte, kts,kte )
+
+ IMPLICIT NONE
+!
+! on input
+!
+
+ ! only local wrf dimensions are needed as of now in this routine
+
+ integer &
+ ,intent (in ) :: &
+ itf,jtf,ktf, &
+ its,ite, jts,jte, kts,kte
+ ! dby = buoyancy term
+ ! ktop = cloud top (output)
+ ! ilo = flag (ilo=1: failure to find a top is an error, ierr=5)
+ ! ierr error value, maybe modified in this routine
+ !
+ real, dimension (its:ite,kts:kte) &
+ ,intent (inout) :: &
+ dby
+ integer, dimension (its:ite) &
+ ,intent (in ) :: &
+ kbcon
+ integer &
+ ,intent (in ) :: &
+ ilo
+ integer, dimension (its:ite) &
+ ,intent (out ) :: &
+ ktop
+ integer, dimension (its:ite) &
+ ,intent (inout) :: &
+ ierr
+ character*50 :: ierrc(its:ite)
+!
+! local variables in this routine
+!
+
+ integer :: &
+ i,k
+!
+ DO 42 i=its,itf
+ ktop(i)=1
+ IF(ierr(I).EQ.0)then
+! scan upward from just above cloud base for the first non-positive dby
+ DO 40 K=KBCON(I)+1,ktf-1
+ IF(DBY(I,K).LE.0.)THEN
+ KTOP(I)=K-1
+ GO TO 41
+ ENDIF
+ 40 CONTINUE
+ if(ilo.eq.1)ierr(i)=5
+ if(ilo.eq.1)ierrc(i)="problem with defining ktop"
+! if(ilo.eq.2)ierr(i)=998
+ GO TO 42
+ 41 CONTINUE
+! zero the buoyancy above the diagnosed cloud top
+ do k=ktop(i)+1,ktf
+ dby(i,k)=0.
+ enddo
+! a zero-depth cloud (base == top) is rejected with ierr=55
+ if(kbcon(i).eq.ktop(i))then
+ ierr(i)=55
+ ierrc(i)="kbcon == ktop "
+ endif
+ endif
+ 42 CONTINUE
+
+ END SUBROUTINE cup_ktop
+
+
+! cup_MAXIMI: for each column i with ierr(i)==0, return in MAXX(i) the
+! level index of the maximum of ARRAY(i,:) over levels KS..KE(i).
+! Ties go to the highest such level (the comparison uses .GE.).
+ SUBROUTINE cup_MAXIMI(ARRAY,KS,KE,MAXX,ierr, &
+ itf,jtf,ktf, &
+ its,ite, jts,jte, kts,kte )
+
+ IMPLICIT NONE
+!
+! on input
+!
+
+ ! only local wrf dimensions are needed as of now in this routine
+
+ integer &
+ ,intent (in ) :: &
+ itf,jtf,ktf, &
+ its,ite, jts,jte, kts,kte
+ ! array = input array
+ ! maxx = output array with the level index of the column maximum
+ ! ks = scalar lower search bound, ke = per-column upper search bound
+ real, dimension (its:ite,kts:kte) &
+ ,intent (in ) :: &
+ array
+ integer, dimension (its:ite) &
+ ,intent (in ) :: &
+ ierr,ke
+ integer &
+ ,intent (in ) :: &
+ ks
+ integer, dimension (its:ite) &
+ ,intent (out ) :: &
+ maxx
+ real, dimension (its:ite) :: &
+ x ! running maximum value per column
+ real :: &
+ xar
+ integer :: &
+ i,k
+
+ DO 200 i=its,itf
+ MAXX(I)=KS
+ if(ierr(i).eq.0)then
+ X(I)=ARRAY(I,KS)
+!
+ DO 100 K=KS,KE(i)
+ XAR=ARRAY(I,K)
+ IF(XAR.GE.X(I)) THEN
+ X(I)=XAR
+ MAXX(I)=K
+ ENDIF
+ 100 CONTINUE
+ endif
+ 200 CONTINUE
+
+ END SUBROUTINE cup_MAXIMI
+
+
+! cup_minimi: for each column i with ierr(i)==0, return in KT(i) the level
+! index of the minimum of ARRAY(i,:) over KS(i)..MAX(KS(i)+1,KEND(i)).
+! Ties keep the lowest such level (the comparison uses strict .LT.).
+ SUBROUTINE cup_minimi(ARRAY,KS,KEND,KT,ierr, &
+ itf,jtf,ktf, &
+ its,ite, jts,jte, kts,kte )
+
+ IMPLICIT NONE
+!
+! on input
+!
+
+ ! only local wrf dimensions are needed as of now in this routine
+
+ integer &
+ ,intent (in ) :: &
+ itf,jtf,ktf, &
+ its,ite, jts,jte, kts,kte
+ ! array = input array
+ ! kt = output array with the level index of the column minimum
+ ! ks,kend = per-column search range
+ real, dimension (its:ite,kts:kte) &
+ ,intent (in ) :: &
+ array
+ integer, dimension (its:ite) &
+ ,intent (in ) :: &
+ ierr,ks,kend
+ integer, dimension (its:ite) &
+ ,intent (out ) :: &
+ kt
+ real, dimension (its:ite) :: &
+ x ! running minimum value per column
+ integer :: &
+ i,k,kstop
+
+ DO 200 i=its,itf
+ KT(I)=KS(I)
+ if(ierr(i).eq.0)then
+ X(I)=ARRAY(I,KS(I))
+! guard: search at least one level above ks even when kend <= ks
+ KSTOP=MAX(KS(I)+1,KEND(I))
+!
+ DO 100 K=KS(I)+1,KSTOP
+ IF(ARRAY(I,K).LT.X(I)) THEN
+ X(I)=ARRAY(I,K)
+ KT(I)=K
+ ENDIF
+ 100 CONTINUE
+ endif
+ 200 CONTINUE
+
+ END SUBROUTINE cup_MINIMI
+
+
+! cup_up_aa0: integrate the updraft cloud work function (a CAPE-like
+! quantity), accumulating buoyancy contributions between kbcon and ktop.
+ SUBROUTINE cup_up_aa0(aa0,z,zu,dby,GAMMA_CUP,t_cup, &
+ kbcon,ktop,ierr, &
+ itf,jtf,ktf, &
+ its,ite, jts,jte, kts,kte )
+
+ IMPLICIT NONE
+!
+! on input
+!
+
+ ! only local wrf dimensions are needed as of now in this routine
+
+ integer &
+ ,intent (in ) :: &
+ itf,jtf,ktf, &
+ its,ite, jts,jte, kts,kte
+ ! aa0 cloud work function
+ ! gamma_cup = gamma on model cloud levels
+ ! t_cup = temperature (Kelvin) on model cloud levels
+ ! dby = buoyancy term
+ ! zu= normalized updraft mass flux
+ ! z = heights of model levels
+ ! ierr error value, maybe modified in this routine
+ !
+ real, dimension (its:ite,kts:kte) &
+ ,intent (in ) :: &
+ z,zu,gamma_cup,t_cup,dby
+ integer, dimension (its:ite) &
+ ,intent (in ) :: &
+ kbcon,ktop
+!
+! input and output
+!
+
+
+ integer, dimension (its:ite) &
+ ,intent (inout) :: &
+ ierr
+ real, dimension (its:ite) &
+ ,intent (out ) :: &
+ aa0
+!
+! local variables in this routine
+!
+
+ integer :: &
+ i,k
+ real :: &
+ dz,da
+!
+ do i=its,itf
+ aa0(i)=0.
+ enddo
+! accumulate zu * dz * (g/(cp*T)) * dby / (1+gamma) per layer
+! (9.81 = g, 1004. = cp); only levels with kbcon < k <= ktop contribute
+ DO 100 k=kts+1,ktf
+ DO 100 i=its,itf
+ IF(ierr(i).ne.0)GO TO 100
+ IF(K.LE.KBCON(I))GO TO 100
+ IF(K.Gt.KTOP(I))GO TO 100
+ DZ=Z(I,K)-Z(I,K-1)
+ da=zu(i,k)*DZ*(9.81/(1004.*( &
+ (T_cup(I,K)))))*DBY(I,K-1)/ &
+ (1.+GAMMA_CUP(I,K))
+! a negative increment exactly at cloud top is discarded
+ IF(K.eq.KTOP(I).and.da.le.0.)go to 100
+ AA0(I)=AA0(I)+da
+! the accumulated work function is not allowed to go negative
+ if(aa0(i).lt.0.)aa0(i)=0.
+100 continue
+
+ END SUBROUTINE cup_up_aa0
+
+!====================================================================
+! g3init: one-time initialization for the G3 scheme. On a cold start
+! (restart == .false.) zero the convective tendency arrays, the
+! large-scale forcing tendencies, the mass flux and the precipitation
+! partition arrays; on restart leave them untouched.
+ SUBROUTINE g3init(RTHCUTEN,RQVCUTEN,RQCCUTEN,RQICUTEN, &
+ MASS_FLUX,cp,restart, &
+ P_QC,P_QI,P_FIRST_SCALAR, &
+ RTHFTEN, RQVFTEN, &
+ APR_GR,APR_W,APR_MC,APR_ST,APR_AS, &
+ APR_CAPMA,APR_CAPME,APR_CAPMI, &
+ cugd_tten,cugd_ttens,cugd_qvten, &
+ cugd_qvtens,cugd_qcten, &
+ allowed_to_read, &
+ ids, ide, jds, jde, kds, kde, &
+ ims, ime, jms, jme, kms, kme, &
+ its, ite, jts, jte, kts, kte )
+!--------------------------------------------------------------------
+ IMPLICIT NONE
+!--------------------------------------------------------------------
+ LOGICAL , INTENT(IN) :: restart,allowed_to_read
+ INTEGER , INTENT(IN) :: ids, ide, jds, jde, kds, kde, &
+ ims, ime, jms, jme, kms, kme, &
+ its, ite, jts, jte, kts, kte
+ INTEGER , INTENT(IN) :: P_FIRST_SCALAR, P_QI, P_QC
+ REAL, INTENT(IN) :: cp
+
+ REAL, DIMENSION( ims:ime , kms:kme , jms:jme ) , INTENT(OUT) :: &
+ CUGD_TTEN, &
+ CUGD_TTENS, &
+ CUGD_QVTEN, &
+ CUGD_QVTENS, &
+ CUGD_QCTEN
+ REAL, DIMENSION( ims:ime , kms:kme , jms:jme ) , INTENT(OUT) :: &
+ RTHCUTEN, &
+ RQVCUTEN, &
+ RQCCUTEN, &
+ RQICUTEN
+
+ REAL, DIMENSION( ims:ime , kms:kme , jms:jme ) , INTENT(OUT) :: &
+ RTHFTEN, &
+ RQVFTEN
+
+ REAL, DIMENSION( ims:ime , jms:jme ) , INTENT(OUT) :: &
+ APR_GR,APR_W,APR_MC,APR_ST,APR_AS, &
+ APR_CAPMA,APR_CAPME,APR_CAPMI, &
+ MASS_FLUX
+
+ INTEGER :: i, j, k, itf, jtf, ktf
+
+! clip the tile ends to the last computational (non-boundary) index
+ jtf=min0(jte,jde-1)
+ ktf=min0(kte,kde-1)
+ itf=min0(ite,ide-1)
+
+ IF(.not.restart)THEN
+ DO j=jts,jte
+ DO k=kts,kte
+ DO i=its,ite
+ RTHCUTEN(i,k,j)=0.
+ RQVCUTEN(i,k,j)=0.
+ ENDDO
+ ENDDO
+ ENDDO
+ DO j=jts,jte
+ DO k=kts,kte
+ DO i=its,ite
+ cugd_tten(i,k,j)=0.
+ cugd_ttens(i,k,j)=0.
+ cugd_qvten(i,k,j)=0.
+ cugd_qvtens(i,k,j)=0.
+ ENDDO
+ ENDDO
+ ENDDO
+
+ DO j=jts,jtf
+ DO k=kts,ktf
+ DO i=its,itf
+ RTHFTEN(i,k,j)=0.
+ RQVFTEN(i,k,j)=0.
+ ENDDO
+ ENDDO
+ ENDDO
+
+! moisture-species tendencies are only touched if the tracer is active
+ IF (P_QC .ge. P_FIRST_SCALAR) THEN
+ DO j=jts,jtf
+ DO k=kts,ktf
+ DO i=its,itf
+ RQCCUTEN(i,k,j)=0.
+ cugd_qcten(i,k,j)=0.
+ ENDDO
+ ENDDO
+ ENDDO
+ ENDIF
+
+ IF (P_QI .ge. P_FIRST_SCALAR) THEN
+ DO j=jts,jtf
+ DO k=kts,ktf
+ DO i=its,itf
+ RQICUTEN(i,k,j)=0.
+ ENDDO
+ ENDDO
+ ENDDO
+ ENDIF
+
+ DO j=jts,jtf
+ DO i=its,itf
+ mass_flux(i,j)=0.
+ ENDDO
+ ENDDO
+
+ DO j=jts,jtf
+ DO i=its,itf
+ APR_GR(i,j)=0.
+ APR_ST(i,j)=0.
+ APR_W(i,j)=0.
+ APR_MC(i,j)=0.
+ APR_AS(i,j)=0.
+ APR_CAPMA(i,j)=0.
+ APR_CAPME(i,j)=0.
+ APR_CAPMI(i,j)=0.
+ ENDDO
+ ENDDO
+
+ ENDIF
+
+ END SUBROUTINE g3init
+
+! neg_check: rescale the convective tendencies (and the precipitation)
+! of each column by a single common factor qmemf so that the implied
+! heating rate stays within bounds and no mixing ratio is driven
+! negative; the common factor preserves the relative tendency profile.
+ SUBROUTINE neg_check(j,subt,subq,dt,q,outq,outt,outqc,pret,its,ite,kts,kte,itf,ktf)
+
+ INTEGER, INTENT(IN ) :: j,its,ite,kts,kte,itf,ktf
+
+ real, dimension (its:ite,kts:kte ) , &
+ intent(inout ) :: &
+ outq,outt,outqc,subt,subq
+ real, dimension (its:ite,kts:kte ) , &
+ intent(inout ) :: &
+ q
+ real, dimension (its:ite ) , &
+ intent(inout ) :: &
+ pret
+ real &
+ ,intent (in ) :: &
+ dt
+ real :: thresh,qmem,qmemf,qmem2,qtest,qmem1
+! NOTE(review): there is no IMPLICIT NONE here and the loop indices i,k
+! are not declared locally -- they fall back to Fortran implicit integer
+! typing; confirm this is intentional (legacy style, but it hides typos)
+!
+! first do check on vertical heating rate
+!
+ thresh=300.01
+ do i=its,itf
+ qmemf=1.
+ qmem=0.
+ do k=kts,ktf
+! qmem is the heating rate scaled by 86400 (s/day) -- presumably K/day
+! assuming subt/outt are K/s; shrink qmemf if it exceeds 2*thresh
+! warming or thresh cooling at any level
+ qmem=(subt(i,k)+outt(i,k))*86400.
+ if(qmem.gt.2.*thresh)then
+ qmem2=2.*thresh/qmem
+ qmemf=min(qmemf,qmem2)
+!
+!
+! print *,'1',' adjusted massflux by factor ',i,j,k,qmem,qmem2,qmemf,dt
+ endif
+ if(qmem.lt.-thresh)then
+ qmem2=-thresh/qmem
+ qmemf=min(qmemf,qmem2)
+!
+!
+! print *,'2',' adjusted massflux by factor ',i,j,k,qmem,qmem2,qmemf,dt
+ endif
+ enddo
+! if(qmemf.lt.1)then
+! write(0,*)'1',' adjusted massflux by factor ',i,j,qmemf
+! endif
+! apply the single factor to every tendency and the precip of column i
+ do k=kts,ktf
+ subq(i,k)=subq(i,k)*qmemf
+ subt(i,k)=subt(i,k)*qmemf
+ outq(i,k)=outq(i,k)*qmemf
+ outt(i,k)=outt(i,k)*qmemf
+ outqc(i,k)=outqc(i,k)*qmemf
+ enddo
+ pret(i)=pret(i)*qmemf
+ enddo
+!
+! check whether routine produces negative q's. This can happen, since
+! tendencies are calculated based on forced q's. This should have no
+!
influence on conservation properties, it scales linear through all +! tendencies +! + thresh=1.e-10 + do i=its,itf + qmemf=1. + do k=kts,ktf-1 + qmem=subq(i,k)+outq(i,k) + if(abs(qmem).gt.0.)then + qtest=q(i,k)+(subq(i,k)+outq(i,k))*dt + if(qtest.lt.thresh)then +! +! qmem2 would be the maximum allowable tendency +! + qmem1=outq(i,k)+subq(i,k) + qmem2=(thresh-q(i,k))/dt + qmemf=min(qmemf,qmem2/qmem1) + qmemf=max(0.,qmemf) +! write(0,*)'4 adjusted tendencies ',i,k,qmem,qmem2,qmemf +! write(0,*)'4 adjusted tendencies ',i,j,k,q(i,k),qmem1,qmemf + endif + endif + enddo +! if(qmemf.lt.1.)write(0,*)'4 adjusted tendencies ',i,j,qmemf + do k=kts,ktf + subq(i,k)=subq(i,k)*qmemf + subt(i,k)=subt(i,k)*qmemf + outq(i,k)=outq(i,k)*qmemf + outt(i,k)=outt(i,k)*qmemf + outqc(i,k)=outqc(i,k)*qmemf + enddo + pret(i)=pret(i)*qmemf + enddo + + END SUBROUTINE neg_check + + + SUBROUTINE cup_output_ens_3d(xf_ens,ierr,dellat,dellaq,dellaqc, & + subt_ens,subq_ens,subt,subq,outtem,outq,outqc, & + zu,sub_mas,pre,pw,xmb,ktop, & + j,name,nx,nx2,iens,ierr2,ierr3,pr_ens, & + maxens3,ensdim, & + sig,APR_GR,APR_W,APR_MC,APR_ST,APR_AS, & + APR_CAPMA,APR_CAPME,APR_CAPMI,closure_n,xland1, & + weight_GR,weight_W,weight_MC,weight_ST,weight_AS,training, & + ipr,jpr,itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + + IMPLICIT NONE +! +! on input +! + + ! only local wrf dimensions are need as of now in this routine + + integer & + ,intent (in ) :: & + ipr,jpr,itf,jtf,ktf, & + its,ite, jts,jte, kts,kte + integer, intent (in ) :: & + j,ensdim,nx,nx2,iens,maxens3,training + ! xf_ens = ensemble mass fluxes + ! pr_ens = precipitation ensembles + ! dellat = change of temperature per unit mass flux of cloud ensemble + ! dellaq = change of q per unit mass flux of cloud ensemble + ! dellaqc = change of qc per unit mass flux of cloud ensemble + ! outtem = output temp tendency (per s) + ! outq = output q tendency (per s) + ! outqc = output qc tendency (per s) + ! pre = output precip + ! xmb = total base mass flux + ! 
xfac1 = correction factor + ! pw = pw -epsilon*pd (ensemble dependent) + ! ierr error value, maybe modified in this routine + ! + real, dimension (its:ite,jts:jte,1:ensdim) & + ,intent (inout) :: & + xf_ens,pr_ens +!srf ------ +! real, dimension (its:ite,jts:jte) & + real, dimension (its:ite,jts:jte) & + ,intent (inout) :: & + APR_GR,APR_W,APR_MC,APR_ST,APR_AS,APR_CAPMA, & + APR_CAPME,APR_CAPMI + real, dimension( its:ite , jts:jte ) & + ,intent(in) :: weight_gr,weight_w,weight_mc,weight_st,weight_as +!-srf--- + real, dimension (its:ite,kts:kte) & + ,intent (out ) :: & + outtem,outq,outqc,subt,subq,sub_mas + real, dimension (its:ite,kts:kte) & + ,intent (in ) :: & + zu + real, dimension (its:ite) & + ,intent (in ) :: & + sig + real, dimension (its:ite) & + ,intent (out ) :: & + pre,xmb + real, dimension (its:ite) & + ,intent (inout ) :: & + closure_n,xland1 + real, dimension (its:ite,kts:kte,1:nx) & + ,intent (in ) :: & + subt_ens,subq_ens,dellat,dellaqc,dellaq,pw + integer, dimension (its:ite) & + ,intent (in ) :: & + ktop + integer, dimension (its:ite) & + ,intent (inout) :: & + ierr,ierr2,ierr3 +! +! local variables in this routine +! + + integer :: & + i,k,n,ncount + real :: & + outtes,ddtes,dtt,dtq,dtqc,dtpw,prerate,clos_wei,xmbhelp + real :: & + dtts,dtqs + real, dimension (its:ite) :: & + xfac1,xfac2 + real, dimension (its:ite):: & + xmb_ske,xmb_ave,xmb_std,xmb_cur,xmbweight + real, dimension (its:ite):: & + pr_ske,pr_ave,pr_std,pr_cur + real, dimension (its:ite,jts:jte):: & + pr_gr,pr_w,pr_mc,pr_st,pr_as,pr_capma, & + pr_capme,pr_capmi + real, dimension (5) :: weight,wm,wm1,wm2,wm3 + real, dimension (its:ite,5) :: xmb_w + +! + character *(*), intent (in) :: & + name + +! + weight(1) = -999. !this will turn off weights + wm(1)=-999. + +! +! + DO k=kts,ktf + do i=its,itf + outtem(i,k)=0. + outq(i,k)=0. + outqc(i,k)=0. + subt(i,k)=0. + subq(i,k)=0. + sub_mas(i,k)=0. + enddo + enddo + do i=its,itf + pre(i)=0. + xmb(i)=0. + xfac1(i)=0. + xfac2(i)=0. 
+ xmbweight(i)=1. + enddo + do i=its,itf + IF(ierr(i).eq.0)then + do n=(iens-1)*nx*nx2*maxens3+1,iens*nx*nx2*maxens3 + if(pr_ens(i,j,n).le.0.)then +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'pr_ens',n,pr_ens(i,j,n),xf_ens(i,j,n) + xf_ens(i,j,n)=0. + endif + enddo + endif + enddo +! + xmb_w=0. +! +!-- now do feedback +! + ddtes=100. + do i=its,itf + if(ierr(i).eq.0)then + k=0 + xmb_ave(i)=0. + do n=(iens-1)*nx*nx2*maxens3+1,iens*nx*nx2*maxens3 + k=k+1 + xmb_ave(i)=xmb_ave(i)+xf_ens(i,j,n) + enddo + xmb_ave(i)=xmb_ave(i)/float(k) + if(xmb_ave(i).le.0.)then + ierr(i)=13 + xmb_ave(i)=0. + endif + xmb(i)=sig(i)*xmb_ave(i) +! --- Now use proper count of how many closures were actually +! used in cup_forcing_ens (including screening of some +! closures over water) to properly normalize xmb + clos_wei=16./max(1.,closure_n(i)) + + if(xmb(i).eq.0.)then + ierr(i)=19 + endif + if(xmb(i).gt.100.)then + ierr(i)=19 + endif + xfac1(i)=xmb(i) + xfac2(i)=xmb(i) + + endif + ENDDO + DO k=kts,ktf + do i=its,itf + dtt =0. + dtts=0. + dtq =0. + dtqs=0. + dtqc=0. + dtpw=0. + IF(ierr(i).eq.0.and.k.le.ktop(i))then + do n=1,nx + dtt =dtt + dellat (i,k,n) + dtts=dtts + subt_ens(i,k,n) + dtq =dtq + dellaq (i,k,n) + dtqs=dtqs + subq_ens(i,k,n) + dtqc=dtqc + dellaqc (i,k,n) + dtpw=dtpw + pw (i,k,n) + enddo + OUTTEM(I,K)= XMB(I)* dtt /float(nx) + SUBT (I,K)= XMB(I)* dtts/float(nx) + OUTQ (I,K)= XMB(I)* dtq /float(nx) + SUBQ (I,K)= XMB(I)* dtqs/float(nx) + OUTQC (I,K)= XMB(I)* dtqc/float(nx) + PRE(I)=PRE(I)+XMB(I)*dtpw/float(nx) + sub_mas(i,k)=zu(i,k)*xmb(i) +! xf_ens(i,j,:)=sig(i)*xf_ens(i,j,:)*dtpw/float(nx) + endif + enddo + enddo + + do i=its,itf + if(ierr(i).eq.0)then + do k=(iens-1)*nx*nx2*maxens3+1,iens*nx*nx2*maxens3 + xf_ens(i,j,k)=sig(i)*xf_ens(i,j,k)*xfac1(i) + enddo + endif + ENDDO + +!srf-fix for preci + do i=its,itf + if(ierr(i).ne. 
0)then
            apr_w (i,j)=0.0
            apr_st(i,j)=0.0
            apr_gr(i,j)=0.0
            apr_mc(i,j)=0.0
            apr_as(i,j)=0.0
          endif
        ENDDO
!srf
   END SUBROUTINE cup_output_ens_3d
!-------------------------------------------------------
!
! cup_up_moisture: moisture budget of the normalized updraft for the
! Grell-Freitas scheme.  Starting from in-cloud saturation, the plume
! entrains/detrains environmental moisture; water in excess of the
! in-cloud saturation value qrch condenses, and cloud water converts
! to precipitation either at a fixed rate c0 (autoconv /= 2) or via a
! CCN-dependent Berry-type autoconversion (autoconv == 2).  The "h"
! work arrays (qch,qrcb,pwh,clw_allh,pwavh,psumh) repeat the budget
! with the clean-air CCN value ccnclean; their ratio defines the
! proportionality factor prop_b used by the Berry branch.
! NOTE(review): itest, aeroevap, iounit and radius are declared but
! never used in this routine; BDISPC is declared but bdsp is always
! set to BDISPM — confirm whether the continental value was intended
! to be selected somewhere.
!
   SUBROUTINE cup_up_moisture(name,ierr,z_cup,qc,qrc,pw,pwav,         &
              ccnclean,p_cup,kbcon,ktop,cd,dby,clw_all,&
              t_cup,q,GAMMA_cup,zu,qes_cup,k22,qe_cup,xl,             &
              ZQEXEC,use_excess,ccn,rho,                              &
              up_massentr,up_massdetr,psum,psumh,                     &
              autoconv,aeroevap,itest,itf,jtf,ktf,j,ipr,jpr,          &
              its,ite, jts,jte, kts,kte                    )

   IMPLICIT NONE
   real, parameter :: BDISPM = 0.366       !Berry--size dispersion (maritime)
   REAL, PARAMETER :: BDISPC = 0.146       !Berry--size dispersion (continental)
!
!  on input
!

   ! only local wrf dimensions are needed as of now in this routine

     integer                                                           &
        ,intent (in   )                   ::                           &
        use_excess,itest,autoconv,aeroevap,itf,jtf,ktf,                &
        its,ite, jts,jte,j,ipr,jpr, kts,kte
  ! cd= detrainment function
  ! q = environmental q on model levels
  ! qe_cup = environmental q on model cloud levels
  ! qes_cup = saturation q on model cloud levels
  ! dby = buoyancy term
  ! cd= detrainment function
  ! zu = normalized updraft mass flux
  ! gamma_cup = gamma on model cloud levels
  !
     real,    dimension (its:ite,kts:kte)                              &
        ,intent (in   )                   ::                           &
        t_cup,p_cup,rho,q,zu,gamma_cup,qe_cup,                         &
        up_massentr,up_massdetr,dby,qes_cup,z_cup,cd
     real,    dimension (its:ite)                                      &
        ,intent (in   )                   ::                           &
        zqexec
  ! entr= entrainment rate
     real                                                              &
        ,intent (in   )                   ::                           &
        ccnclean,xl
     integer, dimension (its:ite)                                      &
        ,intent (in   )                   ::                           &
        kbcon,ktop,k22
!
! input and output
!

   ! ierr error value, maybe modified in this routine

     integer, dimension (its:ite)                                      &
        ,intent (inout)                   ::                           &
        ierr
     character *(*), intent (in)          ::                           &
        name
   ! qc = cloud q (including liquid water) after entrainment
   ! qrch = saturation q in cloud
   ! qrc = liquid water content in cloud after rainout
   ! pw = condensate that will fall out at that level
   ! pwav = total normalized integrated condensate (I1)
   ! c0 = conversion rate (cloud to rain)

     real,    dimension (its:ite,kts:kte)                              &
        ,intent (out  )                   ::                           &
        qc,qrc,pw,clw_all
     ! "h" companions of qc/qrc/pw/clw_all computed with ccnclean
     real,    dimension (its:ite,kts:kte) ::                           &
        qch,qrcb,pwh,clw_allh
     real,    dimension (its:ite)         ::                           &
        pwavh
     real,    dimension (its:ite)                                      &
        ,intent (out  )                   ::                           &
        pwav,psum,psumh
     real,    dimension (its:ite)                                      &
        ,intent (in   )                   ::                           &
        ccn
!
!  local variables in this routine
!

     integer                              ::                           &
        iounit,iprop,iall,i,k,k1,k2
     real                                 ::                           &
        prop_ave,qrcb_h,bdsp,dp,g,rhoc,dh,qrch,c0,dz,radius,berryc0,q1,berryc
     real,    dimension (kts:kte)         ::                           &
        prop_b
!
     prop_b(kts:kte)=0
     iall=0                  ! NOTE(review): never changed below, so the
                             ! iall.eq.1 rain-all-out branch is dead code
     c0=.002                 ! default cloud-to-rain conversion rate (1/m)
     g=9.81
     bdsp=BDISPM
!
!--- no precip for small clouds
!
     if(name.eq.'shallow')c0=0.
     do i=its,itf
       pwav(i)=0.
       pwavh(i)=0.
       psum(i)=0.
       psumh(i)=0.
     enddo
     ! initialize profiles; in-cloud q starts at saturation for active columns
     do k=kts,ktf
       do i=its,itf
         pw(i,k)=0.
         pwh(i,k)=0.
         qc(i,k)=0.
         if(ierr(i).eq.0)qc(i,k)=qes_cup(i,k)
         if(ierr(i).eq.0)qch(i,k)=qes_cup(i,k)
         clw_all(i,k)=0.
         clw_allh(i,k)=0.
         qrc(i,k)=0.
         qrcb(i,k)=0.
       enddo
     enddo
!
! below cloud base the parcel carries the source-level moisture plus the
! boundary-layer excess zqexec; anything above the saturation value at
! kbcon-1 is dumped into pw immediately
!
     if(use_excess < 2 ) then
       ! use_excess 0/1: source value taken from level k22 only
       do i=its,itf
         if(ierr(i).eq.0.)then
           do k=2,kbcon(i)-1
             DZ=Z_cup(i,K)-Z_cup(i,K-1)
             qc(i,k)=qe_cup(i,k22(i))+float(use_excess)*zqexec(i)
             qch(i,k)=qe_cup(i,k22(i))+float(use_excess)*zqexec(i)
             if(qc(i,k).gt.qes_cup(i,kbcon(i)-1))then
               pw(i,k)=zu(i,k)*(qc(i,k)-qes_cup(i,kbcon(i)-1))
               qc(i,k)=qes_cup(i,kbcon(i)-1)
               qch(i,k)=qes_cup(i,kbcon(i)-1)
               PWAV(I)=PWAV(I)+PW(I,K)
               Psum(I)=Psum(I)+pw(I,K)*dz
             endif
           enddo
         endif
       enddo
     else if(use_excess == 2) then
       ! use_excess 2: source value averaged over levels k22-1 .. k22+1
       do i=its,itf
         if(ierr(i).eq.0.)then
           k1=max(1,k22(i)-1)
           k2=k22(i)+1
           do k=2,kbcon(i)-1
             DZ=Z_cup(i,K)-Z_cup(i,K-1)
             qc (i,k)=sum(qe_cup(i,k1:k2))/float(k2-k1+1) +zqexec(i)
             qch(i,k)=sum(qe_cup(i,k1:k2))/float(k2-k1+1) +zqexec(i)
             if(qc(i,k).gt.qes_cup(i,kbcon(i)-1))then
               pw(i,k)=zu(i,k)*(qc(i,k)-qes_cup(i,kbcon(i)-1))
               qc(i,k)=qes_cup(i,kbcon(i)-1)
               qch(i,k)=qes_cup(i,kbcon(i)-1)
               PWAV(I)=PWAV(I)+PW(I,K)
               Psum(I)=Psum(I)+pw(I,K)*dz
             endif
           enddo !k
         endif !ierr
       enddo !i
     endif  ! use_excess

!
! main vertical loop: in-cloud budget between cloud base and cloud top
!
     DO 100 k=kts+1,ktf
     DO 100 i=its,itf
       IF(ierr(i).ne.0)GO TO 100
       IF(K.Lt.KBCON(I))GO TO 100
       IF(K.Gt.KTOP(I))GO TO 100
       rhoc=.5*(rho(i,k)+rho(i,k-1))
       DZ=Z_cup(i,K)-Z_cup(i,K-1)
       DP=p_cup(i,K)-p_cup(i,K-1)
!
!--- saturation in cloud, this is what is allowed to be in it
!
       QRCH=QES_cup(I,K)+(1./XL)*(GAMMA_cup(i,k)                      &
            /(1.+GAMMA_cup(i,k)))*DBY(I,K)
!
!------ 1. steady state plume equation, for what could
!------    be in cloud without condensation
!
!
       qc(i,k)= (qc(i,k-1)*zu(i,k-1)-.5*up_massdetr(i,k-1)* qc(i,k-1)+ &
                 up_massentr(i,k-1)*q(i,k-1))   /                      &
                (zu(i,k-1)-.5*up_massdetr(i,k-1)+up_massentr(i,k-1))
       qch(i,k)= (qch(i,k-1)*zu(i,k-1)-.5*up_massdetr(i,k-1)*qch(i,k-1)+ &
                  up_massentr(i,k-1)*q(i,k-1))   /                       &
                 (zu(i,k-1)-.5*up_massdetr(i,k-1)+up_massentr(i,k-1))

       ! the plume cannot be sub-saturated in cloud
       if(qc(i,k).le.qrch)qc(i,k)=qrch
       if(qch(i,k).le.qrch)qch(i,k)=qrch
!
!------- Total condensed water before rainout
!
       clw_all(i,k)=QC(I,K)-QRCH
       QRC(I,K)=(QC(I,K)-QRCH)       ! /(1.+C0*DZ*zu(i,k))
       clw_allh(i,k)=QCH(I,K)-QRCH
       QRCB(I,K)=(QCH(I,K)-QRCH)     ! /(1.+C0*DZ*zu(i,k))
       IF(autoconv.eq.2) then


!
! normalized berry
!
! first calculate for average conditions, used in cup_dd_edt!
! this will also determine proportionality constant prop_b, which, if applied,
! would give the same results as c0 under these conditions
!
          q1=1.e3*rhoc*qrcb(i,k)   ! g/m^3 ! g[h2o]/cm^3
          berryc0=q1*q1/(60.0*(5.0 + 0.0366*CCNclean/                 &
             ( q1 * BDSP)  ) )  !/(
!         if(i.eq.ipr.and.j.eq.jpr)write(0,*)'cupm',k,rhoc,rho(i,k)
!         qrcb_h=qrcb(i,k)/(1.+c0*dz)
          ! liquid water the c0 closure would leave: fixes prop_b so the
          ! Berry rate reproduces c0 under clean-air conditions
          qrcb_h=((QCH(I,K)-QRCH)*zu(i,k)-qrcb(i,k-1)*(.5*up_massdetr(i,k-1)))/ &
                 (zu(i,k)+.5*up_massdetr(i,k-1)+c0*dz*zu(i,k))
          prop_b(k)=c0*qrcb_h*zu(i,k)/(1.e-3*berryc0)
          pwh(i,k)=1.e-3*berryc0*dz*prop_b(k) ! 2.
          berryc=qrcb(i,k)       ! NOTE(review): berryc is assigned but never read
          qrcb(i,k)=((QCh(I,K)-QRCH)*zu(i,k)-pwh(i,k)-qrcb(i,k-1)*(.5*up_massdetr(i,k-1)))/ &
                    (zu(i,k)+.5*up_massdetr(i,k-1))
!         QRCb(I,K) = qrcb(i,k) - pwh(i,k)
          if(qrcb(i,k).lt.0.)then
            ! more would rain out than is available: cap at zero
            berryc0=(qrcb(i,k-1)*(.5*up_massdetr(i,k-1))-(QCh(I,K)-QRCH)*zu(i,k))/zu(i,k)*1.e-3*dz*prop_b(k)
            pwh(i,k)=zu(i,k)*1.e-3*berryc0*dz*prop_b(k)
            qrcb(i,k)=0.
          endif
!         if(i.eq.ipr.and.j.eq.jpr)write(0,*)'cupm',zu(i,k),pwh(i,k),dz,qrch,qrcb(i,k),clw_allh(i,k)
          QCh(I,K)=QRCb(I,K)+qrch
          PWAVH(I)=PWAVH(I)+pwh(I,K)
          Psumh(I)=Psumh(I)+clw_allh(I,K)*zu(i,k) *dz
!
!  then the real berry
!
          q1=1.e3*rhoc*qrc(i,k)   ! g/m^3 ! g[h2o]/cm^3
          berryc0=q1*q1/(60.0*(5.0 + 0.0366*CCN(i)/                   &
             ( q1 * BDSP)  ) )  !/(
          berryc0=1.e-3*berryc0*dz*prop_b(k) ! 2.
          berryc=qrc(i,k)
          qrc(i,k)=((QC(I,K)-QRCH)*zu(i,k)-zu(i,k)*berryc0-qrc(i,k-1)*(.5*up_massdetr(i,k-1)))/ &
                   (zu(i,k)+.5*up_massdetr(i,k-1))
          if(qrc(i,k).lt.0.)then
            berryc0=((QC(I,K)-QRCH)*zu(i,k)-qrc(i,k-1)*(.5*up_massdetr(i,k-1)))/zu(i,k)
            qrc(i,k)=0.
          endif
          pw(i,k)=berryc0*zu(i,k)
          QC(I,K)=QRC(I,K)+qrch
!
!  if not running with berry at all, do the following
!
       ELSE       !c0=.002
          ! constant-rate autoconversion: implicit in qrc via the
          ! c0*dz*zu term in the denominator
          qrc(i,k)=((QC(I,K)-QRCH)*zu(i,k)-qrc(i,k-1)*(.5*up_massdetr(i,k-1)))/ &
                   (zu(i,k)+.5*up_massdetr(i,k-1)+c0*dz*zu(i,k))
          PW(i,k)=c0*dz*QRC(I,K)*zu(i,k)
          if(qrc(i,k).lt.0)then
            qrc(i,k)=0.
            pw(i,k)=0.
          endif
!
!
          if(iall.eq.1)then
            ! rain everything out (currently unreachable, iall is always 0)
            qrc(i,k)=0.
            pw(i,k)=(QC(I,K)-QRCH)*zu(i,k)
            if(pw(i,k).lt.0.)pw(i,k)=0.
          endif
          QC(I,K)=QRC(I,K)+qrch
       endif !autoconv
!
!--- integrated normalized condensate
!
       PWAV(I)=PWAV(I)+PW(I,K)
       Psum(I)=Psum(I)+clw_all(I,K)*zu(i,k) *dz
 100 CONTINUE
     ! diagnostic only: column-average of the Berry proportionality factor
     prop_ave=0.
     iprop=0
     do k=kts,kte
       prop_ave=prop_ave+prop_b(k)
       if(prop_b(k).gt.0)iprop=iprop+1
     enddo
     iprop=max(iprop,1)
!    write(11,*)'prop_ave = ',prop_ave/float(iprop)
!    print *,'pwav = ',pwav(1)

   END SUBROUTINE cup_up_moisture
!====================================================================
!
! gdinit: initialization for the Grell convection driver.  Zeroes the
! convective tendency arrays only on a cold start (.not.restart, so a
! restart keeps the values read from the restart file) and always
! zeroes the precipitation-type diagnostic accumulators.
! NOTE(review): cp, allowed_to_read and the domain/memory index bounds
! are accepted for interface compatibility but not used here.
!
   SUBROUTINE gdinit(RTHCUTEN,RQVCUTEN,RQCCUTEN,RQICUTEN,             &
                     MASS_FLUX,cp,restart,                            &
                     P_QC,P_QI,P_FIRST_SCALAR,                        &
                     RTHFTEN, RQVFTEN,                                &
                     APR_GR,APR_W,APR_MC,APR_ST,APR_AS,               &
                     APR_CAPMA,APR_CAPME,APR_CAPMI,                   &
                     allowed_to_read,                                 &
                     ids, ide, jds, jde, kds, kde,                    &
                     ims, ime, jms, jme, kms, kme,                    &
                     its, ite, jts, jte, kts, kte                     )
!--------------------------------------------------------------------
   IMPLICIT NONE
!--------------------------------------------------------------------
   LOGICAL , INTENT(IN)           ::  restart,allowed_to_read
   INTEGER , INTENT(IN)           ::  ids, ide, jds, jde, kds, kde,   &
                                      ims, ime, jms, jme, kms, kme,   &
                                      its, ite, jts, jte, kts, kte
   INTEGER , INTENT(IN)           ::  P_FIRST_SCALAR, P_QI, P_QC
   REAL,     INTENT(IN)           ::  cp

   ! convective theta/qv/qc/qi tendencies
   REAL, DIMENSION( ims:ime , kms:kme , jms:jme ) , INTENT(OUT) ::    &
                                                          RTHCUTEN,   &
                                                          RQVCUTEN,   &
                                                          RQCCUTEN,   &
                                                          RQICUTEN

   ! forcing tendencies carried between calls
   REAL, DIMENSION( ims:ime , kms:kme , jms:jme ) , INTENT(OUT) ::    &
                                                          RTHFTEN,    &
                                                          RQVFTEN

   ! 2-d precipitation-type and cap-strength diagnostics, plus mass flux
   REAL, DIMENSION( ims:ime , jms:jme ) , INTENT(OUT) ::              &
                                APR_GR,APR_W,APR_MC,APR_ST,APR_AS,    &
                                APR_CAPMA,APR_CAPME,APR_CAPMI,        &
                                MASS_FLUX

   IF(.not.restart)THEN
     RTHCUTEN=0.
     RQVCUTEN=0.
     RTHFTEN=0.
     RQVFTEN=0.

     ! cloud water / ice tendencies only exist if those tracers are active
     IF (P_QC .ge. P_FIRST_SCALAR) THEN
       RQCCUTEN=0.
     ENDIF

     IF (P_QI .ge. P_FIRST_SCALAR) THEN
       RQICUTEN=0.
     ENDIF

     mass_flux=0.

   ENDIF
   ! diagnostics are reset on every init, restart or not
   APR_GR=0.
   APR_ST=0.
   APR_W=0.
   APR_MC=0.
   APR_AS=0.
   APR_CAPMA=0.
   APR_CAPME=0.
   APR_CAPMI=0.

   END SUBROUTINE gdinit


!--------------------------------------------------------------------
!
! satvap: saturation vapor pressure as a function of temperature temp2
! (Kelvin).  The constants (6.1071, 1013.246, -7.90298, ...) match the
! Goff-Gratch formulation; below -20 C saturation over ice is used,
! otherwise saturation over water.
! NOTE(review): result appears to be in hPa given the reference
! pressures used - confirm against callers.
!
   real function satvap(temp2)
   implicit none
   real :: temp2, temp, toot, toto, eilog, tsot,                      &
  &        ewlog, ewlog2, ewlog3, ewlog4
   temp = temp2-273.155
   if (temp.lt.-20.) then   !!!!
ice saturation + toot = 273.16 / temp2 + toto = 1 / toot + eilog = -9.09718 * (toot - 1) - 3.56654 * (log(toot) / & + & log(10.)) + .876793 * (1 - toto) + (log(6.1071) / log(10.)) + satvap = 10 ** eilog + else + tsot = 373.16 / temp2 + ewlog = -7.90298 * (tsot - 1) + 5.02808 * & + & (log(tsot) / log(10.)) + ewlog2 = ewlog - 1.3816e-07 * & + & (10 ** (11.344 * (1 - (1 / tsot))) - 1) + ewlog3 = ewlog2 + .0081328 * & + & (10 ** (-3.49149 * (tsot - 1)) - 1) + ewlog4 = ewlog3 + (log(1013.246) / log(10.)) + satvap = 10 ** ewlog4 + end if + return + end function +#if defined(mpas) + SUBROUTINE CUP_gf_sh(xmb_out,zo,OUTQC,J,AAEQ,T,Q,Z1, & + TN,QO,PO,PRE,P,OUTT,OUTQ,DTIME,ktau,PSUR,US,VS, & + TCRIT, & + ztexec,zqexec,ccn,ccnclean,rho,dxCell,areaCell,dhdt, & + kpbl,kbcon,ktop,cupclws,k22, & !-lxz + xland,gsw,tscl_kf, & + xl,rv,cp,g,ichoice,ipr,jpr,ierr,ierrc, & + autoconv,itf,jtf,ktf, & + use_excess,its,ite, jts,jte, kts,kte & + ) +#else + SUBROUTINE CUP_gf_sh(xmb_out,zo,OUTQC,J,AAEQ,T,Q,Z1, & + TN,QO,PO,PRE,P,OUTT,OUTQ,DTIME,ktau,PSUR,US,VS, & + TCRIT, & + ztexec,zqexec,ccn,ccnclean,rho,dx,dhdt, & + kpbl,kbcon,ktop,cupclws,k22, & !-lxz + xland,gsw,tscl_kf, & + xl,rv,cp,g,ichoice,ipr,jpr,ierr,ierrc, & + autoconv,itf,jtf,ktf, & + use_excess,its,ite, jts,jte, kts,kte & + ) +#endif + + IMPLICIT NONE + + integer & + ,intent (in ) :: & + autoconv,itf,jtf,ktf,ktau,use_excess, & + its,ite, jts,jte, kts,kte,ipr,jpr + integer, intent (in ) :: & + j,ichoice + ! + ! + ! + real, dimension (its:ite,jts:jte) & + ,intent (in ) :: & + gsw + ! outtem = output temp tendency (per s) + ! outq = output q tendency (per s) + ! outqc = output qc tendency (per s) + ! pre = output precip + real, dimension (its:ite,kts:kte) & + ,intent (inout ) :: & + cupclws,OUTT,OUTQ,OUTQC + real, dimension (its:ite) & + ,intent (out ) :: & + pre,xmb_out + integer, dimension (its:ite) & + ,intent (out ) :: & + kbcon,ktop,k22 + integer, dimension (its:ite) & + ,intent (in ) :: & + kpbl + ! + ! 
basic environmental input includes moisture convergence (mconv) + ! omega (omeg), windspeed (us,vs), and a flag (aaeq) to turn off + ! convection for this call only and at that particular gridpoint + ! + real, dimension (its:ite,kts:kte) & + ,intent (in ) :: & + rho,T,PO,P,US,VS,tn,dhdt + real, dimension (its:ite,kts:kte) & + ,intent (inout) :: & + Q,QO + real, dimension (its:ite) & + ,intent (in ) :: & + ztexec,zqexec,ccn,Z1,PSUR,AAEQ,xland + +#if defined(mpas) + real,intent(in):: ccnclean,dtime,tcrit,xl,cp,rv,g + real,intent(in),dimension(its:ite):: tscl_kf,dxCell,areaCell + real:: xff_max +#else + real & + ,intent (in ) :: & + tscl_kf,dx,ccnclean,dtime,tcrit,xl,cp,rv,g +#endif + +! +! +!***************** the following are your basic environmental +! variables. They carry a "_cup" if they are +! on model cloud levels (staggered). They carry +! an "o"-ending (z becomes zo), if they are the forced +! variables. They are preceded by x (z becomes xz) +! to indicate modification by some typ of cloud +! + ! z = heights of model levels + ! q = environmental mixing ratio + ! qes = environmental saturation mixing ratio + ! t = environmental temp + ! p = environmental pressure + ! he = environmental moist static energy + ! hes = environmental saturation moist static energy + ! z_cup = heights of model cloud levels + ! q_cup = environmental q on model cloud levels + ! qes_cup = saturation q on model cloud levels + ! t_cup = temperature (Kelvin) on model cloud levels + ! p_cup = environmental pressure + ! he_cup = moist static energy on model cloud levels + ! hes_cup = saturation moist static energy on model cloud levels + ! gamma_cup = gamma on model cloud levels +! +! + ! hcd = moist static energy in downdraft + ! zd normalized downdraft mass flux + ! dby = buoancy term + ! entr = entrainment rate + ! zd = downdraft normalized mass flux + ! entr= entrainment rate + ! hcd = h in model cloud + ! bu = buoancy term + ! zd = normalized downdraft mass flux + ! 
gamma_cup = gamma on model cloud levels + ! qcd = cloud q (including liquid water) after entrainment + ! qrch = saturation q in cloud + ! pwd = evaporate at that level + ! pwev = total normalized integrated evaoprate (I2) + ! entr= entrainment rate + ! z1 = terrain elevation + ! entr = downdraft entrainment rate + ! jmin = downdraft originating level + ! kdet = level above ground where downdraft start detraining + ! psur = surface pressure + ! z1 = terrain elevation + ! pr_ens = precipitation ensemble + ! xf_ens = mass flux ensembles + ! massfln = downdraft mass flux ensembles used in next timestep + ! omeg = omega from large scale model + ! mconv = moisture convergence from large scale model + ! zd = downdraft normalized mass flux + ! zu = updraft normalized mass flux + ! dir = "storm motion" + ! mbdt = arbitrary numerical parameter + ! dtime = dt over which forcing is applied + ! iact_gr_old = flag to tell where convection was active + ! kbcon = LFC of parcel from k22 + ! k22 = updraft originating level + ! icoic = flag if only want one closure (usually set to zero!) + ! dby = buoancy term + ! ktop = cloud top (output) + ! xmb = total base mass flux + ! hc = cloud moist static energy + ! hkb = moist static energy at originating level + + real, dimension (its:ite,kts:kte) :: & + entr_rate_2d,mentrd_rate_2d,he,hes,qes,z, & + heo,heso,qeso,zo, & + xhe,xhes,xqes,xz,xt,xq, & + + qes_cup,q_cup,he_cup,hes_cup,z_cup,p_cup,gamma_cup,t_cup, & + qeso_cup,qo_cup,heo_cup,heso_cup,zo_cup,po_cup,gammao_cup, & + tn_cup, & + xqes_cup,xq_cup,xhe_cup,xhes_cup,xz_cup,xp_cup,xgamma_cup, & + xt_cup, & + + xlamue,dby,qc,qrcd,pwd,pw,hcd,qcd,dbyd,hc,qrc,zu,zd,clw_all, & + dbyo,qco,qrcdo,pwdo,pwo,hcdo,qcdo,dbydo,hco,qrco,zuo,zdo, & + xdby,xqc,xqrcd,xpwd,xpw,xhcd,xqcd,xhc,xqrc,xzu,xzd, & + + ! cd = detrainment function for updraft + ! cdd = detrainment function for downdraft + ! dellat = change of temperature per unit mass flux of cloud ensemble + ! 
dellaq = change of q per unit mass flux of cloud ensemble + ! dellaqc = change of qc per unit mass flux of cloud ensemble + + cd,cdd,DELLAH,DELLAQ,DELLAT,DELLAQC,dsubt,dsubh,dsubq,subt,subq + + ! aa0 cloud work function for downdraft + ! edt = epsilon + ! aa0 = cloud work function without forcing effects + ! aa1 = cloud work function with forcing effects + ! xaa0 = cloud work function with cloud effects (ensemble dependent) + ! edt = epsilon + + real, dimension (its:ite) :: & + edt,edto,edtx,AA1,AA0,XAA0,HKB, & + HKBO,XHKB,QKB,QKBO, & + xmbmax,XMB,XPWAV,XPWEV,PWAV,PWEV,PWAVO, & + PWEVO,BU,BUD,BUO,cap_max,xland1, & + cap_max_increment,closure_n,psum,psumh,sig,zuhe + integer, dimension (its:ite) :: & + kzdown,KDET,KB,JMIN,kstabi,kstabm,K22x, & !-lxz + KBCONx,KBx,KTOPx,ierr,ierr2,ierr3,KBMAX + + integer :: & + nall,iedt,nens,nens3,ki,I,K,KK,iresult + real :: & + day,dz,dzo,mbdt,entr_rate,radius,entrd_rate,mentrd_rate, & + zcutdown,edtmax,edtmin,depth_min,zkbmax,z_detr,zktop, & + massfld,dh,cap_maxs,trash,frh,xlamdd,fsum + + real detdo1,detdo2,entdo,dp,subin,detdo,entup, & + detup,subdown,entdoj,entupk,detupk,totmas + real :: power_entr,zustart,zufinal,dzm1,dzp1 + + + integer :: tun_lim,jprnt,k1,k2,kbegzu,kfinalzu,kstart,jmini,levadj + logical :: keep_going + real xff_shal(9),blqe,xkshal + character*50 :: ierrc(its:ite) + real, dimension (its:ite,kts:kte) :: & + up_massentr,up_massdetr,dd_massentr,dd_massdetr & + ,up_massentro,up_massdetro,dd_massentro,dd_massdetro + real, dimension (kts:kte) :: smth + zustart=.1 + zufinal=1. + levadj=4 + power_entr=2. + day=86400. + do i=its,itf + xmb_out(i)=0. + xland1(i)=1. + if(xland(i).gt.1.5)xland1(i)=0. + cap_max_increment(i)=25. + ierrc(i)=" " + enddo +! +!--- initial entrainment rate (these may be changed later on in the +!--- program +! + entr_rate =.2/200. + tun_lim=7 + +! +!--- initial detrainmentrates +! + do k=kts,ktf + do i=its,itf + up_massentro(i,k)=0. + up_massdetro(i,k)=0. 
+ z(i,k)=zo(i,k) + xz(i,k)=zo(i,k) + qrco(i,k)=0. + cd(i,k)=1.*entr_rate + dellaqc(i,k)=0. + enddo + enddo +! +!--- max/min allowed value for epsilon (ratio downdraft base mass flux/updraft +! +!--- minimum depth (m), clouds must have +! + depth_min=50. +! +!--- maximum depth (mb) of capping +!--- inversion (larger cap = no convection) +! + cap_maxs=25. + DO i=its,itf + kbmax(i)=1 + aa0(i)=0. + aa1(i)=0. + enddo + do i=its,itf + cap_max(i)=cap_maxs + iresult=0 + enddo +! +!--- max height(m) above ground where updraft air can originate +! + zkbmax=4000. +! +!--- calculate moist static energy, heights, qes +! + call cup_env(z,qes,he,hes,t,q,p,z1, & + psur,ierr,tcrit,-1,xl,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + call cup_env(zo,qeso,heo,heso,tn,qo,po,z1, & + psur,ierr,tcrit,-1,xl,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + +! +!--- environmental values on cloud levels +! + call cup_env_clev(t,qes,q,he,hes,z,p,qes_cup,q_cup,he_cup, & + hes_cup,z_cup,p_cup,gamma_cup,t_cup,psur, & + ierr,z1,xl,rv,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + call cup_env_clev(tn,qeso,qo,heo,heso,zo,po,qeso_cup,qo_cup, & + heo_cup,heso_cup,zo_cup,po_cup,gammao_cup,tn_cup,psur, & + ierr,z1,xl,rv,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + do i=its,itf + if(ierr(i).eq.0)then + if(aaeq(i).lt.-0.1)then + ierr(i)=20 + endif +! + do k=kts,ktf + if(zo_cup(i,k).gt.zkbmax+z1(i))then + kbmax(i)=k + go to 25 + endif + enddo + 25 continue +! + kbmax(i)=min(kbmax(i),ktf-4) + endif + enddo + +! +! +! +!------- DETERMINE LEVEL WITH HIGHEST MOIST STATIC ENERGY CONTENT - K22 +! + CALL cup_MAXIMI(HEO_CUP,3,KBMAX,K22,ierr, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + DO 36 i=its,itf + if(kpbl(i).gt.5)cap_max(i)=po_cup(i,kpbl(i)) + IF(ierr(I).eq.0.)THEN + IF(K22(I).GT.KBMAX(i))then + ierr(i)=2 + ierrc(i)="could not find k22" + endif + if(kpbl(i).gt.5)then + k22(i)=kpbl(i) + ierr(i)=0 + ierrc(i)="reset to zero becausof kpbl" + endif + else + ierrc(i)="why here? 
" + endif +! if(j.eq.jpr .and. i.eq.ipr)write(0,*)'initial k22 = ',k22(ipr),kpbl(i) + 36 CONTINUE +! +!--- DETERMINE THE LEVEL OF CONVECTIVE CLOUD BASE - KBCON +! + + do i=its,itf + IF(ierr(I).eq.0.)THEN + if(use_excess == 2) then + k1=max(1,k22(i)-1) + k2=k22(i)+1 + hkb(i) =sum(he_cup(i,k1:k2))/float(k2-k1+1)+xl*zqexec(i)+cp*ztexec(i) + hkbo(i)=sum(heo_cup(i,k1:k2))/float(k2-k1+1)+xl*zqexec(i)+cp*ztexec(i) + qkbo(i)=sum(qo_cup(i,k1:k2))/float(k2-k1+1)+xl*zqexec(i) +! write(0,*)sum(heo_cup(i,k1:k2))/float(k2-k1+1),heo_cup(i,k1),heo(i,k1:k2) + else if(use_excess <= 1) then + hkb(i)=he_cup(i,k22(i))+float(use_excess)*(xl*zqexec(i)+cp*ztexec(i)) + hkbo(i)=heo_cup(i,k22(i))+float(use_excess)*(xl*zqexec(i)+cp*ztexec(i)) + qkbo(i)=qo_cup(i,k22(i))+float(use_excess)*(xl*zqexec(i)) + endif ! excess + do k=1,k22(i) + hkb(i)=max(hkb(i),he_cup(i,k)) + hkbo(i)=max(hkbo(i),heo_cup(i,k)) + qkbo(i)=max(qkbo(i),qo_cup(i,k)) + enddo + endif ! ierr + enddo + call cup_kbcon(ierrc,cap_max_increment,5,k22,kbcon,heo_cup,heso_cup, & + hkbo,ierr,kbmax,po_cup,cap_max, & + xl,cp,ztexec,zqexec,use_excess, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) +! +!--- increase detrainment in stable layers +! + DO 887 i=its,itf + IF(ierr(I).eq.0.)THEN + if(kbcon(i).gt.ktf-4)then + ierr(i)=231 + go to 887 + endif + do k=kts,ktf + frh = min(qo_cup(i,k)/qeso_cup(i,k),1.) + entr_rate_2d(i,k)=entr_rate*(1.3-frh) + cd(i,k)=entr_rate_2d(i,k) + enddo + zuhe(i)=zustart + kstart=1 + frh=(zufinal-zustart)/((float(kbcon(i))**power_entr)-(float(kstart)**power_entr)) + dh=zuhe(i)-frh*(float(kstart)**power_entr) + do k=kstart,kbcon(i)-1 + dz=z_cup(i,k+1)-z_cup(i,k) + cd(i,k)=0. + entr_rate_2d(i,k)=((frh*(float((k+1))**power_entr)+dh)/zuhe(i)-1.+cd(i,k)*dz)/dz + zuhe(i)=zuhe(i)+entr_rate_2d(i,k)*dz*zuhe(i)-cd(i,k)*dz*zuhe(i) +! 
if(i.eq.ipr.and.j.eq.jpr)write(0,*)'entr = ',k,entr_rate_2d(i,k),dh,frh,zuhe(i),dz + enddo + frh=-(0.1-zuhe(i))/((float(kbcon(i)+tun_lim)**power_entr)-(float(kbcon(i)-1)**power_entr)) + dh=zuhe(i)+frh*(float(kbcon(i))**power_entr) + do k=kbcon(i),kbcon(i)+tun_lim + dz=z_cup(i,k+1)-z_cup(i,k) + cd(i,k)=-((-frh*(float((k+1))**power_entr)+dh)/zuhe(i)-1.-entr_rate_2d(i,k)*dz)/dz + zuhe(i)=zuhe(i)+entr_rate_2d(i,k)*dz*zuhe(i)-cd(i,k)*dz*zuhe(i) +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)'entr = ',k,entr_rate_2d(i,k),cd(i,k),zuhe(i) + enddo + do k=kbcon(i)+tun_lim+1,ktf + entr_rate_2d(i,k)=0. + cd(i,k)=0. + enddo + + + ENDIF + 887 enddo +! +! calculate mass entrainment and detrainment +! + do k=kts,ktf + do i=its,itf + hc(i,k)=0. + DBY(I,K)=0. + hco(i,k)=0. + DBYo(I,K)=0. + enddo + enddo + do i=its,itf + IF(ierr(I).eq.0.)THEN + do k=1,kbcon(i)-1 + hc(i,k)=hkb(i) + hco(i,k)=hkbo(i) + qco(i,k)=qkbo(i) + enddo + k=kbcon(i) + hc(i,k)=hkb(i) + qco(i,k)=qkbo(i) + DBY(I,Kbcon(i))=Hkb(I)-HES_cup(I,K) + hco(i,k)=hkbo(i) + DBYo(I,Kbcon(i))=Hkbo(I)-HESo_cup(I,K) + trash=QESo_cup(I,K)+(1./XL)*(GAMMAo_cup(i,k) & + /(1.+GAMMAo_cup(i,k)))*DBYo(I,K) + qrco(i,k)=max(0.,qco(i,k)-trash) + endif ! ierr + enddo +! +! + do 42 i=its,itf + if(ierr(i).eq.0)then + zu(i,1)=zustart + zuo(i,1)=zustart +! mass entrainment and detrinament is defined on model levels + do k=2,ktf-1 !kbcon(i)+4 ! ktf-1 + dz=zo_cup(i,k)-zo_cup(i,k-1) + up_massentro(i,k-1)=entr_rate_2d(i,k-1)*dz*zuo(i,k-1) + up_massdetro(i,k-1)=cd(i,k-1)*dz*zuo(i,k-1) + zuo(i,k)=zuo(i,k-1)+up_massentro(i,k-1)-up_massdetro(i,k-1) + if(zuo(i,k).lt.0.05)then + zuo(i,k)=.05 + up_massdetro(i,k-1)=zuo(i,k-1)-.05 + up_massentro(i,k-1) + cd(i,k-1)=up_massdetro(i,k-1)/dz/zuo(i,k-1) + endif + zu(i,k)=zuo(i,k) + up_massentr(i,k-1)=up_massentro(i,k-1) + up_massdetr(i,k-1)=up_massdetro(i,k-1) +! 
zu(i,k)=max(0.01,zu(i,k-1)+up_massentr(i,k-1)-up_massdetr(i,k-1)) + enddo + do k=kbcon(i)+1,ktf-1 + hc(i,k)=(hc(i,k-1)*zu(i,k-1)-.5*up_massdetr(i,k-1)*hc(i,k-1)+ & + up_massentr(i,k-1)*he(i,k-1)) / & + (zu(i,k-1)-.5*up_massdetr(i,k-1)+up_massentr(i,k-1)) + dby(i,k)=hc(i,k)-hes_cup(i,k) + hco(i,k)=(hco(i,k-1)*zuo(i,k-1)-.5*up_massdetro(i,k-1)*hco(i,k-1)+ & + up_massentro(i,k-1)*heo(i,k-1)) / & + (zuo(i,k-1)-.5*up_massdetro(i,k-1)+up_massentro(i,k-1)) + dbyo(i,k)=hco(i,k)-heso_cup(i,k) + enddo + do k=kbcon(i)+1,ktf + if(dbyo(i,k).lt.0)then + ktop(i)=k-1 + go to 41 + endif + enddo +41 continue + if(ktop(i).lt.kbcon(i)+1)then + ierr(i)=5 + ierrc(i)='ktop is less than kbcon+1' + go to 42 + endif + if(ktop(i).gt.ktf-2)then + ierr(i)=5 + ierrc(i)="ktop is larger than ktf-2" + go to 42 + endif + do k=kbcon(i)+1,ktop(i) + trash=QESo_cup(I,K)+(1./XL)*(GAMMAo_cup(i,k) & + /(1.+GAMMAo_cup(i,k)))*DBYo(I,K) + qco(i,k)= (qco(i,k-1)*zuo(i,k-1)-.5*up_massdetr(i,k-1)* qco(i,k-1)+ & + up_massentr(i,k-1)*qo(i,k-1)) / & + (zuo(i,k-1)-.5*up_massdetr(i,k-1)+up_massentr(i,k-1)) + qrco(i,k)=max(0.,qco(i,k)-trash) + cupclws(i,k)=qrco(i,k)*.5 + enddo + do k=ktop(i)+1,ktf + HC(i,K)=hes_cup(i,k) + HCo(i,K)=heso_cup(i,k) + DBY(I,K)=0. + DBYo(I,K)=0. + zu(i,k)=0. + zuo(i,k)=0. + cd(i,k)=0. + entr_rate_2d(i,k)=0. + up_massentr(i,k)=0. + up_massdetr(i,k)=0. + up_massentro(i,k)=0. + up_massdetro(i,k)=0. + enddo +! if(i.eq.ipr.and.j.eq.jpr)then +! write(0,*)'hcnew = ' +! do k=1,ktf +! write(0,*)k,hco(i,k),dbyo(i,k) +! enddo +! endif + endif +42 continue +! enddo +! +!--- calculate workfunctions for updrafts +! 
+ call cup_up_aa0(aa0,z,zu,dby,GAMMA_CUP,t_cup, & + kbcon,ktop,ierr, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + call cup_up_aa0(aa1,zo,zuo,dbyo,GAMMAo_CUP,tn_cup, & + kbcon,ktop,ierr, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) + do i=its,itf + if(ierr(i).eq.0)then + if(aa1(i).eq.0.)then + ierr(i)=17 + ierrc(i)="cloud work function zero" + endif + endif + enddo +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! +!--- change per unit mass that a model cloud would modify the environment +! +!--- 1. in bottom layer +! + do k=kts,ktf + do i=its,itf + dellah(i,k)=0. + dsubt(i,k)=0. + dsubh(i,k)=0. + dellaq(i,k)=0. + dsubq(i,k)=0. + enddo + enddo +! +!---------------------------------------------- cloud level ktop +! +!- - - - - - - - - - - - - - - - - - - - - - - - model level ktop-1 +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! +!---------------------------------------------- cloud level k+2 +! +!- - - - - - - - - - - - - - - - - - - - - - - - model level k+1 +! +!---------------------------------------------- cloud level k+1 +! +!- - - - - - - - - - - - - - - - - - - - - - - - model level k +! +!---------------------------------------------- cloud level k +! +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! . . . +! +!---------------------------------------------- cloud level 3 +! +!- - - - - - - - - - - - - - - - - - - - - - - - model level 2 +! +!---------------------------------------------- cloud level 2 +! +!- - - - - - - - - - - - - - - - - - - - - - - - model level 1 + + do i=its,itf + if(ierr(i).eq.0)then + dp=100.*(po_cup(i,1)-po_cup(i,2)) + dsubt(i,1)=0. + dsubq(i,1)=0. + do k=kts+1,ktop(i) + subin=0. + subdown=0. +! these three are only used at or near mass detrainment and/or entrainment levels + entupk=0. + detupk=0. +! entrainment/detrainment for updraft + entup=up_massentro(i,k) + detup=up_massdetro(i,k) +! +! 
SPECIAL LEVELS +! + if(k.eq.ktop(i))then + detupk=zuo(i,ktop(i)) + subin=0. + subdown=0. + entup=0. + detup=0. + endif + totmas=subin-subdown+detup-entup & + -entupk+detupk+zuo(i,k+1)-zuo(i,k) +! print *,'*********************',k,totmas +! write(0,123)k,subin+zuo(i,k+1),subdown-zuo(i,k),detup,entup, & +! detdo,entdo,entupk,detupk +! write(8,*)'totmas = ',k,totmas + if(abs(totmas).gt.1.e-6)then + write(0,*)'*********************',i,j,k,totmas + print *,jmin(i),k22(i),kbcon(i),ktop(i) + write(0,123)k,subin,subdown,detup,entup, & + entupk,detupk,zuo(i,k+1),zuo(i,k) +123 formAT(1X,i2,10E12.4) +! call wrf_error_fatal ( 'totmas .gt.1.e-6' ) + endif + dp=100.*(po_cup(i,k)-po_cup(i,k+1)) + dellah(i,k)=(detup*.5*(HCo(i,K+1)+HCo(i,K)) & + -entup*heo(i,k) & + +subin*heo_cup(i,k+1) & + -subdown*heo_cup(i,k) & + +detupk*(hco(i,ktop(i))-heo_cup(i,ktop(i))) & + -entupk*heo_cup(i,k22(i)) & + )*g/dp + dellaq(i,k)=(detup*.5*(qco(i,K+1)+qco(i,K)-qrco(i,k+1)-qrco(i,k)) & + -entup*qo(i,k) & + +subin*qo_cup(i,k+1) & + -subdown*qo_cup(i,k) & + +detupk*(qco(i,ktop(i))-qrco(i,ktop(i))-qo_cup(i,ktop(i))) & + -entupk*qo_cup(i,k22(i)) & + )*g/dp + +! +! updraft subsidence only +! + if(k.lt.ktop(i))then + dsubt(i,k)=(zuo(i,k+1)*heo_cup(i,k+1) & + -zuo(i,k)*heo_cup(i,k))*g/dp + dsubq(i,k)=(zuo(i,k+1)*qo_cup(i,k+1) & + -zuo(i,k)*qo_cup(i,k))*g/dp +! if(i.eq.ipr.and.j.eq.jpr)then +! write(0,*)'dq3',k,zuo(i,k+1)*heo_cup(i,k+1),zuo(i,k)*heo_cup(i,k) +! endif + endif +! + enddo ! k + + endif + enddo +! +!-- take out cloud liquid water for detrainment +! + do k=kts,ktf-1 + do i=its,itf + dellaqc(i,k)=0. 
+ if(ierr(i).eq.0)then + if(k.eq.ktop(i)-0)dellaqc(i,k)= & + .01*zuo(i,ktop(i))*qrco(i,ktop(i))* & + 9.81/(po_cup(i,k)-po_cup(i,k+1)) + if(k.lt.ktop(i).and.k.gt.kbcon(i))then + dz=zo_cup(i,k+1)-zo_cup(i,k) + dellaqc(i,k)=.01*9.81*up_massdetro(i,k)*.5*(qrco(i,k)+qrco(i,k+1))/ & + (po_cup(i,k)-po_cup(i,k+1)) + endif + if(dellaqc(i,k).lt.0)write(0,*)'neg della',i,j,k,ktop(i),qrco(i,k), & + qrco(i,k+1),up_massdetro(i,k),zuo(i,ktop(i)) + dellaqc(i,k)=max(0.,dellaqc(i,k)) + endif + enddo + enddo +! +!--- using dellas, calculate changed environmental profiles +! + mbdt=3.e-4 + + do k=kts,ktf + do i=its,itf + dellat(i,k)=0. + if(ierr(i).eq.0)then + dsubh(i,k)=dsubt(i,k) + dellaq(i,k)=dellaq(i,k)+dellaqc(i,k) + dellaqc(i,k)=0. + XHE(I,K)=(dsubt(i,k)+DELLAH(I,K))*MBDT+HEO(I,K) + XQ(I,K)=(dsubq(i,k)+DELLAQ(I,K))*MBDT+QO(I,K) + DELLAT(I,K)=(1./cp)*(DELLAH(I,K)-xl*DELLAQ(I,K)) + dSUBT(I,K)=(1./cp)*(dsubt(i,k)-xl*dsubq(i,k)) + XT(I,K)= (DELLAT(I,K)+dsubt(i,k))*MBDT+TN(I,K) + IF(XQ(I,K).LE.0.)XQ(I,K)=1.E-08 + ENDIF + enddo + enddo + do i=its,itf + if(ierr(i).eq.0)then + xhkb(i)=hkbo(i)+(dsubh(i,k22(i))+DELLAH(I,K22(i)))*MBDT + XHE(I,ktf)=HEO(I,ktf) + XQ(I,ktf)=QO(I,ktf) + XT(I,ktf)=TN(I,ktf) + IF(XQ(I,ktf).LE.0.)XQ(I,ktf)=1.E-08 + endif + enddo +! +!--- calculate moist static energy, heights, qes +! + call cup_env(xz,xqes,xhe,xhes,xt,xq,po,z1, & + psur,ierr,tcrit,-1,xl,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) +! +!--- environmental values on cloud levels +! + call cup_env_clev(xt,xqes,xq,xhe,xhes,xz,po,xqes_cup,xq_cup, & + xhe_cup,xhes_cup,xz_cup,po_cup,gamma_cup,xt_cup,psur, & + ierr,z1,xl,rv,cp, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) +! +! +!**************************** static control +! +!--- moist static energy inside cloud +! +! do i=its,itf +! if(ierr(i).eq.0)then +! xhkb(i)=xhe(i,k22(i)) +! endif +! enddo + do k=kts,ktf + do i=its,itf + xhc(i,k)=0. + xDBY(I,K)=0. + enddo + enddo + do i=its,itf + if(ierr(i).eq.0)then +! if(use_excess == 2) then +! 
k1=max(1,k22(i)-1) +! k2=k22(i)+1 +! xhkb(i) =sum(xhe_cup(i,k1:k2))/float(k2-k1+1)+xl*zqexec(i)+cp*ztexec(i) +! else if(use_excess <= 1) then +! xhkb(i)=xhe_cup(i,k22(i))+float(use_excess)*(xl*zqexec(i)+cp*ztexec(i)) +! endif + + do k=1,kbcon(i)-1 + xhc(i,k)=xhkb(i) + enddo + k=kbcon(i) + xhc(i,k)=xhkb(i) + xDBY(I,Kbcon(i))=xHkb(I)-xHES_cup(I,K) + endif !ierr + enddo +! +! + do i=its,itf + if(ierr(i).eq.0)then + xzu(i,:)=zuo(i,:) + do k=kbcon(i)+1,ktop(i) + xhc(i,k)=(xhc(i,k-1)*xzu(i,k-1)-.5*up_massdetro(i,k-1)*xhc(i,k-1)+ & + up_massentro(i,k-1)*xhe(i,k-1)) / & + (xzu(i,k-1)-.5*up_massdetro(i,k-1)+up_massentro(i,k-1)) + xdby(i,k)=xhc(i,k)-xhes_cup(i,k) + enddo + do k=ktop(i)+1,ktf + xHC(i,K)=xhes_cup(i,k) + xDBY(I,K)=0. + xzu(i,k)=0. + enddo + endif + enddo + +! +!--- workfunctions for updraft +! + call cup_up_aa0(xaa0,xz,xzu,xdby,GAMMA_CUP,xt_cup, & + kbcon,ktop,ierr, & + itf,jtf,ktf, & + its,ite, jts,jte, kts,kte) +! +! now for shallow forcing +! + do i=its,itf + xmb(i)=0. + xff_shal(1:9)=0. + if(ierr(i).eq.0)then + xmbmax(i)=0.1 + xkshal=(xaa0(i)-aa1(i))/mbdt + if(xkshal.ge.0.)xkshal=+1.e6 + if(xkshal.gt.-1.e-4 .and. xkshal.lt.0.)xkshal=-1.e-4 + xff_shal(1)=max(0.,-(aa1(i)-aa0(i))/(xkshal*dtime)) + xff_shal(1)=min(xmbmax(i),xff_shal(1)) + xff_shal(2)=max(0.,-(aa1(i)-aa0(i))/(xkshal*dtime)) + xff_shal(2)=min(xmbmax(i),xff_shal(2)) + xff_shal(3)=max(0.,-(aa1(i)-aa0(i))/(xkshal*dtime)) + xff_shal(3)=min(xmbmax(i),xff_shal(3)) + if(aa1(i).le.0)then + xff_shal(1)=0. + xff_shal(2)=0. + xff_shal(3)=0. + endif + if(aa1(i)-aa0(i).le.0.)then + xff_shal(1)=0. + xff_shal(2)=0. + xff_shal(3)=0. + endif +! boundary layer QE (from Saulo Freitas) + blqe=0. + trash=0. + if(k22(i).lt.kpbl(i)+1)then + do k=1,kbcon(i)-1 + blqe=blqe+100.*dhdt(i,k)*(po_cup(i,k)-po_cup(i,k+1))/g + enddo + trash=max((hc(i,kbcon(i))-he_cup(i,kbcon(i))),1.e1) + xff_shal(7)=max(0.,blqe/trash) + xff_shal(7)=min(xmbmax(i),xff_shal(7)) + else + xff_shal(7)=0. 
+ endif +#if defined(mpas) + xff_max=0 + if(xkshal.lt.-1.1e-04)then ! .and. & +! ((aa1(i)-aa0(i).gt.0.) .or. (xff_shal(7).gt.0)))then + xff_shal(4)=max(0.,-aa0(i)/(xkshal*tscl_kf(i))) + xff_shal(4)=min(xmbmax(i),xff_shal(4)) + xff_shal(5)=xff_shal(4) + xff_shal(6)=xff_shal(4) + else + xff_shal(4)=0. + xff_shal(5)=0. + xff_shal(6)=0. + endif +#else + if(xkshal.lt.-1.1e-04)then ! .and. & +! ((aa1(i)-aa0(i).gt.0.) .or. (xff_shal(7).gt.0)))then + xff_shal(4)=max(0.,-aa0(i)/(xkshal*tscl_KF)) + xff_shal(4)=min(xmbmax(i),xff_shal(4)) + xff_shal(5)=xff_shal(4) + xff_shal(6)=xff_shal(4) + else + xff_shal(4)=0. + xff_shal(5)=0. + xff_shal(6)=0. + endif +#endif +! write(0,888)'i0=',i,j,kpbl(i),blqe,xff_shal(7) +!888 format(a3,3(1x,i3),2e12.4) + xff_shal(8)= xff_shal(7) + xff_shal(9)= xff_shal(7) + fsum=0. + do k=1,9 + if(ichoice.gt.0)then + xmb(i)=xmb(i)+xff_shal(ichoice) + else if(ichoice.eq.0)then + xmb(i)=xmb(i)+xff_shal(k) + else if(ichoice.lt.0)then + xff_max = maxval(xff_shal(1:9)) + xmb(i)=xmb(i)+xff_max + endif + fsum=fsum+1. + enddo + xmb(i)=min(xmbmax(i),xmb(i)/fsum) +! if(i.eq.ipr.and.j.eq.jpr)write(0,*)',ierr,xffs',ierr(i),xff_shal(1:9),xmb(i),xmbmax(i) + if(xmb(i).eq.0.)ierr(i)=22 + if(xmb(i).eq.0.)ierrc(i)="22" + if(xmb(i).lt.0.)then + ierr(i)=21 + ierrc(i)="21" +! write(0,*)'neg xmb,i,j,xmb for shallow = ',i,j,k22(i),ierr(i) + endif + endif + if(ierr(i).ne.0)then + k22(i)=0 + kbcon(i)=0 + ktop(i)=0 + xmb(i)=0 + do k=kts,ktf + outt(i,k)=0. + outq(i,k)=0. + outqc(i,k)=0. + enddo + else if(ierr(i).eq.0)then +! +! got the mass flux, sanity check, first for heating rates +! + trash=0. +! kmaxx=0 + do k=2,ktop(i) + trash=max(trash,86400.*(dsubt(i,k)+dellat(i,k))*xmb(i)) + enddo + if(trash.gt.100.)then + xmb(i)=xmb(i)*100./trash + endif + trash=0. + do k=2,ktop(i) + trash=min(trash,86400.*(dsubt(i,k)+dellat(i,k))*xmb(i)) + enddo + if(trash.lt.-100.)then + xmb(i)=-xmb(i)*100./trash + endif +! +! 
sanity check on moisture tendencies: do not allow anything that may allow neg +! tendencies +! + do k=2,ktop(i) + trash=q(i,k)+(dsubq(i,k)+dellaq(i,k))*xmb(i)*dtime + if(trash.lt.1.e-12)then +! max allowable tendency over tendency that would lead to too small mix ratios +! + trash=(1.e-12 -q(i,k))/((dsubq(i,k)+dellaq(i,k))*dtime) + xmb(i)=(1.e-12 -q(i,k))/((dsubq(i,k)+dellaq(i,k))*dtime) + endif + enddo + xmb_out(i)=xmb(i) +! +! final tendencies +! + do k=2,ktop(i) + outt(i,k)=(dsubt(i,k)+dellat(i,k))*xmb(i) + outq(i,k)=(dsubq(i,k)+dellaq(i,k))*xmb(i) + enddo + endif + enddo +! +! done shallow +!--------------------------done------------------------------ +! + + END SUBROUTINE CUP_gf_sh +END MODULE module_cu_gf diff --git a/src/core_atmosphere/physics/physics_wrf/module_cu_ntiedtke.F b/src/core_atmosphere/physics/physics_wrf/module_cu_ntiedtke.F new file mode 100644 index 0000000000..02fa16cc8b --- /dev/null +++ b/src/core_atmosphere/physics/physics_wrf/module_cu_ntiedtke.F @@ -0,0 +1,3860 @@ +!================================================================================================================= +! copied for implementation in MPAS from WRF version 3.8.1: + +! modifications made to sourcecode: +! * used preprocessing option to replace module_model_constants with mpas_atmphys_constants; used preprocessing +! option to include the horizontal dependence of the array znu. +! Laura D. Fowler (laura@ucar.edu) / 2016-09-19. +! * added the three corrections available from module_cu_ntiedtke.F available in the WRF github repository Z(not +! in the released version WRF 3.8.1. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-18. + +!================================================================================================================= +!----------------------------------------------------------------------- +! +!wrf:model_layer:physics +! +!####################tiedtke scheme######################### +! m.tiedtke e.c.m.w.f. 1989 +! 
j.morcrette 1992 +!-------------------------------------------- +! modifications +! C. zhang & Yuqing Wang 2011-2014 +! +! modified from IPRC IRAM - yuqing wang, university of hawaii +! & ICTP REGCM4.4 +! +! The current version is stable. There are many updates to the old Tiedtke scheme (cu_physics=6) +! update notes: +! the new Tiedtke scheme is similar to the Tiedtke scheme used in REGCM4 and ECMWF cy40r1. +! the major differences to the old Tiedtke (cu_physics=6) scheme are, +! (a) New trigger functions for deep and shallow convections (Jakob and Siebesma 2003; +! Bechtold et al. 2004, 2008, 2014). +! (b) Non-equilibrium situations are considered in the closure for deep convection +! (Bechtold et al. 2014). +! (c) New convection time scale for the deep convection closure (Bechtold et al. 2008). +! (d) New entrainment and detrainment rates for all convection types (Bechtold et al. 2008). +! (e) New formula for the conversion from cloud water/ice to rain/snow (Sundqvist 1978). +! (f) Different way to include cloud scale pressure gradients (Gregory et al. 1997; +! Wu and Yanai 1994) +! +! other reference: tiedtke (1989, mwr, 117, 1779-1800) +! IFS documentation - cy33r1, cy37r2, cy38r1, cy40r1 +! 
+!########################################################### + +module module_cu_ntiedtke + +!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +#if defined(mpas) + use mpas_atmphys_constants, only: rd=>R_d, rv=>R_v, & + & cpd=>cp, alv=>xlv, als=>xls, alf=>xlf, g=>gravity +#else + use module_model_constants, only:rd=>r_d, rv=>r_v, & + & cpd=>cp, alv=>xlv, als=>xls, alf=>xlf, g +#endif + + implicit none + real,private :: rcpd,vtmpc1,tmelt, & + c1es,c2es,c3les,c3ies,c4les,c4ies,c5les,c5ies,zrg + + real,private :: r5alvcp,r5alscp,ralvdcp,ralsdcp,ralfdcp,rtwat,rtber,rtice + real,private :: entrdd,cmfcmax,cmfcmin,cmfdeps,zdnoprc,cprcon + integer,private :: momtrans + + parameter( & + rcpd=1.0/cpd, & + tmelt=273.16, & + zrg=1.0/g, & + c1es=610.78, & + c2es=c1es*rd/rv, & + c3les=17.2693882, & + c3ies=21.875, & + c4les=35.86, & + c4ies=7.66, & + c5les=c3les*(tmelt-c4les), & + c5ies=c3ies*(tmelt-c4ies), & + r5alvcp=c5les*alv*rcpd, & + r5alscp=c5ies*als*rcpd, & + ralvdcp=alv*rcpd, & + ralsdcp=als*rcpd, & + ralfdcp=alf*rcpd, & + rtwat=tmelt, & + rtber=tmelt-5., & + rtice=tmelt-23., & + vtmpc1=rv/rd-1.0 ) +! +! entrdd: average entrainment & detrainment rate for downdrafts +! ------ +! + parameter(entrdd = 2.0e-4) +! +! cmfcmax: maximum massflux value allowed for updrafts etc +! ------- +! + parameter(cmfcmax = 1.0) +! +! cmfcmin: minimum massflux value (for safety) +! ------- +! + parameter(cmfcmin = 1.e-10) +! +! cmfdeps: fractional massflux for downdrafts at lfs +! ------- +! + parameter(cmfdeps = 0.30) + +! zdnoprc: deep cloud is thicker than this height (Unit:Pa) +! + parameter(zdnoprc = 2.0e4) +! ------- +! +! cprcon: coefficient from cloud water to rain water +! + parameter(cprcon = 1.4e-3) +! ------- +! +! momtrans: momentum transport method +! ( 1 = IFS40r1 method; 2 = new method ) +! + parameter(momtrans = 2 ) +! ------- +! + logical :: isequil +! isequil: representing equilibrium and nonequilibrium convection +! ( .false. [default]; .true. 
[experimental]. Ref. Bechtold et al. 2014 JAS ) +! + parameter(isequil = .true. ) +! +!-------------------- +! switches for deep, mid, shallow convections, downdraft, and momentum transport +! ------------------ + logical :: lmfpen,lmfmid,lmfscv,lmfdd,lmfdudv + parameter(lmfpen=.true.,lmfmid=.true.,lmfscv=.true.,lmfdd=.true.,lmfdudv=.true.) +!-------------------- +!#################### end of variables definition########################## +!----------------------------------------------------------------------- +! +contains +!----------------------------------------------------------------------- + subroutine cu_ntiedtke( & + dt,itimestep,stepcu & + ,raincv,pratec,qfx,hfx & + ,u3d,v3d,w,t3d,qv3d,qc3d,qi3d,pi3d,rho3d & + ,qvften,thften & + ,dz8w,pcps,p8w,xland,cu_act_flag,dx & + ,ids,ide, jds,jde, kds,kde & + ,ims,ime, jms,jme, kms,kme & + ,its,ite, jts,jte, kts,kte & + ,rthcuten,rqvcuten,rqccuten,rqicuten & + ,rucuten, rvcuten & + ,f_qv ,f_qc ,f_qr ,f_qi ,f_qs & + ) +!------------------------------------------------------------------- + implicit none +!------------------------------------------------------------------- +!-- u3d 3d u-velocity interpolated to theta points (m/s) +!-- v3d 3d v-velocity interpolated to theta points (m/s) +!-- th3d 3d potential temperature (k) +!-- t3d temperature (k) +!-- qv3d 3d water vapor mixing ratio (kg/kg) +!-- qc3d 3d cloud mixing ratio (kg/kg) +!-- qi3d 3d ice mixing ratio (kg/kg) +!-- rho3d 3d air density (kg/m^3) +!-- p8w 3d hydrostatic pressure at full levels (pa) +!-- pcps 3d hydrostatic pressure at half levels (pa) +!-- pi3d 3d exner function (dimensionless) +!-- qvften 3d total advective + PBL moisture tendency (kg kg-1 s-1) +!-- thften 3d total advective + PBL + radiative temperature tendency (k s-1) +!-- rthcuten theta tendency due to +! cumulus scheme precipitation (k/s) +!-- rucuten u wind tendency due to +! cumulus scheme precipitation (k/s) +!-- rvcuten v wind tendency due to +! 
cumulus scheme precipitation (k/s) +!-- rqvcuten qv tendency due to +! cumulus scheme precipitation (kg/kg/s) +!-- rqccuten qc tendency due to +! cumulus scheme precipitation (kg/kg/s) +!-- rqicuten qi tendency due to +! cumulus scheme precipitation (kg/kg/s) +!-- rainc accumulated total cumulus scheme precipitation (mm) +!-- raincv cumulus scheme precipitation (mm) +!-- pratec precipitiation rate from cumulus scheme (mm/s) +!-- dz8w dz between full levels (m) +!-- qfx upward moisture flux at the surface (kg/m^2/s) +!-- hfx upward heat flux at the surface (w/m^2) +!-- dt time step (s) +!-- ids start index for i in domain +!-- ide end index for i in domain +!-- jds start index for j in domain +!-- jde end index for j in domain +!-- kds start index for k in domain +!-- kde end index for k in domain +!-- ims start index for i in memory +!-- ime end index for i in memory +!-- jms start index for j in memory +!-- jme end index for j in memory +!-- kms start index for k in memory +!-- kme end index for k in memory +!-- its start index for i in tile +!-- ite end index for i in tile +!-- jts start index for j in tile +!-- jte end index for j in tile +!-- kts start index for k in tile +!-- kte end index for k in tile +!------------------------------------------------------------------- + integer, intent(in) :: ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte, & + itimestep, & + stepcu + + real, intent(in) :: & + dt + real, dimension(ims:ime, jms:jme), intent(in) :: & + dx + + real, dimension(ims:ime, jms:jme), intent(in) :: & + xland + + real, dimension(ims:ime, jms:jme), intent(inout) :: & + raincv, pratec + + logical, dimension(ims:ime,jms:jme), intent(inout) :: & + cu_act_flag + + + real, dimension(ims:ime, kms:kme, jms:jme), intent(in) :: & + dz8w, & + pcps, & + p8w, & + pi3d, & + qc3d, & + qvften, & + thften, & + qi3d, & + qv3d, & + rho3d, & + t3d, & + u3d, & + v3d, & + w + real, dimension(ims:ime, jms:jme) :: & + qfx, & + hfx + 
+!--------------------------- optional vars ---------------------------- + + real, dimension(ims:ime, kms:kme, jms:jme), & + optional, intent(inout) :: & + rqccuten, & + rqicuten, & + rqvcuten, & + rthcuten, & + rucuten, & + rvcuten + +! +! flags relating to the optional tendency arrays declared above +! models that carry the optional tendencies will provdide the +! optional arguments at compile time; these flags all the model +! to determine at run-time whether a particular tracer is in +! use or not. +! + logical, optional :: & + f_qv & + ,f_qc & + ,f_qr & + ,f_qi & + ,f_qs + +!--------------------------- local vars ------------------------------ + real :: & + delt, & + rdelt + + real , dimension(its:ite) :: & + rcs, & + rn, & + evap, & + heatflux + integer , dimension(its:ite) :: slimsk + + + real , dimension(its:ite, kts:kte+1) :: & + prsi, & + ghti, & + zi + + real , dimension(its:ite, kts:kte) :: & + dot, & + prsl, & + q1, & + q2, & + q3, & + q1b, & + t1b, & + q11, & + q12, & + t1, & + u1, & + v1, & + zl, & + omg, & + ghtl + + integer, dimension(its:ite) :: & + kbot, & + ktop + + integer :: & + i, & + im, & + j, & + k, & + km, & + kp, & + kx, & + kx1 + +!-------other local variables---- + integer :: zz +!----------------------------------------------------------------------- +! +! +!*** check to see if this is a convection timestep +! + +!----------------------------------------------------------------------- + do j=jts,jte + do i=its,ite + cu_act_flag(i,j)=.true. + enddo + enddo + + im=ite-its+1 + kx=kte-kts+1 + kx1=kx+1 + delt=dt*stepcu + rdelt=1./delt + +!------------- j loop (outer) -------------------------------------------------- + + do j=jts,jte + +! --------------- compute zi and zl ----------------------------------------- + do i=its,ite + zi(i,kts)=0.0 + enddo +! + do k=kts,kte + do i=its,ite + zi(i,k+1)=zi(i,k)+dz8w(i,k,j) + enddo + enddo +! + do k=kts,kte + do i=its,ite + zl(i,k)=0.5*(zi(i,k)+zi(i,k+1)) + enddo + enddo + +! 
--------------- end compute zi and zl ------------------------------------- + do i=its,ite + slimsk(i)=int(abs(xland(i,j)-2.)) + enddo + + do k=kts,kte + kp=k+1 + do i=its,ite + dot(i,k)=-0.5*g*rho3d(i,k,j)*(w(i,k,j)+w(i,kp,j)) + enddo + enddo + + do k=kts,kte + zz = kte+1-k + do i=its,ite + u1(i,zz)=u3d(i,k,j) + v1(i,zz)=v3d(i,k,j) + t1(i,zz)=t3d(i,k,j) + q1(i,zz)=qv3d(i,k,j) + if(itimestep == 1) then + q1b(i,zz)=0. + t1b(i,zz)=0. + else + q1b(i,zz)=qvften(i,k,j) + t1b(i,zz)=thften(i,k,j) + endif + q2(i,zz)=qc3d(i,k,j) + q3(i,zz)=qi3d(i,k,j) + omg(i,zz)=dot(i,k) + ghtl(i,zz)=zl(i,k) + prsl(i,zz) = pcps(i,k,j) + enddo + enddo + + do k=kts,kte+1 + zz = kte+2-k + do i=its,ite + ghti(i,zz) = zi(i,k) + prsi(i,zz) = p8w(i,k,j) + enddo + enddo +! + do i=its,ite + evap(i) = qfx(i,j) + heatflux(i)= hfx(i,j) + enddo +! +!######################################################################## + call tiecnvn(u1,v1,t1,q1,q2,q3,q1b,t1b,ghtl,ghti,omg,prsl,prsi,evap,heatflux, & + rn,slimsk,im,kx,kx1,delt,dx) + + do i=its,ite + raincv(i,j)=rn(i)/stepcu + pratec(i,j)=rn(i)/(stepcu * dt) + enddo + + do k=kts,kte + zz = kte+1-k + do i=its,ite + rthcuten(i,k,j)=(t1(i,zz)-t3d(i,k,j))/pi3d(i,k,j)*rdelt + rqvcuten(i,k,j)=(q1(i,zz)-qv3d(i,k,j))*rdelt + rucuten(i,k,j) =(u1(i,zz)-u3d(i,k,j))*rdelt + rvcuten(i,k,j) =(v1(i,zz)-v3d(i,k,j))*rdelt + enddo + enddo + + if(present(rqccuten))then + if ( f_qc ) then + do k=kts,kte + zz = kte+1-k + do i=its,ite + rqccuten(i,k,j)=(q2(i,zz)-qc3d(i,k,j))*rdelt + enddo + enddo + endif + endif + + if(present(rqicuten))then + if ( f_qi ) then + do k=kts,kte + zz = kte+1-k + do i=its,ite + rqicuten(i,k,j)=(q3(i,zz)-qi3d(i,k,j))*rdelt + enddo + enddo + endif + endif + + + enddo + + end subroutine cu_ntiedtke + +!==================================================================== + subroutine ntiedtkeinit(rthcuten,rqvcuten,rqccuten,rqicuten, & + rucuten,rvcuten,rthften,rqvften, & + restart,p_qc,p_qi,p_first_scalar, & + allowed_to_read, & + ids, ide, jds, 
jde, kds, kde, & + ims, ime, jms, jme, kms, kme, & + its, ite, jts, jte, kts, kte) +!-------------------------------------------------------------------- + implicit none +!-------------------------------------------------------------------- + logical , intent(in) :: allowed_to_read,restart + integer , intent(in) :: ids, ide, jds, jde, kds, kde, & + ims, ime, jms, jme, kms, kme, & + its, ite, jts, jte, kts, kte + integer , intent(in) :: p_first_scalar, p_qi, p_qc + + real, dimension( ims:ime , kms:kme , jms:jme ) , intent(out) :: & + rthcuten, & + rqvcuten, & + rqccuten, & + rqicuten, & + rucuten,rvcuten,& + rthften,rqvften + + integer :: i, j, k, itf, jtf, ktf + + jtf=min0(jte,jde-1) + ktf=min0(kte,kde-1) + itf=min0(ite,ide-1) + + if(.not.restart)then + do j=jts,jtf + do k=kts,ktf + do i=its,itf + rthcuten(i,k,j)=0. + rqvcuten(i,k,j)=0. + rucuten(i,k,j)=0. + rvcuten(i,k,j)=0. + enddo + enddo + enddo + + DO j=jts,jtf + DO k=kts,ktf + DO i=its,itf + rthften(i,k,j)=0. + rqvften(i,k,j)=0. + ENDDO + ENDDO + ENDDO + + if (p_qc .ge. p_first_scalar) then + do j=jts,jtf + do k=kts,ktf + do i=its,itf + rqccuten(i,k,j)=0. + enddo + enddo + enddo + endif + + if (p_qi .ge. p_first_scalar) then + do j=jts,jtf + do k=kts,ktf + do i=its,itf + rqicuten(i,k,j)=0. + enddo + enddo + enddo + endif + endif + + end subroutine ntiedtkeinit + +!----------------------------------------------------------------- +! level 1 subroutine 'tiecnvn' +!----------------------------------------------------------------- + subroutine tiecnvn(pu,pv,pt,pqv,pqc,pqi,pqvf,ptf,poz,pzz,pomg, & + & pap,paph,evap,hfx,zprecc,lndj,lq,km,km1,dt,dx) +!----------------------------------------------------------------- +! this is the interface between the model and the mass +! flux convection module +!----------------------------------------------------------------- + implicit none +! 
+ real pu(lq,km), pv(lq,km), pt(lq,km), pqv(lq,km) + real poz(lq,km), pomg(lq,km), evap(lq), zprecc(lq) + real pzz(lq,km1) + + real pum1(lq,km), pvm1(lq,km), ztt(lq,km), & + & ptte(lq,km), pqte(lq,km), pvom(lq,km), pvol(lq,km), & + & pverv(lq,km), pgeo(lq,km), pap(lq,km), paph(lq,km1) + real pqhfl(lq), zqq(lq,km), & + & prsfc(lq), pssfc(lq), pcte(lq,km), & + & phhfl(lq), hfx(lq), pgeoh(lq,km1) + real ztp1(lq,km), zqp1(lq,km), ztu(lq,km), zqu(lq,km), & + & zlu(lq,km), zlude(lq,km), zmfu(lq,km), zmfd(lq,km), & + & zqsat(lq,km), pqc(lq,km), pqi(lq,km), zrain(lq) + real pqvf(lq,km), ptf(lq,km) + real dx(lq) + + integer icbot(lq), ictop(lq), ktype(lq), lndj(lq) + logical locum(lq) +! + real ztmst,fliq,fice,ztc,zalf,tt + integer i,j,k,lq,km,km1 + real dt,ztpp1 + real zew,zqs,zcor +! + ztmst=dt +! +! masv flux diagnostics. +! + do j=1,lq + zrain(j)=0.0 + locum(j)=.false. + prsfc(j)=0.0 + pssfc(j)=0.0 + pqhfl(j)=evap(j) + phhfl(j)=hfx(j) + pgeoh(j,km1)=g*pzz(j,km1) + end do +! +! convert model variables for mflux scheme +! + do k=1,km + do j=1,lq + pcte(j,k)=0.0 + pvom(j,k)=0.0 + pvol(j,k)=0.0 + ztp1(j,k)=pt(j,k) + zqp1(j,k)=pqv(j,k)/(1.0+pqv(j,k)) + pum1(j,k)=pu(j,k) + pvm1(j,k)=pv(j,k) + pverv(j,k)=pomg(j,k) + pgeo(j,k)=g*poz(j,k) + pgeoh(j,k)=g*pzz(j,k) + tt=ztp1(j,k) + zew = foeewm(tt) + zqs = zew/pap(j,k) + zqs = min(0.5,zqs) + zcor = 1./(1.-vtmpc1*zqs) + zqsat(j,k)=zqs*zcor + pqte(j,k)=pqvf(j,k) + zqq(j,k) =pqte(j,k) + ptte(j,k)=ptf(j,k) + ztt(j,k) =ptte(j,k) + end do + end do +! +!----------------------------------------------------------------------- +!* 2. call 'cumastrn'(master-routine for cumulus parameterization) +! + call cumastrn & + & (lq, km, km1, km-1, ztp1, & + & zqp1, pum1, pvm1, pverv, zqsat,& + & pqhfl, ztmst, pap, paph, pgeo, & + & ptte, pqte, pvom, pvol, prsfc,& + & pssfc, locum, & + & ktype, icbot, ictop, ztu, zqu, & + & zlu, zlude, zmfu, zmfd, zrain,& + & pcte, phhfl, lndj, pgeoh, dx) +! +! 
to include the cloud water and cloud ice detrained from convection +! + do k=1,km + do j=1,lq + if(pcte(j,k).gt.0.) then + fliq=foealfa(ztp1(j,k)) + fice=1.0-fliq + pqc(j,k)=pqc(j,k)+fliq*pcte(j,k)*ztmst + pqi(j,k)=pqi(j,k)+fice*pcte(j,k)*ztmst + endif + end do + end do +! + do k=1,km + do j=1,lq + pt(j,k)= ztp1(j,k)+(ptte(j,k)-ztt(j,k))*ztmst + zqp1(j,k)=zqp1(j,k)+(pqte(j,k)-zqq(j,k))*ztmst + pqv(j,k)=zqp1(j,k)/(1.0-zqp1(j,k)) + end do + end do + + do j=1,lq + zprecc(j)=amax1(0.0,(prsfc(j)+pssfc(j))*ztmst) + end do + + if (lmfdudv) then + do k=1,km + do j=1,lq + pu(j,k)=pu(j,k)+pvom(j,k)*ztmst + pv(j,k)=pv(j,k)+pvol(j,k)*ztmst + end do + end do + endif +! + return + end subroutine tiecnvn + +!############################################################# +! +! level 2 subroutines +! +!############################################################# +!*********************************************************** +! subroutine cumastrn +!*********************************************************** + subroutine cumastrn & + & (klon, klev, klevp1, klevm1, pten, & + & pqen, puen, pven, pverv, pqsen,& + & pqhfl, ztmst, pap, paph, pgeo, & + & ptte, pqte, pvom, pvol, prsfc,& + & pssfc, ldcum, & + & ktype, kcbot, kctop, ptu, pqu,& + & plu, plude, pmfu, pmfd, prain,& + & pcte, phhfl, lndj, zgeoh, dx) + implicit none +! +!***cumastrn* master routine for cumulus massflux-scheme +! m.tiedtke e.c.m.w.f. 1986/1987/1989 +! modifications +! y.wang i.p.r.c 2001 +! c.zhang 2012 +!***purpose +! ------- +! this routine computes the physical tendencies of the +! prognostic variables t,q,u and v due to convective processes. +! processes considered are: convective fluxes, formation of +! precipitation, evaporation of falling rain below cloud base, +! saturated cumulus downdrafts. +!***method +! ------ +! parameterization is done using a massflux-scheme. +! (1) define constants and parameters +! (2) specify values (t,q,qs...) at half levels and +! 
initialize updraft- and downdraft-values in 'cuinin' +! (3) calculate cloud base in 'cutypen', calculate cloud types in cutypen, +! and specify cloud base massflux +! (4) do cloud ascent in 'cuascn' in absence of downdrafts +! (5) do downdraft calculations: +! (a) determine values at lfs in 'cudlfsn' +! (b) determine moist descent in 'cuddrafn' +! (c) recalculate cloud base massflux considering the +! effect of cu-downdrafts +! (6) do final adjustments to convective fluxes in 'cuflxn', +! do evaporation in subcloud layer +! (7) calculate increments of t and q in 'cudtdqn' +! (8) calculate increments of u and v in 'cududvn' +!***externals. +! ---------- +! cuinin: initializes values at vertical grid used in cu-parametr. +! cutypen: cloud types, 1: deep cumulus 2: shallow cumulus +! cuascn: cloud ascent for entraining plume +! cudlfsn: determines values at lfs for downdrafts +! cuddrafn:does moist descent for cumulus downdrafts +! cuflxn: final adjustments to convective fluxes (also in pbl) +! cudtdqn: updates tendencies for t and q +! cududvn: updates tendencies for u and v +!***switches. +! -------- +! lmfmid=.t. midlevel convection is switched on +! lmfdd=.t. cumulus downdrafts switched on +! lmfdudv=.t. cumulus friction switched on +!*** +! model parameters (defined in subroutine cuparam) +! ------------------------------------------------ +! entrdd entrainment rate for cumulus downdrafts +! cmfcmax maximum massflux value allowed for +! cmfcmin minimum massflux value (for safety) +! cmfdeps fractional massflux for downdrafts at lfs +! cprcon coefficient for conversion from cloud water to rain +!***reference. +! ---------- +! 
paper on massflux scheme (tiedtke,1989) +!----------------------------------------------------------------- + integer klev,klon,klevp1,klevm1 + real pten(klon,klev), pqen(klon,klev),& + & puen(klon,klev), pven(klon,klev),& + & ptte(klon,klev), pqte(klon,klev),& + & pvom(klon,klev), pvol(klon,klev),& + & pqsen(klon,klev), pgeo(klon,klev),& + & pap(klon,klev), paph(klon,klevp1),& + & pverv(klon,klev), pqhfl(klon),& + & phhfl(klon) + real ptu(klon,klev), pqu(klon,klev),& + & plu(klon,klev), plude(klon,klev),& + & pmfu(klon,klev), pmfd(klon,klev),& + & prain(klon),& + & prsfc(klon), pssfc(klon) + real ztenh(klon,klev), zqenh(klon,klev),& + & zgeoh(klon,klevp1), zqsenh(klon,klev),& + & ztd(klon,klev), zqd(klon,klev),& + & zmfus(klon,klev), zmfds(klon,klev),& + & zmfuq(klon,klev), zmfdq(klon,klev),& + & zdmfup(klon,klev), zdmfdp(klon,klev),& + & zmful(klon,klev), zrfl(klon),& + & zuu(klon,klev), zvu(klon,klev),& + & zud(klon,klev), zvd(klon,klev),& + & zlglac(klon,klev) + real pmflxr(klon,klevp1), pmflxs(klon,klevp1) + real zhcbase(klon),& + & zmfub(klon), zmfub1(klon),& + & zdhpbl(klon) + real zsfl(klon), zdpmel(klon,klev),& + & pcte(klon,klev), zcape(klon),& + & zcape1(klon), zcape2(klon),& + & ztauc(klon), ztaubl(klon),& + & zheat(klon) + real wup(klon), zdqcv(klon) + real wbase(klon), zmfuub(klon) + real upbl(klon) + real dx(klon) + real pmfude_rate(klon,klev), pmfdde_rate(klon,klev) + real zmfuus(klon,klev), zmfdus(klon,klev) + real zuv2(klon,klev),ztenu(klon,klev),ztenv(klon,klev) + real zmfuvb(klon),zsum12(klon),zsum22(klon) + integer ilab(klon,klev), idtop(klon),& + & ictop0(klon), ilwmin(klon) + integer kdpl(klon) + integer kcbot(klon), kctop(klon),& + & ktype(klon), lndj(klon) + logical ldcum(klon) + logical loddraf(klon), llo1, llo2(klon) + +! 
local varaiables + real zcons,zcons2,zqumqe,zdqmin,zdh,zmfmax + real zalfaw,zalv,zqalv,zc5ldcp,zc4les,zhsat,zgam,zzz,zhhat + real zpbmpt,zro,zdz,zdp,zeps,zfac,wspeed + integer jl,jk,ik + integer ikb,ikt,icum,itopm2 + real ztmst,ztau,zerate,zderate,zmfa + real zmfs(klon),pmean(klev),zlon + real zduten,zdvten,ztdis,pgf_u,pgf_v +!------------------------------------------- +! 1. specify constants and parameters +!------------------------------------------- + zcons=1./(g*ztmst) + zcons2=3./(g*ztmst) + +!-------------------------------------------------------------- +!* 2. initialize values at vertical grid points in 'cuini' +!-------------------------------------------------------------- + call cuinin & + & (klon, klev, klevp1, klevm1, pten, & + & pqen, pqsen, puen, pven, pverv,& + & pgeo, paph, zgeoh, ztenh, zqenh,& + & zqsenh, ilwmin, ptu, pqu, ztd, & + & zqd, zuu, zvu, zud, zvd, & + & pmfu, pmfd, zmfus, zmfds, zmfuq,& + & zmfdq, zdmfup, zdmfdp, zdpmel, plu, & + & plude, ilab) + +!---------------------------------- +!* 3.0 cloud base calculations +!---------------------------------- +!* (a) determine cloud base values in 'cutypen', +! and the cumulus type 1 or 2 +! ------------------------------------------- + call cutypen & + & ( klon, klev, klevp1, klevm1, pqen,& + & ztenh, zqenh, zqsenh, zgeoh, paph,& + & phhfl, pqhfl, pgeo, pqsen, pap,& + & pten, lndj, ptu, pqu, ilab,& + & ldcum, kcbot, ictop0, ktype, wbase, plu, kdpl) + +!* (b) assign the first guess mass flux at cloud base +! ------------------------------------------ + do jl=1,klon + zdhpbl(jl)=0.0 + upbl(jl) = 0.0 + idtop(jl)=0 + end do + + do jk=2,klev + do jl=1,klon + if(jk.ge.kcbot(jl) .and. ldcum(jl)) then + zdhpbl(jl)=zdhpbl(jl)+(alv*pqte(jl,jk)+cpd*ptte(jl,jk))& + & *(paph(jl,jk+1)-paph(jl,jk)) + if(lndj(jl) .eq. 
0) then + wspeed = sqrt(puen(jl,jk)**2 + pven(jl,jk)**2) + upbl(jl) = upbl(jl) + wspeed*(paph(jl,jk+1)-paph(jl,jk)) + end if + end if + end do + end do + + do jl=1,klon + if(ldcum(jl)) then + ikb=kcbot(jl) + zmfmax = (paph(jl,ikb)-paph(jl,ikb-1))*zcons2 + if(ktype(jl) == 1) then + zmfub(jl)= 0.1*zmfmax + else if ( ktype(jl) == 2 ) then + zqumqe = pqu(jl,ikb) + plu(jl,ikb) - zqenh(jl,ikb) + zdqmin = max(0.01*zqenh(jl,ikb),1.e-10) + zdh = cpd*(ptu(jl,ikb)-ztenh(jl,ikb)) + alv*zqumqe + zdh = g*max(zdh,1.e5*zdqmin) + if ( zdhpbl(jl) > 0. ) then + zmfub(jl) = zdhpbl(jl)/zdh + zmfub(jl) = min(zmfub(jl),zmfmax) + else + zmfub(jl) = 0.1*zmfmax + ldcum(jl) = .false. + end if + end if + else + zmfub(jl) = 0. + end if + end do +!------------------------------------------------------ +!* 4.0 determine cloud ascent for entraining plume +!------------------------------------------------------ +!* (a) do ascent in 'cuasc'in absence of downdrafts +!---------------------------------------------------------- + call cuascn & + & (klon, klev, klevp1, klevm1, ztenh,& + & zqenh, puen, pven, pten, pqen,& + & pqsen, pgeo, zgeoh, pap, paph,& + & pqte, pverv, ilwmin, ldcum, zhcbase,& + & ktype, ilab, ptu, pqu, plu,& + & zuu, zvu, pmfu, zmfub,& + & zmfus, zmfuq, zmful, plude, zdmfup,& + & kcbot, kctop, ictop0, icum, ztmst,& + & zqsenh, zlglac, lndj, wup, wbase, kdpl, pmfude_rate ) + +!* (b) check cloud depth and change entrainment rate accordingly +! calculate precipitation rate (for downdraft calculation) +!------------------------------------------------------------------ + do jl=1,klon + if ( ldcum(jl) ) then + ikb = kcbot(jl) + itopm2 = kctop(jl) + zpbmpt = paph(jl,ikb) - paph(jl,itopm2) + if ( ktype(jl) == 1 .and. zpbmpt < zdnoprc ) ktype(jl) = 2 + if ( ktype(jl) == 2 .and. 
zpbmpt >= zdnoprc ) ktype(jl) = 1 + ictop0(jl) = kctop(jl) + end if + zrfl(jl)=zdmfup(jl,1) + end do + + do jk=2,klev + do jl=1,klon + zrfl(jl)=zrfl(jl)+zdmfup(jl,jk) + end do + end do + + do jk = 1,klev + do jl = 1,klon + pmfd(jl,jk) = 0. + zmfds(jl,jk) = 0. + zmfdq(jl,jk) = 0. + zdmfdp(jl,jk) = 0. + zdpmel(jl,jk) = 0. + end do + end do + +!----------------------------------------- +!* 6.0 cumulus downdraft calculations +!----------------------------------------- + if(lmfdd) then +!* (a) determine lfs in 'cudlfsn' +!-------------------------------------- + call cudlfsn & + & (klon, klev,& + & kcbot, kctop, lndj, ldcum, & + & ztenh, zqenh, puen, pven, & + & pten, pqsen, pgeo, & + & zgeoh, paph, ptu, pqu, plu, & + & zuu, zvu, zmfub, zrfl, & + & ztd, zqd, zud, zvd, & + & pmfd, zmfds, zmfdq, zdmfdp, & + & idtop, loddraf) +!* (b) determine downdraft t,q and fluxes in 'cuddrafn' +!------------------------------------------------------------ + call cuddrafn & + & ( klon, klev, loddraf, & + & ztenh, zqenh, puen, pven, & + & pgeo, zgeoh, paph, zrfl, & + & ztd, zqd, zud, zvd, pmfu, & + & pmfd, zmfds, zmfdq, zdmfdp, pmfdde_rate ) +!----------------------------------------------------------- + end if +! +!----------------------------------------------------------------------- +!* 6.0 closure and clean work +! ------ +!-- 6.1 recalculate cloud base massflux from a cape closure +! for deep convection (ktype=1) +! + do jl=1,klon + if(ldcum(jl) .and. ktype(jl) .eq. 1) then + ikb = kcbot(jl) + ikt = kctop(jl) + zheat(jl)=0.0 + zcape(jl)=0.0 + zcape1(jl)=0.0 + zcape2(jl)=0.0 + zmfub1(jl)=zmfub(jl) + + ztauc(jl) = (zgeoh(jl,ikt)-zgeoh(jl,ikb)) / & + ((2.+ min(15.0,wup(jl)))*g) + if(lndj(jl) .eq. 0) then + upbl(jl) = 2.+ upbl(jl)/(paph(jl,klev+1)-paph(jl,ikb)) + ztaubl(jl) = (zgeoh(jl,ikb)-zgeoh(jl,klev+1))/(g*upbl(jl)) + ztaubl(jl) = min(300., ztaubl(jl)) + else + ztaubl(jl) = ztauc(jl) + end if + end if + end do +! + do jk = 1 , klev + do jl = 1 , klon + llo1 = ldcum(jl) .and. 
ktype(jl) .eq. 1 + if ( llo1 .and. jk <= kcbot(jl) .and. jk > kctop(jl) ) then + ikb = kcbot(jl) + zdz = pgeo(jl,jk-1)-pgeo(jl,jk) + zdp = pap(jl,jk)-pap(jl,jk-1) + zheat(jl) = zheat(jl) + ((pten(jl,jk-1)-pten(jl,jk)+zdz*rcpd) / & + ztenh(jl,jk)+vtmpc1*(pqen(jl,jk-1)-pqen(jl,jk))) * & + (g*(pmfu(jl,jk)+pmfd(jl,jk))) + zcape1(jl) = zcape1(jl) + ((ptu(jl,jk)-ztenh(jl,jk))/ztenh(jl,jk) + & + vtmpc1*(pqu(jl,jk)-zqenh(jl,jk))-plu(jl,jk))*zdp + end if + + if ( llo1 .and. jk >= kcbot(jl) ) then + if((paph(jl,klev+1)-paph(jl,kdpl(jl)))<50.e2) then + zdp = paph(jl,jk+1)-paph(jl,jk) + zcape2(jl) = zcape2(jl) + ztaubl(jl)* & + ((1.+vtmpc1*pqen(jl,jk))*ptte(jl,jk)+vtmpc1*pten(jl,jk)*pqte(jl,jk))*zdp + end if + end if + end do + end do + + do jl=1,klon + if(ldcum(jl).and.ktype(jl).eq.1) then + ikb = kcbot(jl) + ikt = kctop(jl) + ztau = ztauc(jl) * (1.+1.33e-5*dx(jl)) + ztau = max(ztmst,ztau) + ztau = max(360.,ztau) + ztau = min(10800.,ztau) + if(isequil) then + zcape2(jl)= max(0.,zcape2(jl)) + zcape(jl) = max(0.,min(zcape1(jl)-zcape2(jl),5000.)) + else + zcape(jl) = max(0.,min(zcape1(jl),5000.)) + end if + zheat(jl) = max(1.e-4,zheat(jl)) + zmfub1(jl) = (zcape(jl)*zmfub(jl))/(zheat(jl)*ztau) + zmfub1(jl) = max(zmfub1(jl),0.001) + zmfmax=(paph(jl,ikb)-paph(jl,ikb-1))*zcons2 + zmfub1(jl)=min(zmfub1(jl),zmfmax) + end if + end do +! +!* 6.2 recalculate convective fluxes due to effect of +! downdrafts on boundary layer moist static energy budget (ktype=2) +!-------------------------------------------------------- + do jl=1,klon + if(ldcum(jl) .and. ktype(jl) .eq. 2) then + ikb=kcbot(jl) + if(pmfd(jl,ikb).lt.0.0 .and. loddraf(jl)) then + zeps=-pmfd(jl,ikb)/max(zmfub(jl),cmfcmin) + else + zeps=0. + endif + zqumqe=pqu(jl,ikb)+plu(jl,ikb)- & + & zeps*zqd(jl,ikb)-(1.-zeps)*zqenh(jl,ikb) + zdqmin=max(0.01*zqenh(jl,ikb),cmfcmin) + zmfmax=(paph(jl,ikb)-paph(jl,ikb-1))*zcons2 +! 
using moist static engergy closure instead of moisture closure + zdh=cpd*(ptu(jl,ikb)-zeps*ztd(jl,ikb)- & + & (1.-zeps)*ztenh(jl,ikb))+alv*zqumqe + zdh=g*max(zdh,1.e5*zdqmin) + if(zdhpbl(jl).gt.0.)then + zmfub1(jl)=zdhpbl(jl)/zdh + else + zmfub1(jl) = zmfub(jl) + end if + zmfub1(jl) = min(zmfub1(jl),zmfmax) + end if + +!* 6.3 mid-level convection - nothing special +!--------------------------------------------------------- + if(ldcum(jl) .and. ktype(jl) .eq. 3 ) then + zmfub1(jl) = zmfub(jl) + end if + + end do + +!* 6.4 scaling the downdraft mass flux +!--------------------------------------------------------- + do jk=1,klev + do jl=1,klon + if( ldcum(jl) ) then + zfac=zmfub1(jl)/max(zmfub(jl),cmfcmin) + pmfd(jl,jk)=pmfd(jl,jk)*zfac + zmfds(jl,jk)=zmfds(jl,jk)*zfac + zmfdq(jl,jk)=zmfdq(jl,jk)*zfac + zdmfdp(jl,jk)=zdmfdp(jl,jk)*zfac + pmfdde_rate(jl,jk) = pmfdde_rate(jl,jk)*zfac + end if + end do + end do + +!* 6.5 scaling the updraft mass flux +! -------------------------------------------------------- + do jl = 1,klon + if ( ldcum(jl) ) zmfs(jl) = zmfub1(jl)/max(cmfcmin,zmfub(jl)) + end do + do jk = 2 , klev + do jl = 1,klon + if ( ldcum(jl) .and. jk >= kctop(jl)-1 ) then + ikb = kcbot(jl) + if ( jk>ikb ) then + zdz = ((paph(jl,klev+1)-paph(jl,jk))/(paph(jl,klev+1)-paph(jl,ikb))) + pmfu(jl,jk) = pmfu(jl,ikb)*zdz + end if + zmfmax = (paph(jl,jk)-paph(jl,jk-1))*zcons2 + if ( pmfu(jl,jk)*zmfs(jl) > zmfmax ) then + zmfs(jl) = min(zmfs(jl),zmfmax/pmfu(jl,jk)) + end if + end if + end do + end do + do jk = 2 , klev + do jl = 1,klon + if ( ldcum(jl) .and. jk <= kcbot(jl) .and. jk >= kctop(jl)-1 ) then + pmfu(jl,jk) = pmfu(jl,jk)*zmfs(jl) + zmfus(jl,jk) = zmfus(jl,jk)*zmfs(jl) + zmfuq(jl,jk) = zmfuq(jl,jk)*zmfs(jl) + zmful(jl,jk) = zmful(jl,jk)*zmfs(jl) + zdmfup(jl,jk) = zdmfup(jl,jk)*zmfs(jl) + plude(jl,jk) = plude(jl,jk)*zmfs(jl) + pmfude_rate(jl,jk) = pmfude_rate(jl,jk)*zmfs(jl) + end if + end do + end do + +!* 6.6 if ktype = 2, kcbot=kctop is not allowed +! 
--------------------------------------------------- + do jl = 1,klon + if ( ktype(jl) == 2 .and. & + kcbot(jl) == kctop(jl) .and. kcbot(jl) >= klev-1 ) then + ldcum(jl) = .false. + ktype(jl) = 0 + end if + end do + + if ( .not. lmfscv .or. .not. lmfpen ) then + do jl = 1,klon + llo2(jl) = .false. + if ( (.not. lmfscv .and. ktype(jl) == 2) .or. & + (.not. lmfpen .and. ktype(jl) == 1) ) then + llo2(jl) = .true. + ldcum(jl) = .false. + end if + end do + end if + +!* 6.7 set downdraft mass fluxes to zero above cloud top +!---------------------------------------------------- + do jl = 1,klon + if ( loddraf(jl) .and. idtop(jl) <= kctop(jl) ) then + idtop(jl) = kctop(jl) + 1 + end if + end do + do jk = 2 , klev + do jl = 1,klon + if ( loddraf(jl) ) then + if ( jk < idtop(jl) ) then + pmfd(jl,jk) = 0. + zmfds(jl,jk) = 0. + zmfdq(jl,jk) = 0. + pmfdde_rate(jl,jk) = 0. + zdmfdp(jl,jk) = 0. + else if ( jk == idtop(jl) ) then + pmfdde_rate(jl,jk) = 0. + end if + end if + end do + end do +!---------------------------------------------------------- +!* 7.0 determine final convective fluxes in 'cuflx' +!---------------------------------------------------------- + call cuflxn & + & ( klon, klev, ztmst & + & , pten, pqen, pqsen, ztenh, zqenh & + & , paph, pap, zgeoh, lndj, ldcum & + & , kcbot, kctop, idtop, itopm2 & + & , ktype, loddraf & + & , pmfu, pmfd, zmfus, zmfds & + & , zmfuq, zmfdq, zmful, plude & + & , zdmfup, zdmfdp, zdpmel, zlglac & + & , prain, pmfdde_rate, pmflxr, pmflxs ) + +! some adjustments needed + do jl=1,klon + zmfs(jl) = 1. + zmfuub(jl)=0. + end do + do jk = 2 , klev + do jl = 1,klon + if ( loddraf(jl) .and. jk >= idtop(jl)-1 ) then + zmfmax = pmfu(jl,jk)*0.98 + if ( pmfd(jl,jk)+zmfmax+1.e-15 < 0. ) then + zmfs(jl) = min(zmfs(jl),-zmfmax/pmfd(jl,jk)) + end if + end if + end do + end do + + do jk = 2 , klev + do jl = 1 , klon + if ( zmfs(jl) < 1. .and. 
jk >= idtop(jl)-1 ) then + pmfd(jl,jk) = pmfd(jl,jk)*zmfs(jl) + zmfds(jl,jk) = zmfds(jl,jk)*zmfs(jl) + zmfdq(jl,jk) = zmfdq(jl,jk)*zmfs(jl) + pmfdde_rate(jl,jk) = pmfdde_rate(jl,jk)*zmfs(jl) + zmfuub(jl) = zmfuub(jl) - (1.-zmfs(jl))*zdmfdp(jl,jk) + pmflxr(jl,jk+1) = pmflxr(jl,jk+1) + zmfuub(jl) + zdmfdp(jl,jk) = zdmfdp(jl,jk)*zmfs(jl) + end if + end do + end do + + do jk = 2 , klev - 1 + do jl = 1, klon + if ( loddraf(jl) .and. jk >= idtop(jl)-1 ) then + zerate = -pmfd(jl,jk) + pmfd(jl,jk-1) + pmfdde_rate(jl,jk) + if ( zerate < 0. ) then + pmfdde_rate(jl,jk) = pmfdde_rate(jl,jk) - zerate + end if + end if + if ( ldcum(jl) .and. jk >= kctop(jl)-1 ) then + zerate = pmfu(jl,jk) - pmfu(jl,jk+1) + pmfude_rate(jl,jk) + if ( zerate < 0. ) then + pmfude_rate(jl,jk) = pmfude_rate(jl,jk) - zerate + end if + zdmfup(jl,jk) = pmflxr(jl,jk+1) + pmflxs(jl,jk+1) - & + pmflxr(jl,jk) - pmflxs(jl,jk) + zdmfdp(jl,jk) = 0. + end if + end do + end do + +! avoid negative humidities at ddraught top + do jl = 1,klon + if ( loddraf(jl) ) then + jk = idtop(jl) + ik = min(jk+1,klev) + if ( zmfdq(jl,jk) < 0.3*zmfdq(jl,ik) ) then + zmfdq(jl,jk) = 0.3*zmfdq(jl,ik) + end if + end if + end do + +! avoid negative humidities near cloud top because gradient of precip flux +! and detrainment / liquid water flux are too large + do jk = 2 , klev + do jl = 1, klon + if ( ldcum(jl) .and. jk >= kctop(jl)-1 .and. jk < kcbot(jl) ) then + zdz = ztmst*g/(paph(jl,jk+1)-paph(jl,jk)) + zmfa = zmfuq(jl,jk+1) + zmfdq(jl,jk+1) - & + zmfuq(jl,jk) - zmfdq(jl,jk) + & + zmful(jl,jk+1) - zmful(jl,jk) + zdmfup(jl,jk) + zmfa = (zmfa-plude(jl,jk))*zdz + if ( pqen(jl,jk)+zmfa < 0. ) then + plude(jl,jk) = plude(jl,jk) + 2.*(pqen(jl,jk)+zmfa)/zdz + end if + if ( plude(jl,jk) < 0. ) plude(jl,jk) = 0. + end if + if ( .not. ldcum(jl) ) pmfude_rate(jl,jk) = 0. + if ( abs(pmfd(jl,jk-1)) < 1.0e-20 ) pmfdde_rate(jl,jk) = 0. 
+ end do + end do + + do jl=1,klon + prsfc(jl) = pmflxr(jl,klev+1) + pssfc(jl) = pmflxs(jl,klev+1) + end do + +!---------------------------------------------------------------- +!* 8.0 update tendencies for t and q in subroutine cudtdq +!---------------------------------------------------------------- + call cudtdqn(klon,klev,itopm2,kctop,idtop,ldcum,loddraf, & + ztmst,paph,zgeoh,pgeo,pten,ztenh,pqen,zqenh,pqsen, & + zlglac,plude,pmfu,pmfd,zmfus,zmfds,zmfuq,zmfdq,zmful, & + zdmfup,zdmfdp,zdpmel,ptte,pqte,pcte) +!---------------------------------------------------------------- +!* 9.0 update tendencies for u and u in subroutine cududv +!---------------------------------------------------------------- + if(lmfdudv) then + do jk = klev-1 , 2 , -1 + ik = jk + 1 + do jl = 1,klon + if ( ldcum(jl) ) then + if ( jk == kcbot(jl) .and. ktype(jl) < 3 ) then + ikb = kdpl(jl) + zuu(jl,jk) = puen(jl,ikb-1) + zvu(jl,jk) = pven(jl,ikb-1) + else if ( jk == kcbot(jl) .and. ktype(jl) == 3 ) then + zuu(jl,jk) = puen(jl,jk-1) + zvu(jl,jk) = pven(jl,jk-1) + end if + if ( jk < kcbot(jl) .and. jk >= kctop(jl) ) then + if(momtrans .eq. 1)then + zfac = 0. + if ( ktype(jl) == 1 .or. ktype(jl) == 3 ) zfac = 2. + if ( ktype(jl) == 1 .and. jk <= kctop(jl)+2 ) zfac = 3. + zerate = pmfu(jl,jk) - pmfu(jl,ik) + & + (1.+zfac)*pmfude_rate(jl,jk) + zderate = (1.+zfac)*pmfude_rate(jl,jk) + zmfa = 1./max(cmfcmin,pmfu(jl,jk)) + zuu(jl,jk) = (zuu(jl,ik)*pmfu(jl,ik) + & + zerate*puen(jl,jk)-zderate*zuu(jl,ik))*zmfa + zvu(jl,jk) = (zvu(jl,ik)*pmfu(jl,ik) + & + zerate*pven(jl,jk)-zderate*zvu(jl,ik))*zmfa + else + if(ktype(jl) == 1 .or. ktype(jl) == 3) then + pgf_u = -0.7*0.5*(pmfu(jl,ik)*(puen(jl,ik)-puen(jl,jk))+& + pmfu(jl,jk)*(puen(jl,jk)-puen(jl,jk-1))) + pgf_v = -0.7*0.5*(pmfu(jl,ik)*(pven(jl,ik)-pven(jl,jk))+& + pmfu(jl,jk)*(pven(jl,jk)-pven(jl,jk-1))) + else + pgf_u = 0. + pgf_v = 0. 
+ end if + zerate = pmfu(jl,jk) - pmfu(jl,ik) + pmfude_rate(jl,jk) + zderate = pmfude_rate(jl,jk) + zmfa = 1./max(cmfcmin,pmfu(jl,jk)) + zuu(jl,jk) = (zuu(jl,ik)*pmfu(jl,ik) + & + zerate*puen(jl,jk)-zderate*zuu(jl,ik)+pgf_u)*zmfa + zvu(jl,jk) = (zvu(jl,ik)*pmfu(jl,ik) + & + zerate*pven(jl,jk)-zderate*zvu(jl,ik)+pgf_v)*zmfa + end if + end if + end if + end do + end do + + if(lmfdd) then + do jk = 3 , klev + ik = jk - 1 + do jl = 1,klon + if ( ldcum(jl) ) then + if ( jk == idtop(jl) ) then + zud(jl,jk) = 0.5*(zuu(jl,jk)+puen(jl,ik)) + zvd(jl,jk) = 0.5*(zvu(jl,jk)+pven(jl,ik)) + else if ( jk > idtop(jl) ) then + zerate = -pmfd(jl,jk) + pmfd(jl,ik) + pmfdde_rate(jl,jk) + zmfa = 1./min(-cmfcmin,pmfd(jl,jk)) + zud(jl,jk) = (zud(jl,ik)*pmfd(jl,ik) - & + zerate*puen(jl,ik)+pmfdde_rate(jl,jk)*zud(jl,ik))*zmfa + zvd(jl,jk) = (zvd(jl,ik)*pmfd(jl,ik) - & + zerate*pven(jl,ik)+pmfdde_rate(jl,jk)*zvd(jl,ik))*zmfa + end if + end if + end do + end do + end if +! -------------------------------------------------- +! rescale massfluxes for stability in Momentum +!------------------------------------------------------------------------ + zmfs(:) = 1. + do jk = 2 , klev + do jl = 1, klon + if ( ldcum(jl) .and. jk >= kctop(jl)-1 ) then + zmfmax = (paph(jl,jk)-paph(jl,jk-1))*zcons + if ( pmfu(jl,jk) > zmfmax .and. jk >= kctop(jl) ) then + zmfs(jl) = min(zmfs(jl),zmfmax/pmfu(jl,jk)) + end if + end if + end do + end do + do jk = 1 , klev + do jl = 1, klon + zmfuus(jl,jk) = pmfu(jl,jk) + zmfdus(jl,jk) = pmfd(jl,jk) + if ( ldcum(jl) .and. 
jk >= kctop(jl)-1 ) then + zmfuus(jl,jk) = pmfu(jl,jk)*zmfs(jl) + zmfdus(jl,jk) = pmfd(jl,jk)*zmfs(jl) + end if + end do + end do +!* 9.1 update u and v in subroutine cududvn +!------------------------------------------------------------------- + do jk = 1 , klev + do jl = 1, klon + ztenu(jl,jk) = pvom(jl,jk) + ztenv(jl,jk) = pvol(jl,jk) + end do + end do + + call cududvn(klon,klev,itopm2,ktype,kcbot,kctop, & + ldcum,ztmst,paph,puen,pven,zmfuus,zmfdus,zuu, & + zud,zvu,zvd,pvom,pvol) + +! calculate KE dissipation + do jl = 1, klon + zsum12(jl) = 0. + zsum22(jl) = 0. + end do + do jk = 1 , klev + do jl = 1, klon + zuv2(jl,jk) = 0. + if ( ldcum(jl) .and. jk >= kctop(jl)-1 ) then + zdz = (paph(jl,jk+1)-paph(jl,jk)) + zduten = pvom(jl,jk) - ztenu(jl,jk) + zdvten = pvol(jl,jk) - ztenv(jl,jk) + zuv2(jl,jk) = sqrt(zduten**2+zdvten**2) + zsum22(jl) = zsum22(jl) + zuv2(jl,jk)*zdz + zsum12(jl) = zsum12(jl) - & + (puen(jl,jk)*zduten+pven(jl,jk)*zdvten)*zdz + end if + end do + end do + do jk = 1 , klev + do jl = 1, klon + if ( ldcum(jl) .and. jk>=kctop(jl)-1 ) then + ztdis = rcpd*zsum12(jl)*zuv2(jl,jk)/max(1.e-15,zsum22(jl)) + ptte(jl,jk) = ptte(jl,jk) + ztdis + end if + end do + end do + + end if + +!---------------------------------------------------------------------- +!* 10. IN CASE THAT EITHER DEEP OR SHALLOW IS SWITCHED OFF +! NEED TO SET SOME VARIABLES A POSTERIORI TO ZERO +! --------------------------------------------------- + if ( .not. lmfscv .or. .not. lmfpen ) then + do jk = 2 , klev + do jl = 1, klon + if ( llo2(jl) .and. jk >= kctop(jl)-1 ) then + ptu(jl,jk) = pten(jl,jk) + pqu(jl,jk) = pqen(jl,jk) + plu(jl,jk) = 0. + pmfude_rate(jl,jk) = 0. + pmfdde_rate(jl,jk) = 0. + end if + end do + end do + do jl = 1, klon + if ( llo2(jl) ) then + kctop(jl) = klev - 1 + kcbot(jl) = klev - 1 + end if + end do + end if + + return + end subroutine cumastrn + +!********************************************** +! 
level 3 subroutine cuinin
!**********************************************
!
      subroutine cuinin &
     &    (klon, klev, klevp1, klevm1, pten,&
     &     pqen, pqsen, puen, pven, pverv,&
     &     pgeo, paph, pgeoh, ptenh, pqenh,&
     &     pqsenh, klwmin, ptu, pqu, ptd,&
     &     pqd, puu, pvu, pud, pvd,&
     &     pmfu, pmfd, pmfus, pmfds, pmfuq,&
     &     pmfdq, pdmfup, pdmfdp, pdpmel, plu,&
     &     plude, klab)
      implicit none
! m.tiedtke e.c.m.w.f. 12/89
!***purpose
! -------
! this routine interpolates large-scale fields of t,q etc.
! to half levels (i.e. grid for massflux scheme),
! and initializes values for updrafts and downdrafts
!***interface
! ---------
! this routine is called from *cumastr*.
!***method.
! --------
! for extrapolation to half levels see tiedtke(1989)
!***externals
! ---------
! *cuadjtq* to specify qs at half levels
! ----------------------------------------------------------------
!***arguments:
! klon, klev      - number of columns and of full model levels
! klevp1, klevm1  - klev+1 and klev-1
! pten,pqen,pqsen - environment t, q, saturation q on full levels (in)
! puen,pven       - environment horizontal winds on full levels (in)
! pverv           - large-scale vertical velocity (omega) (in)
! pgeo,pgeoh      - geopotential on full / half levels (in)
! paph            - half-level pressure (in)
! ptenh,pqenh,pqsenh - t, q, qsat interpolated to half levels (out)
! klwmin          - per-column level of minimum omega (out)
! ptu,pqu,ptd,pqd - updraft/downdraft t and q, set to environment (out)
! puu,pvu,pud,pvd - updraft/downdraft winds, set to environment (out)
! plu             - updraft liquid water, zeroed here (out)
! klab            - level label, zeroed here (out)
! pmfu,pmfd,pmfus,pmfds,pmfuq,pmfdq,pdmfup,pdmfdp,pdpmel,plude
!                 - mass-flux work arrays; declared for the interface
!                   but not referenced in this routine
      integer klon,klev,klevp1,klevm1
      real pten(klon,klev), pqen(klon,klev),&
     & puen(klon,klev), pven(klon,klev),&
     & pqsen(klon,klev), pverv(klon,klev),&
     & pgeo(klon,klev), pgeoh(klon,klevp1),&
     & paph(klon,klevp1), ptenh(klon,klev),&
     & pqenh(klon,klev), pqsenh(klon,klev)
      real ptu(klon,klev), pqu(klon,klev),&
     & ptd(klon,klev), pqd(klon,klev),&
     & puu(klon,klev), pud(klon,klev),&
     & pvu(klon,klev), pvd(klon,klev),&
     & pmfu(klon,klev), pmfd(klon,klev),&
     & pmfus(klon,klev), pmfds(klon,klev),&
     & pmfuq(klon,klev), pmfdq(klon,klev),&
     & pdmfup(klon,klev), pdmfdp(klon,klev),&
     & plu(klon,klev), plude(klon,klev)
      real zwmax(klon), zph(klon), &
     & pdpmel(klon,klev)
      integer klab(klon,klev), klwmin(klon)
      logical loflag(klon)
! local variables
      integer jl,jk
      integer icall,ik
      real zzs
!------------------------------------------------------------
!* 1. specify large scale parameters at half levels
!*    adjust temperature fields if statically unstable
!*    find level of maximum vertical velocity
! -----------------------------------------------------------
! half-level t is derived from the larger of the two adjacent
! full-level dry static energies (removes static instability);
! q and qsat are copied from the full level above
      do jk=2,klev
        do jl=1,klon
          ptenh(jl,jk)=(max(cpd*pten(jl,jk-1)+pgeo(jl,jk-1), &
     &      cpd*pten(jl,jk)+pgeo(jl,jk))-pgeoh(jl,jk))*rcpd
          pqenh(jl,jk) = pqen(jl,jk-1)
          pqsenh(jl,jk)= pqsen(jl,jk-1)
          zph(jl)=paph(jl,jk)
          loflag(jl)=.true.
        end do

! skip the saturation adjustment for the two lowest half levels
        if ( jk >= klev-1 .or. jk < 2 ) cycle
        ik=jk
        icall=0
! cuadjtqn is a module-level helper (not visible in this file
! section); with icall=0 it recomputes pqsenh consistently with
! ptenh at this half level -- confirm against its definition
        call cuadjtqn(klon,klev,ik,zph,ptenh,pqsenh,loflag,icall)
        do jl=1,klon
! blend half-level q towards the adjusted saturation value,
! then clamp to non-negative
          pqenh(jl,jk)=min(pqen(jl,jk-1),pqsen(jl,jk-1)) &
     &      +(pqsenh(jl,jk)-pqsen(jl,jk-1))
          pqenh(jl,jk)=max(pqenh(jl,jk),0.)
        end do
      end do

! boundary values: lowest half level from the lowest full level,
! topmost half level copied directly; reset the omega-minimum search
      do jl=1,klon
        ptenh(jl,klev)=(cpd*pten(jl,klev)+pgeo(jl,klev)- &
     &     pgeoh(jl,klev))*rcpd
        pqenh(jl,klev)=pqen(jl,klev)
        ptenh(jl,1)=pten(jl,1)
        pqenh(jl,1)=pqen(jl,1)
        klwmin(jl)=klev
        zwmax(jl)=0.
      end do

! upward sweep enforcing static stability of the half-level profile
      do jk=klevm1,2,-1
        do jl=1,klon
          zzs=max(cpd*ptenh(jl,jk)+pgeoh(jl,jk), &
     &       cpd*ptenh(jl,jk+1)+pgeoh(jl,jk+1))
          ptenh(jl,jk)=(zzs-pgeoh(jl,jk))*rcpd
        end do
      end do

! locate the level of minimum (most negative, i.e. strongest
! upward) large-scale omega in each column
      do jk=klev,3,-1
        do jl=1,klon
          if(pverv(jl,jk).lt.zwmax(jl)) then
            zwmax(jl)=pverv(jl,jk)
            klwmin(jl)=jk
          end if
        end do
      end do
!-----------------------------------------------------------
!*    2.0 initialize values for updrafts and downdrafts
!-----------------------------------------------------------
! draft t/q start from the half-level environment; winds are taken
! from the full level above (ik=jk-1, clipped to 1 at the model top)
      do jk=1,klev
        ik=jk-1
        if(jk.eq.1) ik=1
        do jl=1,klon
          ptu(jl,jk)=ptenh(jl,jk)
          ptd(jl,jk)=ptenh(jl,jk)
          pqu(jl,jk)=pqenh(jl,jk)
          pqd(jl,jk)=pqenh(jl,jk)
          plu(jl,jk)=0.
          puu(jl,jk)=puen(jl,ik)
          pud(jl,jk)=puen(jl,ik)
          pvu(jl,jk)=pven(jl,ik)
          pvd(jl,jk)=pven(jl,ik)
          klab(jl,jk)=0
        end do
      end do
      return
      end subroutine cuinin

!---------------------------------------------------------
! 
level 3 subroutines
!--------------------------------------------------------
      subroutine cutypen &
     &   ( klon, klev, klevp1, klevm1, pqen,&
     &     ptenh, pqenh, pqsenh, pgeoh, paph,&
     &     hfx, qfx, pgeo, pqsen, pap,&
     &     pten, lndj, cutu, cuqu, culab,&
     &     ldcum, cubot, cutop, ktype, wbase, culu, kdpl )
! zhang & wang iprc 2011-2013
!***purpose.
! --------
! to produce first guess updraught for cu-parameterizations
! calculates condensation level, and sets updraught base variables and
! first guess cloud type
!***interface
! ---------
! this routine is called from *cumastr*.
! input are environm. values of t,q,p,phi at half levels.
! it returns cloud types as follows;
! ktype=1 for deep cumulus
! ktype=2 for shallow cumulus
!***method.
! --------
! based on a simplified updraught equation
! partial(hup)/partial(z)=eta(h - hup)
! eta is the entrainment rate for test parcel
! h stands for dry static energy or the total water specific humidity
! references: christian jakob, 2003: a new subcloud model for
! mass-flux convection schemes
! influence on triggering, updraft properties, and model
! climate, mon.wea.rev.
! 131, 2765-2778
! and
! ifs documentation - cy36r1,cy38r1
!***input variables:
! ptenh [ztenh] - environment temperature on half levels. (cuini)
! pqenh [zqenh] - env. specific humidity on half levels. (cuini)
! pgeoh [zgeoh] - geopotential on half levels, (mssflx)
! paph - pressure of half levels. (mssflx)
! rho - density of the lowest model level
! qfx - net upward moisture flux at the surface (kg/m^2/s)
! hfx - net upward heat flux at the surface (w/m^2)
!***variables output by cutype:
! ktype - convection type - 1: penetrative (cumastr)
! 2: stratocumulus (cumastr)
! 3: mid-level (cuasc)
! information for updraft parcel (ptu,pqu,plu,kcbot,klab,kdpl...)
! cutu/cuqu/culu/culab - first-guess parcel t, q, liquid water and
!                        level labels returned to the caller
! ldcum/cubot/cutop    - convection-active flag and cloud base/top levels
! wbase                - cloud-base vertical velocity, kdpl - departure level
! ----------------------------------------------------------------
!-------------------------------------------------------------------
      implicit none
!-------------------------------------------------------------------
      integer klon, klev, klevp1, klevm1
      real ptenh(klon,klev), pqenh(klon,klev),&
     &    pqsen(klon,klev), pqsenh(klon,klev),&
     &    pgeoh(klon,klevp1), paph(klon,klevp1),&
     &    pap(klon,klev), pqen(klon,klev)
      real pten(klon,klev)
! local working parcel profiles (copied into cu* outputs at the end)
      real ptu(klon,klev),pqu(klon,klev),plu(klon,klev)
      real pgeo(klon,klev)
      integer klab(klon,klev)
      integer kctop(klon),kcbot(klon)

      real qfx(klon),hfx(klon)
      real zph(klon)
      integer lndj(klon)      ! land/sea index; not referenced in this routine
      logical loflag(klon), deepflag(klon), resetflag(klon)

! output variables
      real cutu(klon,klev), cuqu(klon,klev), culu(klon,klev)
      integer culab(klon,klev)
      real wbase(klon)
      integer ktype(klon),cubot(klon),cutop(klon),kdpl(klon)
      logical ldcum(klon)

! local variables
      real zqold(klon)
      real rho, part1, part2, root, conw, deltt, deltq
      real eta(klon),dz(klon),coef(klon)
      real dhen(klon,klev), dh(klon,klev)
      real plude(klon,klev)
      real kup(klon,klev)
      real vptu(klon,klev),vten(klon,klev)
      real zbuo(klon,klev),abuoy(klon,klev)

      real zz,zdken,zdq
      real fscale,crirh1,pp
      real atop1,atop2,abot
      real tmix,zmix,qmix,pmix
      real zlglac,dp,t13
      integer nk,is,ikb,ikt

      real zqsu,zcor,zdp,zesdp,zalfaw,zfacw,zfaci,zfac,zdsdp,zdqsdt,zdtdp
      real zpdifftop, zpdiffbot
      integer zcbase(klon), itoppacel(klon)
      integer jl,jk,ik,icall,levels
      logical needreset, lldcum(klon)
!--------------------------------------------------------------
      t13 = 1.0/3.0
!
! default: no convection anywhere
      do jl=1,klon
        kcbot(jl)=klev
        kctop(jl)=klev
        kdpl(jl) =klev
        ktype(jl)=0
        wbase(jl)=0.
        ldcum(jl)=.false.
      end do

!-----------------------------------------------------------
! first, test for shallow convection: a single test parcel is
! released from the lowest model level (klev)
! define deltat and deltaq
!-----------------------------------------------------------
      do jk=1,klev
        do jl=1,klon
          plu(jl,jk)=culu(jl,jk)    ! parcel liquid water
          ptu(jl,jk)=cutu(jl,jk)    ! parcel temperature
          pqu(jl,jk)=cuqu(jl,jk)    ! parcel specific humidity
          klab(jl,jk)=culab(jl,jk)
          dh(jl,jk)=0.0             ! parcel dry static energy
          dhen(jl,jk)=0.0           ! environment dry static energy
          kup(jl,jk)=0.0            ! updraught kinetic energy for parcel
          vptu(jl,jk)=0.0           ! parcel virtual temperature considering water-loading
          vten(jl,jk)=0.0           ! environment virtual temperature
          zbuo(jl,jk)=0.0           ! parcel buoyancy
          abuoy(jl,jk)=0.0
        end do
      end do

      do jl=1,klon
        zqold(jl) = 0.
        lldcum(jl) = .false.
        loflag(jl) = .true.
      end do

! check the levels from lowest level to second top level
      do jk=klevm1,2,-1

! define the variables at the first level
        if(jk .eq. klevm1) then
          do jl=1,klon
! density of the lowest model level (virtual-temperature form)
            rho=pap(jl,klev)/ &
     &         (rd*(pten(jl,klev)*(1.+vtmpc1*pqen(jl,klev))))
            part1 = 1.5*0.4*pgeo(jl,klev)/ &
     &             (rho*pten(jl,klev))
! part2 is minus the surface buoyancy flux; the parcel is only
! launched when that flux is upward (part2 < 0)
            part2 = -hfx(jl)*rcpd-vtmpc1*pten(jl,klev)*qfx(jl)
            root = 0.001-part1*part2
            if(part2 .lt. 0.) then
! conw: convective-scale velocity estimate; deltt/deltq: thermal
! excesses scaled by the surface fluxes (cf. jakob 2003)
              conw = 1.2*(root)**t13
              deltt = max(1.5*hfx(jl)/(rho*cpd*conw),0.)
              deltq = max(1.5*qfx(jl)/(rho*conw),0.)
              kup(jl,klev) = 0.5*(conw**2)
              pqu(jl,klev)= pqenh(jl,klev) + deltq
              dhen(jl,klev)= pgeoh(jl,klev) + ptenh(jl,klev)*cpd
              dh(jl,klev) = dhen(jl,klev) + deltt*cpd
              ptu(jl,klev) = (dh(jl,klev)-pgeoh(jl,klev))*rcpd
              vptu(jl,klev)=ptu(jl,klev)*(1.+vtmpc1*pqu(jl,klev))
              vten(jl,klev)=ptenh(jl,klev)*(1.+vtmpc1*pqenh(jl,klev))
              zbuo(jl,klev)=(vptu(jl,klev)-vten(jl,klev))/vten(jl,klev)
              klab(jl,klev) = 1
            else
              loflag(jl) = .false.
            end if
          end do
        end if

! stop early once no column still has an active test parcel
        is=0
        do jl=1,klon
          if(loflag(jl))then
            is=is+1
          endif
        enddo
        if(is.eq.0) exit

! the next levels, we use the variables at the first level as initial values
        do jl=1,klon
          if(loflag(jl)) then
! entraining ascent: dh and q relax towards the environment with
! entrainment rate eta (semi-implicit in the layer)
            eta(jl) = 0.8/(pgeo(jl,jk)*zrg)+2.e-4
            dz(jl) = (pgeoh(jl,jk)-pgeoh(jl,jk+1))*zrg
            coef(jl)= 0.5*eta(jl)*dz(jl)
            dhen(jl,jk) = pgeoh(jl,jk) + cpd*ptenh(jl,jk)
            dh(jl,jk) = (coef(jl)*(dhen(jl,jk+1)+dhen(jl,jk))&
     &                 +(1.-coef(jl))*dh(jl,jk+1))/(1.+coef(jl))
            pqu(jl,jk) =(coef(jl)*(pqenh(jl,jk+1)+pqenh(jl,jk))&
     &                 +(1.-coef(jl))*pqu(jl,jk+1))/(1.+coef(jl))
            ptu(jl,jk) = (dh(jl,jk)-pgeoh(jl,jk))*rcpd
            zqold(jl) = pqu(jl,jk)
            zph(jl)=paph(jl,jk)
          end if
        end do
! check if the parcel is saturated (cuadjtqn condenses any excess q;
! module helper, definition not visible in this file section)
        ik=jk
        icall=1
        call cuadjtqn(klon,klev,ik,zph,ptu,pqu,loflag,icall)
        do jl=1,klon
          if( loflag(jl) ) then
! condensate produced in this layer; zlglac is the frozen fraction
! change (foealfa = liquid fraction function from the module)
            zdq = max((zqold(jl) - pqu(jl,jk)),0.)
            plu(jl,jk) = plu(jl,jk+1) + zdq
            zlglac=zdq*((1.-foealfa(ptu(jl,jk))) - &
     &                  (1.-foealfa(ptu(jl,jk+1))))
            plu(jl,jk) = min(plu(jl,jk),5.e-3)
            dh(jl,jk) = pgeoh(jl,jk) + cpd*(ptu(jl,jk)+ralfdcp*zlglac)
! compute the updraft speed: semi-implicit kinetic-energy equation
! with buoyancy forcing abuoy averaged over the layer
            vptu(jl,jk) = ptu(jl,jk)*(1.+vtmpc1*pqu(jl,jk)-plu(jl,jk))+&
     &                    ralfdcp*zlglac
            vten(jl,jk) = ptenh(jl,jk)*(1.+vtmpc1*pqenh(jl,jk))
            zbuo(jl,jk) = (vptu(jl,jk) - vten(jl,jk))/vten(jl,jk)
            abuoy(jl,jk)=(zbuo(jl,jk)+zbuo(jl,jk+1))*0.5*g
            atop1 = 1.0 - 2.*coef(jl)
            atop2 = 2.0*dz(jl)*abuoy(jl,jk)
            abot = 1.0 + 2.*coef(jl)
            kup(jl,jk) = (atop1*kup(jl,jk+1) + atop2) / abot

! let's find the exact cloud base: extrapolate from the level above
! to the pressure where the parcel just saturates
            if ( plu(jl,jk) > 0. .and. klab(jl,jk+1) == 1 ) then
              ik = jk + 1
              zqsu = foeewm(ptu(jl,ik))/paph(jl,ik)
              zqsu = min(0.5,zqsu)
              zcor = 1./(1.-vtmpc1*zqsu)
              zqsu = zqsu*zcor
              zdq = min(0.,pqu(jl,ik)-zqsu)
              zalfaw = foealfa(ptu(jl,ik))
              zfacw = c5les/((ptu(jl,ik)-c4les)**2)
              zfaci = c5ies/((ptu(jl,ik)-c4ies)**2)
              zfac = zalfaw*zfacw + (1.-zalfaw)*zfaci
              zesdp = foeewm(ptu(jl,ik))/paph(jl,ik)
              zcor = 1./(1.-vtmpc1*zesdp)
              zdqsdt = zfac*zcor*zqsu
              zdtdp = rd*ptu(jl,ik)/(cpd*paph(jl,ik))
              zdp = zdq/(zdqsdt*zdtdp)
              zcbase(jl) = paph(jl,ik) + zdp
! choose nearest half level as cloud base (jk or jk+1)
              zpdifftop = zcbase(jl) - paph(jl,jk)
              zpdiffbot = paph(jl,jk+1) - zcbase(jl)
              if ( zpdifftop > zpdiffbot .and. kup(jl,jk+1) > 0. ) then
                ikb = min(klev-1,jk+1)
                klab(jl,ikb) = 2
                klab(jl,jk) = 2
                kcbot(jl) = ikb
                plu(jl,jk+1) = 1.0e-8
              else if ( zpdifftop <= zpdiffbot .and.kup(jl,jk) > 0. ) then
                klab(jl,jk) = 2
                kcbot(jl) = jk
              end if
            end if

! ascent terminates where the parcel kinetic energy drops below
! zero; a cloudy parcel (plu>0 below) marks a shallow candidate
            if(kup(jl,jk) .lt. 0.)then
              loflag(jl) = .false.
              if(plu(jl,jk+1) .gt. 0.) then
                kctop(jl) = jk
                lldcum(jl) = .true.
              else
                lldcum(jl) = .false.
              end if
            else if(plu(jl,jk) .gt. 0.)then
              klab(jl,jk)=2
            else
              klab(jl,jk)=1
            end if
          end if
        end do

      end do ! end all the levels

! accept as shallow (ktype=2) only clouds thinner than zdnoprc
! (module-level depth threshold, value not visible here)
      do jl=1,klon
        ikb = kcbot(jl)
        ikt = kctop(jl)
        if(paph(jl,ikb) - paph(jl,ikt) > zdnoprc) lldcum(jl) = .false.
        if(lldcum(jl)) then
          ktype(jl) = 2
          ldcum(jl) = .true.
          wbase(jl) = sqrt(max(2.*kup(jl,ikb),0.))
          cubot(jl) = ikb
          cutop(jl) = ikt
          kdpl(jl)  = klev
        else
          cutop(jl) = -1
          cubot(jl) = -1
          kdpl(jl)  = klev - 1
          ldcum(jl) = .false.
          wbase(jl) = 0.
        end if
      end do

! export the shallow parcel profile above cloud top
      do jk=klev,1,-1
        do jl=1,klon
          ikt = kctop(jl)
          if(jk .ge. ikt)then
            culab(jl,jk) = klab(jl,jk)
            cutu(jl,jk) = ptu(jl,jk)
            cuqu(jl,jk) = pqu(jl,jk)
            culu(jl,jk) = plu(jl,jk)
          end if
        end do
      end do

!-----------------------------------------------------------
! next, let's check the deep convection
! the first level is klevm1-1
! define deltat and deltaq
!----------------------------------------------------------
! parcels are launched from successive departure levels;
! a 50-hpa mixed layer is assumed near the surface (60-hpa test below)
      deltt = 0.2
      deltq = 1.0e-4
      do jl=1,klon
        deepflag(jl) = .false.
      end do

! itoppacel: highest allowed departure level, within 350 hpa of the surface
      do jk=klev,1,-1
        do jl=1,klon
          if((paph(jl,klev+1)-paph(jl,jk)) .lt. 350.e2) itoppacel(jl) = jk
        end do
      end do

      do levels=klevm1-1,klev/2,-1 ! loop over departure levels
        do jk=1,klev
          do jl=1,klon
            plu(jl,jk)=0.0   ! parcel liquid water
            ptu(jl,jk)=0.0   ! parcel temperature
            pqu(jl,jk)=0.0   ! parcel specific humidity
            dh(jl,jk)=0.0    ! parcel dry static energy
            dhen(jl,jk)=0.0  ! environment dry static energy
            kup(jl,jk)=0.0   ! updraught kinetic energy for parcel
            vptu(jl,jk)=0.0  ! parcel virtual temperature considering water-loading
            vten(jl,jk)=0.0  ! environment virtual temperature
            abuoy(jl,jk)=0.0
            zbuo(jl,jk)=0.0
            klab(jl,jk)=0
          end do
        end do

! a column stays active unless deep convection was already found
! (deepflag) or the departure level is above itoppacel
        do jl=1,klon
          kcbot(jl) = levels
          kctop(jl) = levels
          zqold(jl) = 0.
          lldcum(jl) = .false.
          resetflag(jl)= .false.
          loflag(jl) = (.not. deepflag(jl)) .and. (levels.ge.itoppacel(jl))
        end do

! start the inner loop to search the deep convection points
        do jk=levels,2,-1
          is=0
          do jl=1,klon
            if(loflag(jl))then
              is=is+1
            endif
          enddo
          if(is.eq.0) exit

! define the variables at the departure level
          if(jk .eq. levels) then
            do jl=1,klon
              if(loflag(jl)) then
! near the surface (within 60 hpa) the parcel starts from a
! 50-hpa-deep pressure-weighted mixed layer, otherwise from the
! half level just below the departure level
                if((paph(jl,klev+1)-paph(jl,jk)) < 60.e2) then
                  tmix=0.
                  qmix=0.
                  zmix=0.
                  pmix=0.
                  do nk=jk+2,jk,-1
                    if(pmix < 50.e2) then
                      dp = paph(jl,nk) - paph(jl,nk-1)
                      tmix=tmix+dp*ptenh(jl,nk)
                      qmix=qmix+dp*pqenh(jl,nk)
                      zmix=zmix+dp*pgeoh(jl,nk)
                      pmix=pmix+dp
                    end if
                  end do
                  tmix=tmix/pmix
                  qmix=qmix/pmix
                  zmix=zmix/pmix
                else
                  tmix=ptenh(jl,jk+1)
                  qmix=pqenh(jl,jk+1)
                  zmix=pgeoh(jl,jk+1)
                end if

! fixed excesses deltt/deltq and a small initial kinetic energy
                pqu(jl,jk+1) = qmix + deltq
                dhen(jl,jk+1)= zmix + tmix*cpd
                dh(jl,jk+1) = dhen(jl,jk+1) + deltt*cpd
                ptu(jl,jk+1) = (dh(jl,jk+1)-pgeoh(jl,jk+1))*rcpd
                kup(jl,jk+1) = 0.5
                klab(jl,jk+1)= 1
                vptu(jl,jk+1)=ptu(jl,jk+1)*(1.+vtmpc1*pqu(jl,jk+1))
                vten(jl,jk+1)=ptenh(jl,jk+1)*(1.+vtmpc1*pqenh(jl,jk+1))
                zbuo(jl,jk+1)=(vptu(jl,jk+1)-vten(jl,jk+1))/vten(jl,jk+1)
              end if
            end do
          end if

! the next levels, we use the variables at the first level as initial values
          do jl=1,klon
            if(loflag(jl)) then
! define the fscale: entrainment decreases with the drying of the
! column relative to the departure level
              fscale = min(1.,(pqsen(jl,jk)/pqsen(jl,levels))**3)
              eta(jl) = 1.75e-3*fscale
              dz(jl) = (pgeoh(jl,jk)-pgeoh(jl,jk+1))*zrg
              coef(jl)= 0.5*eta(jl)*dz(jl)
              dhen(jl,jk) = pgeoh(jl,jk) + cpd*ptenh(jl,jk)
              dh(jl,jk) = (coef(jl)*(dhen(jl,jk+1)+dhen(jl,jk))&
     &                   +(1.-coef(jl))*dh(jl,jk+1))/(1.+coef(jl))
              pqu(jl,jk) =(coef(jl)*(pqenh(jl,jk+1)+pqenh(jl,jk))&
     &                   +(1.-coef(jl))*pqu(jl,jk+1))/(1.+coef(jl))
              ptu(jl,jk) = (dh(jl,jk)-pgeoh(jl,jk))*rcpd
              zqold(jl) = pqu(jl,jk)
              zph(jl)=paph(jl,jk)
            end if
          end do
! check if the parcel is saturated
          ik=jk
          icall=1
          call cuadjtqn(klon,klev,ik,zph,ptu,pqu,loflag,icall)

          do jl=1,klon
            if( loflag(jl) ) then
              zdq = max((zqold(jl) - pqu(jl,jk)),0.)
              plu(jl,jk) = plu(jl,jk+1) + zdq
              zlglac=zdq*((1.-foealfa(ptu(jl,jk))) - &
     &                    (1.-foealfa(ptu(jl,jk+1))))
! note: deep test halves the condensate load (shallow test caps
! it at 5.e-3 instead)
              plu(jl,jk) = 0.5*plu(jl,jk)
              dh(jl,jk) = pgeoh(jl,jk) + cpd*(ptu(jl,jk)+ralfdcp*zlglac)
! compute the updraft speed
              vptu(jl,jk) = ptu(jl,jk)*(1.+vtmpc1*pqu(jl,jk)-plu(jl,jk))+&
     &                      ralfdcp*zlglac
              vten(jl,jk) = ptenh(jl,jk)*(1.+vtmpc1*pqenh(jl,jk))
              zbuo(jl,jk) = (vptu(jl,jk) - vten(jl,jk))/vten(jl,jk)
              abuoy(jl,jk)=(zbuo(jl,jk)+zbuo(jl,jk+1))*0.5*g
              atop1 = 1.0 - 2.*coef(jl)
              atop2 = 2.0*dz(jl)*abuoy(jl,jk)
              abot = 1.0 + 2.*coef(jl)
              kup(jl,jk) = (atop1*kup(jl,jk+1) + atop2) / abot
! let's find the exact cloud base
              if ( plu(jl,jk) > 0. .and. klab(jl,jk+1) == 1 ) then
                ik = jk + 1
                zqsu = foeewm(ptu(jl,ik))/paph(jl,ik)
                zqsu = min(0.5,zqsu)
                zcor = 1./(1.-vtmpc1*zqsu)
                zqsu = zqsu*zcor
                zdq = min(0.,pqu(jl,ik)-zqsu)
                zalfaw = foealfa(ptu(jl,ik))
                zfacw = c5les/((ptu(jl,ik)-c4les)**2)
                zfaci = c5ies/((ptu(jl,ik)-c4ies)**2)
                zfac = zalfaw*zfacw + (1.-zalfaw)*zfaci
                zesdp = foeewm(ptu(jl,ik))/paph(jl,ik)
                zcor = 1./(1.-vtmpc1*zesdp)
                zdqsdt = zfac*zcor*zqsu
                zdtdp = rd*ptu(jl,ik)/(cpd*paph(jl,ik))
                zdp = zdq/(zdqsdt*zdtdp)
                zcbase(jl) = paph(jl,ik) + zdp
! choose nearest half level as cloud base (jk or jk+1)
                zpdifftop = zcbase(jl) - paph(jl,jk)
                zpdiffbot = paph(jl,jk+1) - zcbase(jl)
                if ( zpdifftop > zpdiffbot .and. kup(jl,jk+1) > 0. ) then
                  ikb = min(klev-1,jk+1)
                  klab(jl,ikb) = 2
                  klab(jl,jk) = 2
                  kcbot(jl) = ikb
                  plu(jl,jk+1) = 1.0e-8
                else if ( zpdifftop <= zpdiffbot .and.kup(jl,jk) > 0. ) then
                  klab(jl,jk) = 2
                  kcbot(jl) = jk
                end if
              end if

              if(kup(jl,jk) .lt. 0.)then
                loflag(jl) = .false.
                if(plu(jl,jk+1) .gt. 0.) then
                  kctop(jl) = jk
                  lldcum(jl) = .true.
                else
                  lldcum(jl) = .false.
                end if
              else if(plu(jl,jk) .gt. 0.)then
                klab(jl,jk)=2
              else
                klab(jl,jk)=1
              end if
            end if
          end do

        end do ! end all the levels

! accept as deep (ktype=1) only clouds deeper than zdnoprc; once a
! column is flagged deep it is excluded from later departure levels
        needreset = .false.
        do jl=1,klon
          ikb = kcbot(jl)
          ikt = kctop(jl)
          if(paph(jl,ikb) - paph(jl,ikt) < zdnoprc) lldcum(jl) = .false.
          if(lldcum(jl)) then
            ktype(jl) = 1
            ldcum(jl) = .true.
            deepflag(jl) = .true.
            wbase(jl) = sqrt(max(2.*kup(jl,ikb),0.))
            cubot(jl) = ikb
            cutop(jl) = ikt
            kdpl(jl) = levels+1
            needreset = .true.
            resetflag(jl)= .true.
          end if
        end do

! overwrite the shallow first guess with the deep parcel profile
! between departure level and cloud top; environment elsewhere
        if(needreset) then
          do jk=klev,1,-1
            do jl=1,klon
              if(resetflag(jl)) then
                ikt = kctop(jl)
                ikb = kdpl(jl)
                if(jk .le. ikb .and. jk .ge. ikt )then
                  culab(jl,jk) = klab(jl,jk)
                  cutu(jl,jk) = ptu(jl,jk)
                  cuqu(jl,jk) = pqu(jl,jk)
                  culu(jl,jk) = plu(jl,jk)
                else
                  culab(jl,jk) = 1
                  cutu(jl,jk) = ptenh(jl,jk)
                  cuqu(jl,jk) = pqenh(jl,jk)
                  culu(jl,jk) = 0.
                end if
                if ( jk .lt. ikt ) culab(jl,jk) = 0
              end if
            end do
          end do
        end if

      end do ! end all departure-level cycles

      return
      end subroutine cutypen

!-----------------------------------------------------------------
! 
level 3 subroutines 'cuascn' +!----------------------------------------------------------------- + subroutine cuascn & + & (klon, klev, klevp1, klevm1, ptenh,& + & pqenh, puen, pven, pten, pqen,& + & pqsen, pgeo, pgeoh, pap, paph,& + & pqte, pverv, klwmin, ldcum, phcbase,& + & ktype, klab, ptu, pqu, plu,& + & puu, pvu, pmfu, pmfub, & + & pmfus, pmfuq, pmful, plude, pdmfup,& + & kcbot, kctop, kctop0, kcum, ztmst,& + & pqsenh, plglac, lndj, wup, wbase, kdpl, pmfude_rate) + implicit none +! this routine does the calculations for cloud ascents +! for cumulus parameterization +! m.tiedtke e.c.m.w.f. 7/86 modif. 12/89 +! y.wang iprc 11/01 modif. +! c.zhang iprc 05/12 modif. +!***purpose. +! -------- +! to produce cloud ascents for cu-parametrization +! (vertical profiles of t,q,l,u and v and corresponding +! fluxes as well as precipitation rates) +!***interface +! --------- +! this routine is called from *cumastr*. +!***method. +! -------- +! lift surface air dry-adiabatically to cloud base +! and then calculate moist ascent for +! entraining/detraining plume. +! entrainment and detrainment rates differ for +! shallow and deep cumulus convection. +! in case there is no penetrative or shallow convection +! check for possibility of mid level convection +! (cloud base values calculated in *cubasmc*) +!***externals +! --------- +! *cuadjtqn* adjust t and q due to condensation in ascent +! *cuentrn* calculate entrainment/detrainment rates +! *cubasmcn* calculate cloud base values for midlevel convection +!***reference +! --------- +! (tiedtke,1989) +!***input variables: +! ptenh [ztenh] - environ temperature on half levels. (cuini) +! pqenh [zqenh] - env. specific humidity on half levels. (cuini) +! puen - environment wind u-component. (mssflx) +! pven - environment wind v-component. (mssflx) +! pten - environment temperature. (mssflx) +! pqen - environment specific humidity. (mssflx) +! pqsen - environment saturation specific humidity. (mssflx) +! pgeo - geopotential. 
(mssflx) +! pgeoh [zgeoh] - geopotential on half levels, (mssflx) +! pap - pressure in pa. (mssflx) +! paph - pressure of half levels. (mssflx) +! pqte - moisture convergence (delta q/delta t). (mssflx) +! pverv - large scale vertical velocity (omega). (mssflx) +! klwmin [ilwmin] - level of minimum omega. (cuini) +! klab [ilab] - level label - 1: sub-cloud layer. +! 2: condensation level (cloud base) +! pmfub [zmfub] - updraft mass flux at cloud base. (cumastr) +!***variables modified by cuasc: +! ldcum - logical denoting profiles. (cubase) +! ktype - convection type - 1: penetrative (cumastr) +! 2: stratocumulus (cumastr) +! 3: mid-level (cuasc) +! ptu - cloud temperature. +! pqu - cloud specific humidity. +! plu - cloud liquid water (moisture condensed out) +! puu [zuu] - cloud momentum u-component. +! pvu [zvu] - cloud momentum v-component. +! pmfu - updraft mass flux. +! pmfus [zmfus] - updraft flux of dry static energy. (cubasmc) +! pmfuq [zmfuq] - updraft flux of specific humidity. +! pmful [zmful] - updraft flux of cloud liquid water. +! plude - liquid water returned to environment by detrainment. +! pdmfup [zmfup] - +! kcbot - cloud base level. (cubase) +! kctop - cloud top level +! kctop0 [ictop0] - estimate of cloud top. (cumastr) +! 
kcum [icum] - flag to control the call + + integer klev,klon,klevp1,klevm1 + real ptenh(klon,klev), pqenh(klon,klev), & + & puen(klon,klev), pven(klon,klev),& + & pten(klon,klev), pqen(klon,klev),& + & pgeo(klon,klev), pgeoh(klon,klevp1),& + & pap(klon,klev), paph(klon,klevp1),& + & pqsen(klon,klev), pqte(klon,klev),& + & pverv(klon,klev), pqsenh(klon,klev) + real ptu(klon,klev), pqu(klon,klev),& + & puu(klon,klev), pvu(klon,klev),& + & pmfu(klon,klev), zph(klon),& + & pmfub(klon), & + & pmfus(klon,klev), pmfuq(klon,klev),& + & plu(klon,klev), plude(klon,klev),& + & pmful(klon,klev), pdmfup(klon,klev) + real zdmfen(klon), zdmfde(klon),& + & zmfuu(klon), zmfuv(klon),& + & zpbase(klon), zqold(klon) + real phcbase(klon), zluold(klon) + real zprecip(klon), zlrain(klon,klev) + real zbuo(klon,klev), kup(klon,klev) + real wup(klon) + real wbase(klon), zodetr(klon,klev) + real plglac(klon,klev) + + real eta(klon),dz(klon) + + integer klwmin(klon), ktype(klon),& + & klab(klon,klev), kcbot(klon),& + & kctop(klon), kctop0(klon) + integer lndj(klon) + logical ldcum(klon), loflag(klon) + logical llo2,llo3, llo1(klon) + + integer kdpl(klon) + real zoentr(klon), zdpmean(klon) + real pdmfen(klon,klev), pmfude_rate(klon,klev) +! local variables + integer jl,jk + integer ikb,icum,itopm2,ik,icall,is,kcum,jlm,jll + integer jlx(klon) + real ztmst,zcons2,zfacbuo,zprcdgw,z_cwdrag,z_cldmax,z_cwifrac,z_cprc2 + real zmftest,zmfmax,zqeen,zseen,zscde,zqude + real zmfusk,zmfuqk,zmfulk + real zbc,zbe,zkedke,zmfun,zwu,zprcon,zdt,zcbf,zzco + real zlcrit,zdfi,zc,zd,zint,zlnew,zvw,zvi,zalfaw,zrold + real zrnew,zz,zdmfeu,zdmfdu,dp + real zfac,zbuoc,zdkbuo,zdken,zvv,zarg,zchange,zxe,zxs,zdshrd + real atop1,atop2,abot +!-------------------------------- +!* 1. 
specify parameters +!-------------------------------- + zcons2=3./(g*ztmst) + zfacbuo = 0.5/(1.+0.5) + zprcdgw = cprcon*zrg + z_cldmax = 5.e-3 + z_cwifrac = 0.5 + z_cprc2 = 0.5 + z_cwdrag = (3.0/8.0)*0.506/0.2 +!--------------------------------- +! 2. set default values +!--------------------------------- + llo3 = .false. + do jl=1,klon + zluold(jl)=0. + wup(jl)=0. + zdpmean(jl)=0. + zoentr(jl)=0. + if(.not.ldcum(jl)) then + ktype(jl)=0 + kcbot(jl) = -1 + pmfub(jl) = 0. + pqu(jl,klev) = 0. + end if + end do + + ! initialize variout quantities + do jk=1,klev + do jl=1,klon + if(jk.ne.kcbot(jl)) plu(jl,jk)=0. + pmfu(jl,jk)=0. + pmfus(jl,jk)=0. + pmfuq(jl,jk)=0. + pmful(jl,jk)=0. + plude(jl,jk)=0. + plglac(jl,jk)=0. + pdmfup(jl,jk)=0. + zlrain(jl,jk)=0. + zbuo(jl,jk)=0. + kup(jl,jk)=0. + pdmfen(jl,jk) = 0. + pmfude_rate(jl,jk) = 0. + if(.not.ldcum(jl).or.ktype(jl).eq.3) klab(jl,jk)=0 + if(.not.ldcum(jl).and.paph(jl,jk).lt.4.e4) kctop0(jl)=jk + end do + end do + + do jl = 1,klon + if ( ktype(jl) == 3 ) ldcum(jl) = .false. + end do +!------------------------------------------------ +! 3.0 initialize values at cloud base level +!------------------------------------------------ + do jl=1,klon + kctop(jl)=kcbot(jl) + if(ldcum(jl)) then + ikb = kcbot(jl) + kup(jl,ikb) = 0.5*wbase(jl)**2 + pmfu(jl,ikb) = pmfub(jl) + pmfus(jl,ikb) = pmfub(jl)*(cpd*ptu(jl,ikb)+pgeoh(jl,ikb)) + pmfuq(jl,ikb) = pmfub(jl)*pqu(jl,ikb) + pmful(jl,ikb) = pmfub(jl)*plu(jl,ikb) + end if + end do +! +!----------------------------------------------------------------- +! 4. do ascent: subcloud layer (klab=1) ,clouds (klab=2) +! by doing first dry-adiabatic ascent and then +! by adjusting t,q and l accordingly in *cuadjtqn*, +! then check for buoyancy and set flags accordingly +!----------------------------------------------------------------- +! + do jk=klevm1,3,-1 +! specify cloud base values for midlevel convection +! in *cubasmc* in case there is not already convection +! 
--------------------------------------------------------------------- + ik=jk + call cubasmcn& + & (klon, klev, klevm1, ik, pten,& + & pqen, pqsen, puen, pven, pverv,& + & pgeo, pgeoh, ldcum, ktype, klab, zlrain,& + & pmfu, pmfub, kcbot, ptu,& + & pqu, plu, puu, pvu, pmfus,& + & pmfuq, pmful, pdmfup) + is = 0 + jlm = 0 + do jl = 1,klon + loflag(jl) = .false. + zprecip(jl) = 0. + llo1(jl) = .false. + is = is + klab(jl,jk+1) + if ( klab(jl,jk+1) == 0 ) klab(jl,jk) = 0 + if ( (ldcum(jl) .and. klab(jl,jk+1) == 2) .or. & + (ktype(jl) == 3 .and. klab(jl,jk+1) == 1) ) then + loflag(jl) = .true. + jlm = jlm + 1 + jlx(jlm) = jl + end if + zph(jl) = paph(jl,jk) + if ( ktype(jl) == 3 .and. jk == kcbot(jl) ) then + zmfmax = (paph(jl,jk)-paph(jl,jk-1))*zcons2 + if ( pmfub(jl) > zmfmax ) then + zfac = zmfmax/pmfub(jl) + pmfu(jl,jk+1) = pmfu(jl,jk+1)*zfac + pmfus(jl,jk+1) = pmfus(jl,jk+1)*zfac + pmfuq(jl,jk+1) = pmfuq(jl,jk+1)*zfac + pmfub(jl) = zmfmax + end if + pmfub(jl)=min(pmfub(jl),zmfmax) + end if + end do + + if(is.gt.0) llo3 = .true. +! +!* specify entrainment rates in *cuentr* +! ------------------------------------- + ik=jk + call cuentrn(klon,klev,ik,kcbot,ldcum,llo3, & + pgeoh,pmfu,zdmfen,zdmfde) +! +! do adiabatic ascent for entraining/detraining plume + if(llo3) then +! ------------------------------------------------------- +! + do jl = 1,klon + zqold(jl) = 0. + end do + do jll = 1 , jlm + jl = jlx(jll) + zdmfde(jl) = min(zdmfde(jl),0.75*pmfu(jl,jk+1)) + if ( jk == kcbot(jl) ) then + zoentr(jl) = -1.75e-3*(min(1.,pqen(jl,jk)/pqsen(jl,jk)) - & + 1.)*(pgeoh(jl,jk)-pgeoh(jl,jk+1))*zrg + zoentr(jl) = min(0.4,zoentr(jl))*pmfu(jl,jk+1) + end if + if ( jk < kcbot(jl) ) then + zmfmax = (paph(jl,jk)-paph(jl,jk-1))*zcons2 + zxs = max(pmfu(jl,jk+1)-zmfmax,0.) 
+ wup(jl) = wup(jl) + kup(jl,jk+1)*(pap(jl,jk+1)-pap(jl,jk)) + zdpmean(jl) = zdpmean(jl) + pap(jl,jk+1) - pap(jl,jk) + zdmfen(jl) = zoentr(jl) + if ( ktype(jl) >= 2 ) then + zdmfen(jl) = 2.0*zdmfen(jl) + zdmfde(jl) = zdmfen(jl) + end if + zdmfde(jl) = zdmfde(jl) * & + (1.6-min(1.,pqen(jl,jk)/pqsen(jl,jk))) + zmftest = pmfu(jl,jk+1) + zdmfen(jl) - zdmfde(jl) + zchange = max(zmftest-zmfmax,0.) + zxe = max(zchange-zxs,0.) + zdmfen(jl) = zdmfen(jl) - zxe + zchange = zchange - zxe + zdmfde(jl) = zdmfde(jl) + zchange + end if + pdmfen(jl,jk) = zdmfen(jl) - zdmfde(jl) + pmfu(jl,jk) = pmfu(jl,jk+1) + zdmfen(jl) - zdmfde(jl) + zqeen = pqenh(jl,jk+1)*zdmfen(jl) + zseen = (cpd*ptenh(jl,jk+1)+pgeoh(jl,jk+1))*zdmfen(jl) + zscde = (cpd*ptu(jl,jk+1)+pgeoh(jl,jk+1))*zdmfde(jl) + zqude = pqu(jl,jk+1)*zdmfde(jl) + plude(jl,jk) = plu(jl,jk+1)*zdmfde(jl) + zmfusk = pmfus(jl,jk+1) + zseen - zscde + zmfuqk = pmfuq(jl,jk+1) + zqeen - zqude + zmfulk = pmful(jl,jk+1) - plude(jl,jk) + plu(jl,jk) = zmfulk*(1./max(cmfcmin,pmfu(jl,jk))) + pqu(jl,jk) = zmfuqk*(1./max(cmfcmin,pmfu(jl,jk))) + ptu(jl,jk) = (zmfusk * & + (1./max(cmfcmin,pmfu(jl,jk)))-pgeoh(jl,jk))*rcpd + ptu(jl,jk) = max(100.,ptu(jl,jk)) + ptu(jl,jk) = min(400.,ptu(jl,jk)) + zqold(jl) = pqu(jl,jk) + zlrain(jl,jk) = zlrain(jl,jk+1)*(pmfu(jl,jk+1)-zdmfde(jl)) * & + (1./max(cmfcmin,pmfu(jl,jk))) + zluold(jl) = plu(jl,jk) + end do +! reset to environmental values if below departure level + do jl = 1,klon + if ( jk > kdpl(jl) ) then + ptu(jl,jk) = ptenh(jl,jk) + pqu(jl,jk) = pqenh(jl,jk) + plu(jl,jk) = 0. + zluold(jl) = plu(jl,jk) + end if + end do +!* do corrections for moist ascent +!* by adjusting t,q and l in *cuadjtq* +!------------------------------------------------ + ik=jk + icall=1 +! + if ( jlm > 0 ) then + call cuadjtqn(klon,klev,ik,zph,ptu,pqu,loflag,icall) + end if +! 
compute the upfraft speed in cloud layer + do jll = 1 , jlm + jl = jlx(jll) + if ( pqu(jl,jk) /= zqold(jl) ) then + plglac(jl,jk) = plu(jl,jk) * & + ((1.-foealfa(ptu(jl,jk)))- & + (1.-foealfa(ptu(jl,jk+1)))) + ptu(jl,jk) = ptu(jl,jk) + ralfdcp*plglac(jl,jk) + end if + end do + do jll = 1 , jlm + jl = jlx(jll) + if ( pqu(jl,jk) /= zqold(jl) ) then + klab(jl,jk) = 2 + plu(jl,jk) = plu(jl,jk) + zqold(jl) - pqu(jl,jk) + zbc = ptu(jl,jk)*(1.+vtmpc1*pqu(jl,jk)-plu(jl,jk+1) - & + zlrain(jl,jk+1)) + zbe = ptenh(jl,jk)*(1.+vtmpc1*pqenh(jl,jk)) + zbuo(jl,jk) = zbc - zbe +! set flags for the case of midlevel convection + if ( ktype(jl) == 3 .and. klab(jl,jk+1) == 1 ) then + if ( zbuo(jl,jk) > -0.5 ) then + ldcum(jl) = .true. + kctop(jl) = jk + kup(jl,jk) = 0.5 + else + klab(jl,jk) = 0 + pmfu(jl,jk) = 0. + plude(jl,jk) = 0. + plu(jl,jk) = 0. + end if + end if + if ( klab(jl,jk+1) == 2 ) then + if ( zbuo(jl,jk) < 0. ) then + ptenh(jl,jk) = 0.5*(pten(jl,jk)+pten(jl,jk-1)) + pqenh(jl,jk) = 0.5*(pqen(jl,jk)+pqen(jl,jk-1)) + zbuo(jl,jk) = zbc - ptenh(jl,jk)*(1.+vtmpc1*pqenh(jl,jk)) + end if + zbuoc = (zbuo(jl,jk) / & + (ptenh(jl,jk)*(1.+vtmpc1*pqenh(jl,jk)))+zbuo(jl,jk+1) / & + (ptenh(jl,jk+1)*(1.+vtmpc1*pqenh(jl,jk+1))))*0.5 + zdkbuo = (pgeoh(jl,jk)-pgeoh(jl,jk+1))*zfacbuo*zbuoc +! mixing and "pressure" gradient term in upper troposphere + if ( zdmfen(jl) > 0. ) then + zdken = min(1.,(1.+z_cwdrag)*zdmfen(jl) / & + max(cmfcmin,pmfu(jl,jk+1))) + else + zdken = min(1.,(1.+z_cwdrag)*zdmfde(jl) / & + max(cmfcmin,pmfu(jl,jk+1))) + end if + kup(jl,jk) = (kup(jl,jk+1)*(1.-zdken)+zdkbuo) / & + (1.+zdken) + if ( zbuo(jl,jk) < 0. ) then + zkedke = kup(jl,jk)/max(1.e-10,kup(jl,jk+1)) + zkedke = max(0.,min(1.,zkedke)) + zmfun = sqrt(zkedke)*pmfu(jl,jk+1) + zdmfde(jl) = max(zdmfde(jl),pmfu(jl,jk+1)-zmfun) + plude(jl,jk) = plu(jl,jk+1)*zdmfde(jl) + pmfu(jl,jk) = pmfu(jl,jk+1) + zdmfen(jl) - zdmfde(jl) + end if + if ( zbuo(jl,jk) > 0. 
) then + ikb = kcbot(jl) + zoentr(jl) = 1.75e-3*(0.3-(min(1.,pqen(jl,jk-1) / & + pqsen(jl,jk-1))-1.))*(pgeoh(jl,jk-1)-pgeoh(jl,jk)) * & + zrg*min(1.,pqsen(jl,jk)/pqsen(jl,ikb))**3 + zoentr(jl) = min(0.4,zoentr(jl))*pmfu(jl,jk) + else + zoentr(jl) = 0. + end if +! erase values if below departure level + if ( jk > kdpl(jl) ) then + pmfu(jl,jk) = pmfu(jl,jk+1) + kup(jl,jk) = 0.5 + end if + if ( kup(jl,jk) > 0. .and. pmfu(jl,jk) > 0. ) then + kctop(jl) = jk + llo1(jl) = .true. + else + klab(jl,jk) = 0 + pmfu(jl,jk) = 0. + kup(jl,jk) = 0. + zdmfde(jl) = pmfu(jl,jk+1) + plude(jl,jk) = plu(jl,jk+1)*zdmfde(jl) + end if +! save detrainment rates for updraught + if ( pmfu(jl,jk+1) > 0. ) pmfude_rate(jl,jk) = zdmfde(jl) + end if + else if ( ktype(jl) == 2 .and. pqu(jl,jk) == zqold(jl) ) then + klab(jl,jk) = 0 + pmfu(jl,jk) = 0. + kup(jl,jk) = 0. + zdmfde(jl) = pmfu(jl,jk+1) + plude(jl,jk) = plu(jl,jk+1)*zdmfde(jl) + pmfude_rate(jl,jk) = zdmfde(jl) + end if + end do + + do jl = 1,klon + if ( llo1(jl) ) then +! conversions only proceeds if plu is greater than a threshold liquid water +! content of 0.3 g/kg over water and 0.5 g/kg over land to prevent precipitation +! generation from small water contents. + if ( lndj(jl).eq.1 ) then + zdshrd = 5.e-4 + else + zdshrd = 3.e-4 + end if + ikb=kcbot(jl) +! if((paph(jl,ikb)-paph(jl,jk))>zdnoprc) then + if ( plu(jl,jk) > zdshrd )then + zwu = min(15.0,sqrt(2.*max(0.1,kup(jl,jk+1)))) + zprcon = zprcdgw/(0.75*zwu) +! PARAMETERS FOR BERGERON-FINDEISEN PROCESS (T < -5C) + zdt = min(rtber-rtice,max(rtber-ptu(jl,jk),0.)) + zcbf = 1. 
+ z_cprc2*sqrt(zdt) + zzco = zprcon*zcbf + zlcrit = zdshrd/zcbf + zdfi = pgeoh(jl,jk) - pgeoh(jl,jk+1) + zc = (plu(jl,jk)-zluold(jl)) + zarg = (plu(jl,jk)/zlcrit)**2 + if ( zarg < 25.0 ) then + zd = zzco*(1.-exp(-zarg))*zdfi + else + zd = zzco*zdfi + end if + zint = exp(-zd) + zlnew = zluold(jl)*zint + zc/zd*(1.-zint) + zlnew = max(0.,min(plu(jl,jk),zlnew)) + zlnew = min(z_cldmax,zlnew) + zprecip(jl) = max(0.,zluold(jl)+zc-zlnew) + pdmfup(jl,jk) = zprecip(jl)*pmfu(jl,jk) + zlrain(jl,jk) = zlrain(jl,jk) + zprecip(jl) + plu(jl,jk) = zlnew + end if + end if + end do + do jl = 1, klon + if ( llo1(jl) ) then + if ( zlrain(jl,jk) > 0. ) then + zvw = 21.18*zlrain(jl,jk)**0.2 + zvi = z_cwifrac*zvw + zalfaw = foealfa(ptu(jl,jk)) + zvv = zalfaw*zvw + (1.-zalfaw)*zvi + zrold = zlrain(jl,jk) - zprecip(jl) + zc = zprecip(jl) + zwu = min(15.0,sqrt(2.*max(0.1,kup(jl,jk)))) + zd = zvv/zwu + zint = exp(-zd) + zrnew = zrold*zint + zc/zd*(1.-zint) + zrnew = max(0.,min(zlrain(jl,jk),zrnew)) + zlrain(jl,jk) = zrnew + end if + end if + end do + do jll = 1 , jlm + jl = jlx(jll) + pmful(jl,jk) = plu(jl,jk)*pmfu(jl,jk) + pmfus(jl,jk) = (cpd*ptu(jl,jk)+pgeoh(jl,jk))*pmfu(jl,jk) + pmfuq(jl,jk) = pqu(jl,jk)*pmfu(jl,jk) + end do + end if + end do +!---------------------------------------------------------------------- +! 5. final calculations +! ------------------ + do jl = 1,klon + if ( kctop(jl) == -1 ) ldcum(jl) = .false. + kcbot(jl) = max(kcbot(jl),kctop(jl)) + if ( ldcum(jl) ) then + wup(jl) = max(1.e-2,wup(jl)/max(1.,zdpmean(jl))) + wup(jl) = sqrt(2.*wup(jl)) + end if + end do + + return + end subroutine cuascn +!--------------------------------------------------------- +! 
level 3 souroutines +!-------------------------------------------------------- + subroutine cudlfsn & + & (klon, klev, & + & kcbot, kctop, lndj, ldcum, & + & ptenh, pqenh, puen, pven, & + & pten, pqsen, pgeo, & + & pgeoh, paph, ptu, pqu, plu,& + & puu, pvu, pmfub, prfl, & + & ptd, pqd, pud, pvd, & + & pmfd, pmfds, pmfdq, pdmfdp, & + & kdtop, lddraf) + +! this routine calculates level of free sinking for +! cumulus downdrafts and specifies t,q,u and v values + +! m.tiedtke e.c.m.w.f. 12/86 modif. 12/89 + +! purpose. +! -------- +! to produce lfs-values for cumulus downdrafts +! for massflux cumulus parameterization + +! interface +! --------- +! this routine is called from *cumastr*. +! input are environmental values of t,q,u,v,p,phi +! and updraft values t,q,u and v and also +! cloud base massflux and cu-precipitation rate. +! it returns t,q,u and v values and massflux at lfs. + +! method. + +! check for negative buoyancy of air of equal parts of +! moist environmental air and cloud air. + +! parameter description units +! --------- ----------- ----- +! input parameters (integer): + +! *klon* number of grid points per packet +! *klev* number of levels +! *kcbot* cloud base level +! *kctop* cloud top level + +! input parameters (logical): + +! *lndj* land sea mask (1 for land) +! *ldcum* flag: .true. for convective points + +! input parameters (real): + +! *ptenh* env. temperature (t+1) on half levels k +! *pqenh* env. spec. humidity (t+1) on half levels kg/kg +! *puen* provisional environment u-velocity (t+1) m/s +! *pven* provisional environment v-velocity (t+1) m/s +! *pten* provisional environment temperature (t+1) k +! *pqsen* environment spec. saturation humidity (t+1) kg/kg +! *pgeo* geopotential m2/s2 +! *pgeoh* geopotential on half levels m2/s2 +! *paph* provisional pressure on half levels pa +! *ptu* temperature in updrafts k +! *pqu* spec. humidity in updrafts kg/kg +! *plu* liquid water content in updrafts kg/kg +! *puu* u-velocity in updrafts m/s +! 
*pvu* v-velocity in updrafts m/s +! *pmfub* massflux in updrafts at cloud base kg/(m2*s) + +! updated parameters (real): + +! *prfl* precipitation rate kg/(m2*s) + +! output parameters (real): + +! *ptd* temperature in downdrafts k +! *pqd* spec. humidity in downdrafts kg/kg +! *pud* u-velocity in downdrafts m/s +! *pvd* v-velocity in downdrafts m/s +! *pmfd* massflux in downdrafts kg/(m2*s) +! *pmfds* flux of dry static energy in downdrafts j/(m2*s) +! *pmfdq* flux of spec. humidity in downdrafts kg/(m2*s) +! *pdmfdp* flux difference of precip. in downdrafts kg/(m2*s) + +! output parameters (integer): + +! *kdtop* top level of downdrafts + +! output parameters (logical): + +! *lddraf* .true. if downdrafts exist + +! externals +! --------- +! *cuadjtq* for calculating wet bulb t and q at lfs +!---------------------------------------------------------------------- + implicit none + + integer klev,klon + real ptenh(klon,klev), pqenh(klon,klev), & + & puen(klon,klev), pven(klon,klev), & + & pten(klon,klev), pqsen(klon,klev), & + & pgeo(klon,klev), & + & pgeoh(klon,klev+1), paph(klon,klev+1),& + & ptu(klon,klev), pqu(klon,klev), & + & puu(klon,klev), pvu(klon,klev), & + & plu(klon,klev), & + & pmfub(klon), prfl(klon) + + real ptd(klon,klev), pqd(klon,klev), & + & pud(klon,klev), pvd(klon,klev), & + & pmfd(klon,klev), pmfds(klon,klev), & + & pmfdq(klon,klev), pdmfdp(klon,klev) + integer kcbot(klon), kctop(klon), & + & kdtop(klon), ikhsmin(klon) + logical ldcum(klon), & + & lddraf(klon) + integer lndj(klon) + + real ztenwb(klon,klev), zqenwb(klon,klev), & + & zcond(klon), zph(klon), & + & zhsmin(klon) + logical llo2(klon) +! local variables + integer jl,jk + integer is,ik,icall,ike + real zhsk,zttest,zqtest,zbuo,zmftop + +!---------------------------------------------------------------------- + +! 1. set default values for downdrafts +! --------------------------------- + do jl=1,klon + lddraf(jl)=.false. 
+ kdtop(jl)=klev+1 + ikhsmin(jl)=klev+1 + zhsmin(jl)=1.e8 + enddo +!---------------------------------------------------------------------- + +! 2. determine level of free sinking: +! downdrafts shall start at model level of minimum +! of saturation moist static energy or below +! respectively + +! for every point and proceed as follows: + +! (1) determine level of minimum of hs +! (2) determine wet bulb environmental t and q +! (3) do mixing with cumulus cloud air +! (4) check for negative buoyancy +! (5) if buoyancy>0 repeat (2) to (4) for next +! level below + +! the assumption is that air of downdrafts is mixture +! of 50% cloud air + 50% environmental air at wet bulb +! temperature (i.e. which became saturated due to +! evaporation of rain and cloud water) +! ---------------------------------------------------- + do jk=3,klev-2 + do jl=1,klon + zhsk=cpd*pten(jl,jk)+pgeo(jl,jk) + & + & foelhm(pten(jl,jk))*pqsen(jl,jk) + if(zhsk .lt. zhsmin(jl)) then + zhsmin(jl) = zhsk + ikhsmin(jl)= jk + end if + end do + end do + + + ike=klev-3 + do jk=3,ike + +! 2.1 calculate wet-bulb temperature and moisture +! for environmental air in *cuadjtq* +! ------------------------------------------- + is=0 + do jl=1,klon + ztenwb(jl,jk)=ptenh(jl,jk) + zqenwb(jl,jk)=pqenh(jl,jk) + zph(jl)=paph(jl,jk) + llo2(jl)=ldcum(jl).and.prfl(jl).gt.0..and..not.lddraf(jl).and. & + & (jk.lt.kcbot(jl).and.jk.gt.kctop(jl)).and. jk.ge.ikhsmin(jl) + if(llo2(jl))then + is=is+1 + endif + enddo + if(is.eq.0) cycle + + ik=jk + icall=2 + call cuadjtqn & + & ( klon, klev, ik, zph, ztenwb, zqenwb, llo2, icall) + +! 2.2 do mixing of cumulus and environmental air +! and check for negative buoyancy. +! then set values for downdraft at lfs. +! 
---------------------------------------- + do jl=1,klon + if(llo2(jl)) then + zttest=0.5*(ptu(jl,jk)+ztenwb(jl,jk)) + zqtest=0.5*(pqu(jl,jk)+zqenwb(jl,jk)) + zbuo=zttest*(1.+vtmpc1 *zqtest)- & + & ptenh(jl,jk)*(1.+vtmpc1 *pqenh(jl,jk)) + zcond(jl)=pqenh(jl,jk)-zqenwb(jl,jk) + zmftop=-cmfdeps*pmfub(jl) + if(zbuo.lt.0..and.prfl(jl).gt.10.*zmftop*zcond(jl)) then + kdtop(jl)=jk + lddraf(jl)=.true. + ptd(jl,jk)=zttest + pqd(jl,jk)=zqtest + pmfd(jl,jk)=zmftop + pmfds(jl,jk)=pmfd(jl,jk)*(cpd*ptd(jl,jk)+pgeoh(jl,jk)) + pmfdq(jl,jk)=pmfd(jl,jk)*pqd(jl,jk) + pdmfdp(jl,jk-1)=-0.5*pmfd(jl,jk)*zcond(jl) + prfl(jl)=prfl(jl)+pdmfdp(jl,jk-1) + endif + endif + enddo + + enddo + + return + end subroutine cudlfsn + +!--------------------------------------------------------- +! level 3 souroutines +!-------------------------------------------------------- +!********************************************** +! subroutine cuddrafn +!********************************************** + subroutine cuddrafn & + & ( klon, klev, lddraf & + & , ptenh, pqenh, puen, pven & + & , pgeo, pgeoh, paph, prfl & + & , ptd, pqd, pud, pvd, pmfu & + & , pmfd, pmfds, pmfdq, pdmfdp, pmfdde_rate ) + +! this routine calculates cumulus downdraft descent + +! m.tiedtke e.c.m.w.f. 12/86 modif. 12/89 + +! purpose. +! -------- +! to produce the vertical profiles for cumulus downdrafts +! (i.e. t,q,u and v and fluxes) + +! interface +! --------- + +! this routine is called from *cumastr*. +! input is t,q,p,phi,u,v at half levels. +! it returns fluxes of s,q and evaporation rate +! and u,v at levels where downdraft occurs + +! method. +! -------- +! calculate moist descent for entraining/detraining plume by +! a) moving air dry-adiabatically to next level below and +! b) correcting for evaporation to obtain saturated state. + +! parameter description units +! --------- ----------- ----- +! input parameters (integer): + +! *klon* number of grid points per packet +! *klev* number of levels + +! 
input parameters (logical): + +! *lddraf* .true. if downdrafts exist + +! input parameters (real): + +! *ptenh* env. temperature (t+1) on half levels k +! *pqenh* env. spec. humidity (t+1) on half levels kg/kg +! *puen* provisional environment u-velocity (t+1) m/s +! *pven* provisional environment v-velocity (t+1) m/s +! *pgeo* geopotential m2/s2 +! *pgeoh* geopotential on half levels m2/s2 +! *paph* provisional pressure on half levels pa +! *pmfu* massflux updrafts kg/(m2*s) + +! updated parameters (real): + +! *prfl* precipitation rate kg/(m2*s) + +! output parameters (real): + +! *ptd* temperature in downdrafts k +! *pqd* spec. humidity in downdrafts kg/kg +! *pud* u-velocity in downdrafts m/s +! *pvd* v-velocity in downdrafts m/s +! *pmfd* massflux in downdrafts kg/(m2*s) +! *pmfds* flux of dry static energy in downdrafts j/(m2*s) +! *pmfdq* flux of spec. humidity in downdrafts kg/(m2*s) +! *pdmfdp* flux difference of precip. in downdrafts kg/(m2*s) + +! externals +! --------- +! *cuadjtq* for adjusting t and q due to evaporation in +! saturated descent +!---------------------------------------------------------------------- + implicit none + + integer klev,klon + real ptenh(klon,klev), pqenh(klon,klev), & + & puen(klon,klev), pven(klon,klev), & + & pgeoh(klon,klev+1), paph(klon,klev+1), & + & pgeo(klon,klev), pmfu(klon,klev) + + real ptd(klon,klev), pqd(klon,klev), & + & pud(klon,klev), pvd(klon,klev), & + & pmfd(klon,klev), pmfds(klon,klev), & + & pmfdq(klon,klev), pdmfdp(klon,klev), & + & prfl(klon) + real pmfdde_rate(klon,klev) + logical lddraf(klon) + + real zdmfen(klon), zdmfde(klon), & + & zcond(klon), zoentr(klon), & + & zbuoy(klon) + real zph(klon) + logical llo2(klon) + logical llo1 +! local variables + integer jl,jk + integer is,ik,icall,ike, itopde(klon) + real zentr,zdz,zzentr,zseen,zqeen,zsdde,zqdde,zdmfdp + real zmfdsk,zmfdqk,zbuo,zrain,zbuoyz,zmfduk,zmfdvk + +!---------------------------------------------------------------------- +! 1. 
calculate moist descent for cumulus downdraft by +! (a) calculating entrainment/detrainment rates, +! including organized entrainment dependent on +! negative buoyancy and assuming +! linear decrease of massflux in pbl +! (b) doing moist descent - evaporative cooling +! and moistening is calculated in *cuadjtq* +! (c) checking for negative buoyancy and +! specifying final t,q,u,v and downward fluxes +! ------------------------------------------------- + do jl=1,klon + zoentr(jl)=0. + zbuoy(jl)=0. + zdmfen(jl)=0. + zdmfde(jl)=0. + enddo + + do jk=klev,1,-1 + do jl=1,klon + pmfdde_rate(jl,jk) = 0. + if((paph(jl,klev+1)-paph(jl,jk)).lt. 60.e2) itopde(jl) = jk + end do + end do + + do jk=3,klev + is=0 + do jl=1,klon + zph(jl)=paph(jl,jk) + llo2(jl)=lddraf(jl).and.pmfd(jl,jk-1).lt.0. + if(llo2(jl)) then + is=is+1 + endif + enddo + if(is.eq.0) cycle + + do jl=1,klon + if(llo2(jl)) then + zentr = entrdd*pmfd(jl,jk-1)*(pgeoh(jl,jk-1)-pgeoh(jl,jk))*zrg + zdmfen(jl)=zentr + zdmfde(jl)=zentr + endif + enddo + + do jl=1,klon + if(llo2(jl)) then + if(jk.gt.itopde(jl)) then + zdmfen(jl)=0. + zdmfde(jl)=pmfd(jl,itopde(jl))* & + & (paph(jl,jk)-paph(jl,jk-1))/ & + & (paph(jl,klev+1)-paph(jl,itopde(jl))) + endif + endif + enddo + + do jl=1,klon + if(llo2(jl)) then + if(jk.le.itopde(jl)) then + zdz=-(pgeoh(jl,jk-1)-pgeoh(jl,jk))*zrg + zzentr=zoentr(jl)*zdz*pmfd(jl,jk-1) + zdmfen(jl)=zdmfen(jl)+zzentr + zdmfen(jl)=max(zdmfen(jl),0.3*pmfd(jl,jk-1)) + zdmfen(jl)=max(zdmfen(jl),-0.75*pmfu(jl,jk)- & + & (pmfd(jl,jk-1)-zdmfde(jl))) + zdmfen(jl)=min(zdmfen(jl),0.) 
+ endif + endif + enddo + + do jl=1,klon + if(llo2(jl)) then + pmfd(jl,jk)=pmfd(jl,jk-1)+zdmfen(jl)-zdmfde(jl) + zseen=(cpd*ptenh(jl,jk-1)+pgeoh(jl,jk-1))*zdmfen(jl) + zqeen=pqenh(jl,jk-1)*zdmfen(jl) + zsdde=(cpd*ptd(jl,jk-1)+pgeoh(jl,jk-1))*zdmfde(jl) + zqdde=pqd(jl,jk-1)*zdmfde(jl) + zmfdsk=pmfds(jl,jk-1)+zseen-zsdde + zmfdqk=pmfdq(jl,jk-1)+zqeen-zqdde + pqd(jl,jk)=zmfdqk*(1./min(-cmfcmin,pmfd(jl,jk))) + ptd(jl,jk)=(zmfdsk*(1./min(-cmfcmin,pmfd(jl,jk)))-& + & pgeoh(jl,jk))*rcpd + ptd(jl,jk)=min(400.,ptd(jl,jk)) + ptd(jl,jk)=max(100.,ptd(jl,jk)) + zcond(jl)=pqd(jl,jk) + endif + enddo + + ik=jk + icall=2 + call cuadjtqn(klon, klev, ik, zph, ptd, pqd, llo2, icall ) + + do jl=1,klon + if(llo2(jl)) then + zcond(jl)=zcond(jl)-pqd(jl,jk) + zbuo=ptd(jl,jk)*(1.+vtmpc1 *pqd(jl,jk))- & + & ptenh(jl,jk)*(1.+vtmpc1 *pqenh(jl,jk)) + if(prfl(jl).gt.0..and.pmfu(jl,jk).gt.0.) then + zrain=prfl(jl)/pmfu(jl,jk) + zbuo=zbuo-ptd(jl,jk)*zrain + endif + if(zbuo.ge.0 .or. prfl(jl).le.(pmfd(jl,jk)*zcond(jl))) then + pmfd(jl,jk)=0. + zbuo=0. + endif + pmfds(jl,jk)=(cpd*ptd(jl,jk)+pgeoh(jl,jk))*pmfd(jl,jk) + pmfdq(jl,jk)=pqd(jl,jk)*pmfd(jl,jk) + zdmfdp=-pmfd(jl,jk)*zcond(jl) + pdmfdp(jl,jk-1)=zdmfdp + prfl(jl)=prfl(jl)+zdmfdp + +! compute organized entrainment for use at next level + zbuoyz=zbuo/ptenh(jl,jk) + zbuoyz=min(zbuoyz,0.0) + zdz=-(pgeo(jl,jk-1)-pgeo(jl,jk)) + zbuoy(jl)=zbuoy(jl)+zbuoyz*zdz + zoentr(jl)=g*zbuoyz*0.5/(1.+zbuoy(jl)) + pmfdde_rate(jl,jk) = -zdmfde(jl) + endif + enddo + + enddo + + return + end subroutine cuddrafn +!--------------------------------------------------------- +! 
level 3 souroutines +!-------------------------------------------------------- + subroutine cuflxn & + & ( klon, klev, ztmst & + & , pten, pqen, pqsen, ptenh, pqenh & + & , paph, pap, pgeoh, lndj, ldcum & + & , kcbot, kctop, kdtop, ktopm2 & + & , ktype, lddraf & + & , pmfu, pmfd, pmfus, pmfds & + & , pmfuq, pmfdq, pmful, plude & + & , pdmfup, pdmfdp, pdpmel, plglac & + & , prain, pmfdde_rate, pmflxr, pmflxs ) + +! m.tiedtke e.c.m.w.f. 7/86 modif. 12/89 + +! purpose +! ------- + +! this routine does the final calculation of convective +! fluxes in the cloud layer and in the subcloud layer + +! interface +! --------- +! this routine is called from *cumastr*. + + +! parameter description units +! --------- ----------- ----- +! input parameters (integer): + +! *klon* number of grid points per packet +! *klev* number of levels +! *kcbot* cloud base level +! *kctop* cloud top level +! *kdtop* top level of downdrafts + +! input parameters (logical): + +! *lndj* land sea mask (1 for land) +! *ldcum* flag: .true. for convective points + +! input parameters (real): + +! *ptsphy* time step for the physics s +! *pten* provisional environment temperature (t+1) k +! *pqen* provisional environment spec. humidity (t+1) kg/kg +! *pqsen* environment spec. saturation humidity (t+1) kg/kg +! *ptenh* env. temperature (t+1) on half levels k +! *pqenh* env. spec. humidity (t+1) on half levels kg/kg +! *paph* provisional pressure on half levels pa +! *pap* provisional pressure on full levels pa +! *pgeoh* geopotential on half levels m2/s2 + +! updated parameters (integer): + +! *ktype* set to zero if ldcum=.false. + +! updated parameters (logical): + +! *lddraf* set to .false. if ldcum=.false. or kdtop= kdtop(jl) + if ( llddraf .and.jk.ge.kdtop(jl)) then + pmfds(jl,jk) = pmfds(jl,jk)-pmfd(jl,jk) * & + (cpd*ptenh(jl,jk)+pgeoh(jl,jk)) + pmfdq(jl,jk) = pmfdq(jl,jk)-pmfd(jl,jk)*pqenh(jl,jk) + else + pmfd(jl,jk) = 0. + pmfds(jl,jk) = 0. + pmfdq(jl,jk) = 0. + pdmfdp(jl,jk-1) = 0. 
+ end if + if ( llddraf .and. pmfd(jl,jk) < 0. .and. & + abs(pmfd(jl,ikb)) < 1.e-20 ) then + idbas(jl) = jk + end if + else + pmfu(jl,jk)=0. + pmfd(jl,jk)=0. + pmfus(jl,jk)=0. + pmfds(jl,jk)=0. + pmfuq(jl,jk)=0. + pmfdq(jl,jk)=0. + pmful(jl,jk)=0. + plglac(jl,jk)=0. + pdmfup(jl,jk-1)=0. + pdmfdp(jl,jk-1)=0. + plude(jl,jk-1)=0. + endif + enddo + enddo + + do jl=1,klon + pmflxr(jl,klev+1) = 0. + pmflxs(jl,klev+1) = 0. + end do + do jl=1,klon + if(ldcum(jl)) then + ikb=kcbot(jl) + ik=ikb+1 + zzp=((paph(jl,klev+1)-paph(jl,ik))/ & + & (paph(jl,klev+1)-paph(jl,ikb))) + if(ktype(jl).eq.3) then + zzp=zzp**2 + endif + pmfu(jl,ik)=pmfu(jl,ikb)*zzp + pmfus(jl,ik)=(pmfus(jl,ikb)- & + & foelhm(ptenh(jl,ikb))*pmful(jl,ikb))*zzp + pmfuq(jl,ik)=(pmfuq(jl,ikb)+pmful(jl,ikb))*zzp + pmful(jl,ik)=0. + endif + enddo + + do jk=ktopm2,klev + do jl=1,klon + if(ldcum(jl).and.jk.gt.kcbot(jl)+1) then + ikb=kcbot(jl)+1 + zzp=((paph(jl,klev+1)-paph(jl,jk))/ & + & (paph(jl,klev+1)-paph(jl,ikb))) + if(ktype(jl).eq.3) then + zzp=zzp**2 + endif + pmfu(jl,jk)=pmfu(jl,ikb)*zzp + pmfus(jl,jk)=pmfus(jl,ikb)*zzp + pmfuq(jl,jk)=pmfuq(jl,ikb)*zzp + pmful(jl,jk)=0. + endif + ik = idbas(jl) + llddraf = lddraf(jl) .and. jk > ik .and. ik < klev + if ( llddraf .and. ik == kcbot(jl)+1 ) then + zzp = ((paph(jl,klev+1)-paph(jl,jk))/(paph(jl,klev+1)-paph(jl,ik))) + if ( ktype(jl) == 3 ) zzp = zzp*zzp + pmfd(jl,jk) = pmfd(jl,ik)*zzp + pmfds(jl,jk) = pmfds(jl,ik)*zzp + pmfdq(jl,jk) = pmfdq(jl,ik)*zzp + pmfdde_rate(jl,jk) = -(pmfd(jl,jk-1)-pmfd(jl,jk)) + else if ( llddraf .and. ik /= kcbot(jl)+1 .and. jk == ik+1 ) then + pmfdde_rate(jl,jk) = -(pmfd(jl,jk-1)-pmfd(jl,jk)) + end if + enddo + enddo +!* 2. calculate rain/snow fall rates +!* calculate melting of snow +!* calculate evaporation of precip +! ------------------------------- + + do jk=ktopm2,klev + do jl=1,klon + if(ldcum(jl) .and. 
jk >=kctop(jl)-1 ) then + prain(jl)=prain(jl)+pdmfup(jl,jk) + if(pmflxs(jl,jk).gt.0..and.pten(jl,jk).gt.tmelt) then + zcons1=zcons1a*(1.+0.5*(pten(jl,jk)-tmelt)) + zfac=zcons1*(paph(jl,jk+1)-paph(jl,jk)) + zsnmlt=min(pmflxs(jl,jk),zfac*(pten(jl,jk)-tmelt)) + pdpmel(jl,jk)=zsnmlt + pqsen(jl,jk)=foeewm(pten(jl,jk)-zsnmlt/zfac)/pap(jl,jk) + endif + zalfaw=foealfa(pten(jl,jk)) + ! + ! No liquid precipitation above melting level + ! + if ( pten(jl,jk) < tmelt .and. zalfaw > 0. ) then + plglac(jl,jk) = plglac(jl,jk)+zalfaw*(pdmfup(jl,jk)+pdmfdp(jl,jk)) + zalfaw = 0. + end if + pmflxr(jl,jk+1)=pmflxr(jl,jk)+zalfaw* & + & (pdmfup(jl,jk)+pdmfdp(jl,jk))+pdpmel(jl,jk) + pmflxs(jl,jk+1)=pmflxs(jl,jk)+(1.-zalfaw)* & + & (pdmfup(jl,jk)+pdmfdp(jl,jk))-pdpmel(jl,jk) + if(pmflxr(jl,jk+1)+pmflxs(jl,jk+1).lt.0.0) then + pdmfdp(jl,jk)=-(pmflxr(jl,jk)+pmflxs(jl,jk)+pdmfup(jl,jk)) + pmflxr(jl,jk+1)=0.0 + pmflxs(jl,jk+1)=0.0 + pdpmel(jl,jk) =0.0 + else if ( pmflxr(jl,jk+1) < 0. ) then + pmflxs(jl,jk+1) = pmflxs(jl,jk+1)+pmflxr(jl,jk+1) + pmflxr(jl,jk+1) = 0. + else if ( pmflxs(jl,jk+1) < 0. ) then + pmflxr(jl,jk+1) = pmflxr(jl,jk+1)+pmflxs(jl,jk+1) + pmflxs(jl,jk+1) = 0. + end if + endif + enddo + enddo + do jk=ktopm2,klev + do jl=1,klon + if(ldcum(jl).and.jk.ge.kcbot(jl)) then + zrfl=pmflxr(jl,jk)+pmflxs(jl,jk) + if(zrfl.gt.1.e-20) then + zdrfl1=zcpecons*max(0.,pqsen(jl,jk)-pqen(jl,jk))*zcucov* & + & (sqrt(paph(jl,jk)/paph(jl,klev+1))/5.09e-3* & + & zrfl/zcucov)**0.5777* & + & (paph(jl,jk+1)-paph(jl,jk)) + zrnew=zrfl-zdrfl1 + zrmin=zrfl-zcucov*max(0.,rhevap(jl)*pqsen(jl,jk) & + & -pqen(jl,jk)) *zcons2*(paph(jl,jk+1)-paph(jl,jk)) + zrnew=max(zrnew,zrmin) + zrfln=max(zrnew,0.) + zdrfl=min(0.,zrfln-zrfl) + zdenom=1./max(1.e-20,pmflxr(jl,jk)+pmflxs(jl,jk)) + zalfaw=foealfa(pten(jl,jk)) + if ( pten(jl,jk) < tmelt ) zalfaw = 0. 
+ zpdr=zalfaw*pdmfdp(jl,jk) + zpds=(1.-zalfaw)*pdmfdp(jl,jk) + pmflxr(jl,jk+1)=pmflxr(jl,jk)+zpdr & + & +pdpmel(jl,jk)+zdrfl*pmflxr(jl,jk)*zdenom + pmflxs(jl,jk+1)=pmflxs(jl,jk)+zpds & + & -pdpmel(jl,jk)+zdrfl*pmflxs(jl,jk)*zdenom + pdmfup(jl,jk)=pdmfup(jl,jk)+zdrfl + if ( pmflxr(jl,jk+1)+pmflxs(jl,jk+1) < 0. ) then + pdmfup(jl,jk) = pdmfup(jl,jk)-(pmflxr(jl,jk+1)+pmflxs(jl,jk+1)) + pmflxr(jl,jk+1) = 0. + pmflxs(jl,jk+1) = 0. + pdpmel(jl,jk) = 0. + else if ( pmflxr(jl,jk+1) < 0. ) then + pmflxs(jl,jk+1) = pmflxs(jl,jk+1)+pmflxr(jl,jk+1) + pmflxr(jl,jk+1) = 0. + else if ( pmflxs(jl,jk+1) < 0. ) then + pmflxr(jl,jk+1) = pmflxr(jl,jk+1)+pmflxs(jl,jk+1) + pmflxs(jl,jk+1) = 0. + end if + else + pmflxr(jl,jk+1)=0.0 + pmflxs(jl,jk+1)=0.0 + pdmfdp(jl,jk)=0.0 + pdpmel(jl,jk)=0.0 + endif + endif + enddo + enddo + + return + end subroutine cuflxn +!--------------------------------------------------------- +! level 3 souroutines +!-------------------------------------------------------- + subroutine cudtdqn(klon,klev,ktopm2,kctop,kdtop,ldcum, & + lddraf,ztmst,paph,pgeoh,pgeo,pten,ptenh,pqen, & + pqenh,pqsen,plglac,plude,pmfu,pmfd,pmfus,pmfds, & + pmfuq,pmfdq,pmful,pdmfup,pdmfdp,pdpmel,ptent,ptenq,pcte) + implicit none + integer klon,klev,ktopm2 + integer kctop(klon), kdtop(klon) + logical ldcum(klon), lddraf(klon) + real ztmst + real paph(klon,klev+1), pgeoh(klon,klev+1) + real pgeo(klon,klev), pten(klon,klev), & + pqen(klon,klev), ptenh(klon,klev),& + pqenh(klon,klev), pqsen(klon,klev),& + plglac(klon,klev), plude(klon,klev) + real pmfu(klon,klev), pmfd(klon,klev),& + pmfus(klon,klev), pmfds(klon,klev),& + pmfuq(klon,klev), pmfdq(klon,klev),& + pmful(klon,klev), pdmfup(klon,klev),& + pdpmel(klon,klev), pdmfdp(klon,klev) + real ptent(klon,klev), ptenq(klon,klev) + real pcte(klon,klev) + +! 
local variables + integer jk , ik , jl + real zalv , zzp + real zmfus(klon,klev) , zmfuq(klon,klev) + real zmfds(klon,klev) , zmfdq(klon,klev) + real zdtdt(klon,klev) , zdqdt(klon,klev) , zdp(klon,klev) + !* 1.0 SETUP AND INITIALIZATIONS + ! ------------------------- + do jk = 1 , klev + do jl = 1, klon + if ( ldcum(jl) ) then + zdp(jl,jk) = g/(paph(jl,jk+1)-paph(jl,jk)) + zmfus(jl,jk) = pmfus(jl,jk) + zmfds(jl,jk) = pmfds(jl,jk) + zmfuq(jl,jk) = pmfuq(jl,jk) + zmfdq(jl,jk) = pmfdq(jl,jk) + end if + end do + end do + !----------------------------------------------------------------------- + !* 2.0 COMPUTE TENDENCIES + ! ------------------ + do jk = ktopm2 , klev + if ( jk < klev ) then + do jl = 1,klon + if ( ldcum(jl) ) then + zalv = foelhm(pten(jl,jk)) + zdtdt(jl,jk) = zdp(jl,jk)*rcpd * & + (zmfus(jl,jk+1)-zmfus(jl,jk)+zmfds(jl,jk+1) - & + zmfds(jl,jk)+alf*plglac(jl,jk)-alf*pdpmel(jl,jk) - & + zalv*(pmful(jl,jk+1)-pmful(jl,jk)-plude(jl,jk)-pdmfup(jl,jk)-pdmfdp(jl,jk))) + zdqdt(jl,jk) = zdp(jl,jk)*(zmfuq(jl,jk+1) - & + zmfuq(jl,jk)+zmfdq(jl,jk+1)-zmfdq(jl,jk)+pmful(jl,jk+1) - & + pmful(jl,jk)-plude(jl,jk)-pdmfup(jl,jk)-pdmfdp(jl,jk)) + end if + end do + else + do jl = 1,klon + if ( ldcum(jl) ) then + zalv = foelhm(pten(jl,jk)) + zdtdt(jl,jk) = -zdp(jl,jk)*rcpd * & + (zmfus(jl,jk)+zmfds(jl,jk)+alf*pdpmel(jl,jk) - & + zalv*(pmful(jl,jk)+pdmfup(jl,jk)+pdmfdp(jl,jk))) + zdqdt(jl,jk) = -zdp(jl,jk)*(zmfuq(jl,jk) + & + zmfdq(jl,jk)+(pmful(jl,jk)+pdmfup(jl,jk)+pdmfdp(jl,jk))) + end if + end do + end if + end do + !--------------------------------------------------------------- + !* 3.0 UPDATE TENDENCIES + ! ----------------- + do jk = ktopm2 , klev + do jl = 1, klon + if ( ldcum(jl) ) then + ptent(jl,jk) = ptent(jl,jk) + zdtdt(jl,jk) + ptenq(jl,jk) = ptenq(jl,jk) + zdqdt(jl,jk) + pcte(jl,jk) = zdp(jl,jk)*plude(jl,jk) + end if + end do + end do + + return + end subroutine cudtdqn +!--------------------------------------------------------- +! 
level 3 souroutines +!-------------------------------------------------------- + subroutine cududvn(klon,klev,ktopm2,ktype,kcbot,kctop,ldcum, & + ztmst,paph,puen,pven,pmfu,pmfd,puu,pud,pvu,pvd,ptenu, & + ptenv) + implicit none + integer klon,klev,ktopm2 + integer ktype(klon), kcbot(klon), kctop(klon) + logical ldcum(klon) + real ztmst + real paph(klon,klev+1) + real puen(klon,klev), pven(klon,klev),& + pmfu(klon,klev), pmfd(klon,klev),& + puu(klon,klev), pud(klon,klev),& + pvu(klon,klev), pvd(klon,klev) + real ptenu(klon,klev), ptenv(klon,klev) + +!local variables + real zuen(klon,klev) , zven(klon,klev) , zmfuu(klon,klev), & + zmfdu(klon,klev), zmfuv(klon,klev), zmfdv(klon,klev) + + integer ik , ikb , jk , jl + real zzp, zdtdt + + real zdudt(klon,klev), zdvdt(klon,klev), zdp(klon,klev) +! + do jk = 1 , klev + do jl = 1, klon + if ( ldcum(jl) ) then + zuen(jl,jk) = puen(jl,jk) + zven(jl,jk) = pven(jl,jk) + zdp(jl,jk) = g/(paph(jl,jk+1)-paph(jl,jk)) + end if + end do + end do +!---------------------------------------------------------------------- +!* 1.0 CALCULATE FLUXES AND UPDATE U AND V TENDENCIES +! ---------------------------------------------- + do jk = ktopm2 , klev + ik = jk - 1 + do jl = 1,klon + if ( ldcum(jl) ) then + zmfuu(jl,jk) = pmfu(jl,jk)*(puu(jl,jk)-zuen(jl,ik)) + zmfuv(jl,jk) = pmfu(jl,jk)*(pvu(jl,jk)-zven(jl,ik)) + zmfdu(jl,jk) = pmfd(jl,jk)*(pud(jl,jk)-zuen(jl,ik)) + zmfdv(jl,jk) = pmfd(jl,jk)*(pvd(jl,jk)-zven(jl,ik)) + end if + end do + end do + ! linear fluxes below cloud + do jk = ktopm2 , klev + do jl = 1, klon + if ( ldcum(jl) .and. 
jk > kcbot(jl) ) then + ikb = kcbot(jl) + zzp = ((paph(jl,klev+1)-paph(jl,jk))/(paph(jl,klev+1)-paph(jl,ikb))) + if ( ktype(jl) == 3 ) zzp = zzp*zzp + zmfuu(jl,jk) = zmfuu(jl,ikb)*zzp + zmfuv(jl,jk) = zmfuv(jl,ikb)*zzp + zmfdu(jl,jk) = zmfdu(jl,ikb)*zzp + zmfdv(jl,jk) = zmfdv(jl,ikb)*zzp + end if + end do + end do +!---------------------------------------------------------------------- +!* 2.0 COMPUTE TENDENCIES +! ------------------ + do jk = ktopm2 , klev + if ( jk < klev ) then + ik = jk + 1 + do jl = 1,klon + if ( ldcum(jl) ) then + zdudt(jl,jk) = zdp(jl,jk) * & + (zmfuu(jl,ik)-zmfuu(jl,jk)+zmfdu(jl,ik)-zmfdu(jl,jk)) + zdvdt(jl,jk) = zdp(jl,jk) * & + (zmfuv(jl,ik)-zmfuv(jl,jk)+zmfdv(jl,ik)-zmfdv(jl,jk)) + end if + end do + else + do jl = 1,klon + if ( ldcum(jl) ) then + zdudt(jl,jk) = -zdp(jl,jk)*(zmfuu(jl,jk)+zmfdu(jl,jk)) + zdvdt(jl,jk) = -zdp(jl,jk)*(zmfuv(jl,jk)+zmfdv(jl,jk)) + end if + end do + end if + end do +!--------------------------------------------------------------------- +!* 3.0 UPDATE TENDENCIES +! ----------------- + do jk = ktopm2 , klev + do jl = 1, klon + if ( ldcum(jl) ) then + ptenu(jl,jk) = ptenu(jl,jk) + zdudt(jl,jk) + ptenv(jl,jk) = ptenv(jl,jk) + zdvdt(jl,jk) + end if + end do + end do +!---------------------------------------------------------------------- + return + end subroutine cududvn +!--------------------------------------------------------- +! level 3 souroutines +!-------------------------------------------------------- + subroutine cuadjtqn & + & (klon, klev, kk, psp, pt, pq, ldflag, kcall) +! m.tiedtke e.c.m.w.f. 12/89 +! purpose. +! -------- +! to produce t,q and l values for cloud ascent + +! interface +! --------- +! this routine is called from subroutines: +! *cond* (t and q at condensation level) +! *cubase* (t and q at condensation level) +! *cuasc* (t and q at cloud levels) +! *cuini* (environmental t and qs values at half levels) +! input are unadjusted t and q values, +! it returns adjusted values of t and q + +! 
parameter description units +! --------- ----------- ----- +! input parameters (integer): + +! *klon* number of grid points per packet +! *klev* number of levels +! *kk* level +! *kcall* defines calculation as +! kcall=0 env. t and qs in*cuini* +! kcall=1 condensation in updrafts (e.g. cubase, cuasc) +! kcall=2 evaporation in downdrafts (e.g. cudlfs,cuddraf) +! input parameters (real): + +! *psp* pressure pa + +! updated parameters (real): + +! *pt* temperature k +! *pq* specific humidity kg/kg +! externals +! --------- +! for condensation calculations. +! the tables are initialised in *suphec*. + +!---------------------------------------------------------------------- + + implicit none + + integer klev,klon + real pt(klon,klev), pq(klon,klev), & + & psp(klon) + logical ldflag(klon) +! local variables + integer jl,jk + integer isum,kcall,kk + real zqmax,zqsat,zcor,zqp,zcond,zcond1,zl,zi,zf +!---------------------------------------------------------------------- +! 1. define constants +! ---------------- + zqmax=0.5 + +! 2. calculate condensation and adjust t and q accordingly +! ----------------------------------------------------- + + if ( kcall == 1 ) then + do jl = 1,klon + if ( ldflag(jl) ) then + zqp = 1./psp(jl) + zl = 1./(pt(jl,kk)-c4les) + zi = 1./(pt(jl,kk)-c4ies) + zqsat = c2es*(foealfa(pt(jl,kk))*exp(c3les*(pt(jl,kk)-tmelt)*zl) + & + (1.-foealfa(pt(jl,kk)))*exp(c3ies*(pt(jl,kk)-tmelt)*zi)) + zqsat = zqsat*zqp + zqsat = min(0.5,zqsat) + zcor = 1. - vtmpc1*zqsat + zf = foealfa(pt(jl,kk))*r5alvcp*zl**2 + & + (1.-foealfa(pt(jl,kk)))*r5alscp*zi**2 + zcond = (pq(jl,kk)*zcor**2-zqsat*zcor)/(zcor**2+zqsat*zf) + if ( zcond > 0. ) then + pt(jl,kk) = pt(jl,kk) + foeldcpm(pt(jl,kk))*zcond + pq(jl,kk) = pq(jl,kk) - zcond + zl = 1./(pt(jl,kk)-c4les) + zi = 1./(pt(jl,kk)-c4ies) + zqsat = c2es*(foealfa(pt(jl,kk)) * & + exp(c3les*(pt(jl,kk)-tmelt)*zl)+(1.-foealfa(pt(jl,kk))) * & + exp(c3ies*(pt(jl,kk)-tmelt)*zi)) + zqsat = zqsat*zqp + zqsat = min(0.5,zqsat) + zcor = 1. 
- vtmpc1*zqsat + zf = foealfa(pt(jl,kk))*r5alvcp*zl**2 + & + (1.-foealfa(pt(jl,kk)))*r5alscp*zi**2 + zcond1 = (pq(jl,kk)*zcor**2-zqsat*zcor)/(zcor**2+zqsat*zf) + if ( abs(zcond) < 1.e-20 ) zcond1 = 0. + pt(jl,kk) = pt(jl,kk) + foeldcpm(pt(jl,kk))*zcond1 + pq(jl,kk) = pq(jl,kk) - zcond1 + end if + end if + end do + elseif ( kcall == 2 ) then + do jl = 1,klon + if ( ldflag(jl) ) then + zqp = 1./psp(jl) + zqsat = foeewm(pt(jl,kk))*zqp + zqsat = min(0.5,zqsat) + zcor = 1./(1.-vtmpc1*zqsat) + zqsat = zqsat*zcor + zcond = (pq(jl,kk)-zqsat)/(1.+zqsat*zcor*foedem(pt(jl,kk))) + zcond = min(zcond,0.) + pt(jl,kk) = pt(jl,kk) + foeldcpm(pt(jl,kk))*zcond + pq(jl,kk) = pq(jl,kk) - zcond + zqsat = foeewm(pt(jl,kk))*zqp + zqsat = min(0.5,zqsat) + zcor = 1./(1.-vtmpc1*zqsat) + zqsat = zqsat*zcor + zcond1 = (pq(jl,kk)-zqsat)/(1.+zqsat*zcor*foedem(pt(jl,kk))) + if ( abs(zcond) < 1.e-20 ) zcond1 = min(zcond1,0.) + pt(jl,kk) = pt(jl,kk) + foeldcpm(pt(jl,kk))*zcond1 + pq(jl,kk) = pq(jl,kk) - zcond1 + end if + end do + else if ( kcall == 0 ) then + do jl = 1,klon + zqp = 1./psp(jl) + zqsat = foeewm(pt(jl,kk))*zqp + zqsat = min(0.5,zqsat) + zcor = 1./(1.-vtmpc1*zqsat) + zqsat = zqsat*zcor + zcond1 = (pq(jl,kk)-zqsat)/(1.+zqsat*zcor*foedem(pt(jl,kk))) + pt(jl,kk) = pt(jl,kk) + foeldcpm(pt(jl,kk))*zcond1 + pq(jl,kk) = pq(jl,kk) - zcond1 + zqsat = foeewm(pt(jl,kk))*zqp + zqsat = min(0.5,zqsat) + zcor = 1./(1.-vtmpc1*zqsat) + zqsat = zqsat*zcor + zcond1 = (pq(jl,kk)-zqsat)/(1.+zqsat*zcor*foedem(pt(jl,kk))) + pt(jl,kk) = pt(jl,kk) + foeldcpm(pt(jl,kk))*zcond1 + pq(jl,kk) = pq(jl,kk) - zcond1 + end do + end if + + return + end subroutine cuadjtqn +!--------------------------------------------------------- +! 
level 4 souroutines +!-------------------------------------------------------- + subroutine cubasmcn & + & (klon, klev, klevm1, kk, pten,& + & pqen, pqsen, puen, pven, pverv,& + & pgeo, pgeoh, ldcum, ktype, klab, plrain,& + & pmfu, pmfub, kcbot, ptu,& + & pqu, plu, puu, pvu, pmfus,& + & pmfuq, pmful, pdmfup) + implicit none +! m.tiedtke e.c.m.w.f. 12/89 +! c.zhang iprc 05/2012 +!***purpose. +! -------- +! this routine calculates cloud base values +! for midlevel convection +!***interface +! --------- +! this routine is called from *cuasc*. +! input are environmental values t,q etc +! it returns cloudbase values for midlevel convection +!***method. +! ------- +! s. tiedtke (1989) +!***externals +! --------- +! none +! ---------------------------------------------------------------- + real pten(klon,klev), pqen(klon,klev),& + & puen(klon,klev), pven(klon,klev),& + & pqsen(klon,klev), pverv(klon,klev),& + & pgeo(klon,klev), pgeoh(klon,klev+1) + real ptu(klon,klev), pqu(klon,klev),& + & puu(klon,klev), pvu(klon,klev),& + & plu(klon,klev), pmfu(klon,klev),& + & pmfub(klon), & + & pmfus(klon,klev), pmfuq(klon,klev),& + & pmful(klon,klev), pdmfup(klon,klev),& + & plrain(klon,klev) + integer ktype(klon), kcbot(klon),& + & klab(klon,klev) + logical ldcum(klon) +! local variabels + integer jl,kk,klev,klon,klevp1,klevm1 + real zzzmb +!-------------------------------------------------------- +!* 1. calculate entrainment and detrainment rates +! ------------------------------------------------------- + do jl=1,klon + if(.not.ldcum(jl) .and. klab(jl,kk+1).eq.0) then + if(lmfmid .and. pqen(jl,kk) .gt. 0.80*pqsen(jl,kk).and. & + pgeo(jl,kk)*zrg .gt. 5.0e2 .and. & + & pgeo(jl,kk)*zrg .lt. 1.0e4 ) then + ptu(jl,kk+1)=(cpd*pten(jl,kk)+pgeo(jl,kk)-pgeoh(jl,kk+1))& + & *rcpd + pqu(jl,kk+1)=pqen(jl,kk) + plu(jl,kk+1)=0. 
+ zzzmb=max(cmfcmin,-pverv(jl,kk)*zrg) + zzzmb=min(zzzmb,cmfcmax) + pmfub(jl)=zzzmb + pmfu(jl,kk+1)=pmfub(jl) + pmfus(jl,kk+1)=pmfub(jl)*(cpd*ptu(jl,kk+1)+pgeoh(jl,kk+1)) + pmfuq(jl,kk+1)=pmfub(jl)*pqu(jl,kk+1) + pmful(jl,kk+1)=0. + pdmfup(jl,kk+1)=0. + kcbot(jl)=kk + klab(jl,kk+1)=1 + plrain(jl,kk+1)=0.0 + ktype(jl)=3 + end if + end if + end do + return + end subroutine cubasmcn +!--------------------------------------------------------- +! level 4 souroutines +!--------------------------------------------------------- + subroutine cuentrn(klon,klev,kk,kcbot,ldcum,ldwork, & + pgeoh,pmfu,pdmfen,pdmfde) + implicit none + integer klon,klev,kk + integer kcbot(klon) + logical ldcum(klon) + logical ldwork + real pgeoh(klon,klev+1) + real pmfu(klon,klev) + real pdmfen(klon) + real pdmfde(klon) + logical llo1 + integer jl + real zdz , zmf + real zentr(klon) + ! + !* 1. CALCULATE ENTRAINMENT AND DETRAINMENT RATES + ! ------------------------------------------- + if ( ldwork ) then + do jl = 1,klon + pdmfen(jl) = 0. + pdmfde(jl) = 0. + zentr(jl) = 0. + end do + ! + !* 1.1 SPECIFY ENTRAINMENT RATES + ! ------------------------- + do jl = 1, klon + if ( ldcum(jl) ) then + zdz = (pgeoh(jl,kk)-pgeoh(jl,kk+1))*zrg + zmf = pmfu(jl,kk+1)*zdz + llo1 = kk < kcbot(jl) + if ( llo1 ) then + pdmfen(jl) = zentr(jl)*zmf + pdmfde(jl) = 0.75e-4*zmf + end if + end if + end do + end if + end subroutine cuentrn +!-------------------------------------------------------- +! external functions +!------------------------------------------------------ + real function foealfa(tt) +! foealfa is calculated to distinguish the three cases: +! +! foealfa=1 water phase +! foealfa=0 ice phase +! 0 < foealfa < 1 mixed phase +! +! input : tt = temperature +! 
+ implicit none + real tt + foealfa = min(1.,((max(rtice,min(rtwat,tt))-rtice) & + & /(rtwat-rtice))**2) + + return + end function foealfa + + real function foelhm(tt) + implicit none + real tt + foelhm = foealfa(tt)*alv + (1.-foealfa(tt))*als + return + end function foelhm + + real function foeewm(tt) + implicit none + real tt + foeewm = c2es * & + & (foealfa(tt)*exp(c3les*(tt-tmelt)/(tt-c4les))+ & + & (1.-foealfa(tt))*exp(c3ies*(tt-tmelt)/(tt-c4ies))) + return + end function foeewm + + real function foedem(tt) + implicit none + real tt + foedem = foealfa(tt)*r5alvcp*(1./(tt-c4les)**2)+ & + & (1.-foealfa(tt))*r5alscp*(1./(tt-c4ies)**2) + return + end function foedem + + real function foeldcpm(tt) + implicit none + real tt + foeldcpm = foealfa(tt)*ralvdcp+ & + & (1.-foealfa(tt))*ralsdcp + return + end function foeldcpm + +end module module_cu_ntiedtke diff --git a/src/core_atmosphere/physics/physics_wrf/module_cu_tiedtke.F b/src/core_atmosphere/physics/physics_wrf/module_cu_tiedtke.F index 0033c18490..a2b57f8377 100644 --- a/src/core_atmosphere/physics/physics_wrf/module_cu_tiedtke.F +++ b/src/core_atmosphere/physics/physics_wrf/module_cu_tiedtke.F @@ -1,218 +1,186 @@ -!================================================================================================== -!WRF version 3.6 of the Tiedtke parameterization of deep convection. Implemented in MPAS -!on 2014-06-26. -!Laura D. Fowler (birch.mmm.ucar.edu) / 2014-06-26. -!================================================================================================== -!> -!> add-ons and modifications to sourcecode: -!> ---------------------------------------- -!> +!================================================================================================================= +! copied for implementation in MPAS from WRF version 3.8.1: +! modifications made to sourcecode: +! * used preprocessing option to replace module_model_constants with mpas_atmphys_constants; used preprocessing +! 
option to include the horizontal dependence of the array znu. +! Laura D. Fowler (laura@ucar.edu) / 2016-08-18. + +!================================================================================================================= !----------------------------------------------------------------------- ! -!WRF:MODEL_LAYER:PHYSICS +!wrf:model_layer:physics ! -!####################TIEDTKE SCHEME######################### -! Taken from the IPRC iRAM - Yuqing Wang, University of Hawaii -! Added by Chunxi Zhang and Yuqing Wang to WRF3.2, May, 2010 -! refenrence: Tiedtke (1989, MWR, 117, 1779-1800) -! Nordeng, T.E., (1995), CAPE closure and organized entrainment/detrainment -! Yuqing Wang et al. (2003,J. Climate, 16, 1721-1738) for improvements +!####################tiedtke scheme######################### +! taken from the IPRC IRAM - Yuqing Wang, university of hawaii +! added by Chunxi Zhang and Yuqing Wang to wrf3.2, may, 2010 +! refenrence: Tiedtke (1989, mwr, 117, 1779-1800) +! Nordeng, t.e., (1995), cape closure and organized entrainment/detrainment +! Yuqing Wang et al. (2003,j. climate, 16, 1721-1738) for improvements ! for cloud top detrainment -! (2004, Mon. Wea. Rev., 132, 274-296), improvements for PBL clouds -! (2007,Mon. Wea. Rev., 135, 567-585), diurnal cycle of precipitation -! This scheme is on testing +! (2004, mon. wea. rev., 132, 274-296), improvements for pbl clouds +! (2007,mon. wea. rev., 135, 567-585), diurnal cycle of precipitation !########################################################### -MODULE module_cu_tiedtke +module module_cu_tiedtke ! -!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -! epsl--- allowed minimum value for floating calculation -!--------------------------------------------------------------- - real,parameter :: epsl = 1.0e-20 - real,parameter :: t000 = 273.15 - real,parameter :: hgfr = 233.15 ! defined in param.f in explct -!------------------------------------------------------------- -! 
Ends the parameters set -!++++++++++++++++++++++++++++ #if defined(mpas) -!... In MPAS, the variable RV is already defined in ./src/framework/mpas_constants.F. Here, -! we declare RV as a private variable to avoid conflicts at compilation time. -! Laura D. Fowler (birch.mmm.ucar.edu) / 2014-06-26. - REAL,PRIVATE :: G,CPV,RV - REAL :: API,A,EOMEGA,RD,CPD,RCPD,VTMPC1,VTMPC2, & - RHOH2O,ALV,ALS,ALF,CLW,TMELT,SOLC,STBO,DAYL,YEARL, & - C1ES,C2ES,C3LES,C3IES,C4LES,C4IES,C5LES,C5IES,ZRG + use mpas_atmphys_constants, only: & + rd => R_d, & + rv => R_v, & + cpd => cp, & + alv => xlv, & + als => xls, & + alf => xlf, & + g => gravity #else - REAL,PRIVATE :: G,CPV - REAL :: API,A,EOMEGA,RD,RV,CPD,RCPD,VTMPC1,VTMPC2, & - RHOH2O,ALV,ALS,ALF,CLW,TMELT,SOLC,STBO,DAYL,YEARL, & - C1ES,C2ES,C3LES,C3IES,C4LES,C4IES,C5LES,C5IES,ZRG + use module_model_constants, only:rd=>r_d, rv=>r_v, & + & cpd=>cp, alv=>xlv, als=>xls, alf=>xlf, g #endif + implicit none - REAL :: ENTRPEN,ENTRSCV,ENTRMID,ENTRDD,CMFCTOP,RHM,RHC, & - CMFCMAX,CMFCMIN,CMFDEPS,RHCDD,CPRCON,CRIRH,ZBUO0, & - fdbk,ZTAU + real :: rcpd,vtmpc1,t000,hgfr,rhoh2o,tmelt, & + c1es,c2es,c3les,c3ies,c4les,c4ies,c5les,c5ies,zrg + + real :: entrpen,entrscv,entrmid,entrdd,cmfctop,rhm,rhc, & + cmfcmax,cmfcmin,cmfdeps,cprcon,crirh,zbuo0, & + fdbk,ztau - INTEGER :: orgen,nturben,cutrigger - - REAL :: CVDIFTS, CEVAPCU1, CEVAPCU2,ZDNOPRC + real :: cevapcu1, cevapcu2, zdnoprc - - PARAMETER(A=6371.22E03, & - ALV=2.5008E6, & - ALS=2.8345E6, & - ALF=ALS-ALV, & - CPD=1005.46, & - CPV=1869.46, & ! CPV in module is 1846.4 - RCPD=1.0/CPD, & - RHOH2O=1.0E03, & - TMELT=273.16, & - G=9.806, & ! G=9.806 - ZRG=1.0/G, & - RD=287.05, & - RV=461.51, & - C1ES=610.78, & - C2ES=C1ES*RD/RV, & - C3LES=17.269, & - C4LES=35.86, & - C5LES=C3LES*(TMELT-C4LES), & - C3IES=21.875, & - C4IES=7.66, & - C5IES=C3IES*(TMELT-C4IES), & - API=3.141593, & ! API=2.0*ASIN(1.) 
- VTMPC1=RV/RD-1.0, & - VTMPC2=CPV/CPD-1.0, & - CVDIFTS=1.0, & - CEVAPCU1=1.93E-6*261.0*0.5/G, & - CEVAPCU2=1.E3/(38.3*0.293) ) + parameter( & + rcpd=1.0/cpd, & + rhoh2o=1.0e03, & + tmelt=273.16, & + t000= 273.15, & + hgfr= 233.15, & + zrg=1.0/g, & + c1es=610.78, & + c2es=c1es*rd/rv, & + c3les=17.269, & + c4les=35.86, & + c5les=c3les*(tmelt-c4les), & + c3ies=21.875, & + c4ies=7.66, & + c5ies=c3ies*(tmelt-c4ies), & + vtmpc1=rv/rd-1.0, & + cevapcu1=1.93e-6*261.0*0.5/g, & + cevapcu2=1.e3/(38.3*0.293) ) -! SPECIFY PARAMETERS FOR MASSFLUX-SCHEME +! specify parameters for massflux-scheme ! -------------------------------------- -! These are tunable parameters +! these are tunable parameters ! -! ENTRPEN: AVERAGE ENTRAINMENT RATE FOR PENETRATIVE CONVECTION +! entrpen: average entrainment rate for penetrative convection ! ------- ! - PARAMETER(ENTRPEN=1.0E-4) + parameter(entrpen=1.0e-4) ! -! ENTRSCV: AVERAGE ENTRAINMENT RATE FOR SHALLOW CONVECTION +! entrscv: average entrainment rate for shallow convection ! ------- ! - PARAMETER(ENTRSCV=1.2E-3) + parameter(entrscv=1.2e-3) ! -! ENTRMID: AVERAGE ENTRAINMENT RATE FOR MIDLEVEL CONVECTION +! entrmid: average entrainment rate for midlevel convection ! ------- ! - PARAMETER(ENTRMID=1.0E-4) + parameter(entrmid=1.0e-4) ! -! ENTRDD: AVERAGE ENTRAINMENT RATE FOR DOWNDRAFTS +! entrdd: average entrainment rate for downdrafts ! ------ ! - PARAMETER(ENTRDD =2.0E-4) + parameter(entrdd =2.0e-4) ! -! CMFCTOP: RELATIVE CLOUD MASSFLUX AT LEVEL ABOVE NONBUOYANCY LEVEL +! cmfctop: relative cloud massflux at level above nonbuoyancy level ! ------- ! - PARAMETER(CMFCTOP=0.30) + parameter(cmfctop=0.30) ! -! CMFCMAX: MAXIMUM MASSFLUX VALUE ALLOWED FOR UPDRAFTS ETC +! cmfcmax: maximum massflux value allowed for updrafts etc ! ------- ! - PARAMETER(CMFCMAX=1.0) + parameter(cmfcmax=1.0) ! -! CMFCMIN: MINIMUM MASSFLUX VALUE (FOR SAFETY) +! cmfcmin: minimum massflux value (for safety) ! ------- ! 
- PARAMETER(CMFCMIN=1.E-10) + parameter(cmfcmin=1.e-10) ! -! CMFDEPS: FRACTIONAL MASSFLUX FOR DOWNDRAFTS AT LFS +! cmfdeps: fractional massflux for downdrafts at lfs ! ------- ! - PARAMETER(CMFDEPS=0.30) -! -! CPRCON: COEFFICIENTS FOR DETERMINING CONVERSION FROM CLOUD WATER + parameter(cmfdeps=0.30) ! - PARAMETER(CPRCON = 1.1E-3/G) +! cprcon: coefficients for determining conversion from cloud water ! -! ZDNOPRC: The pressure depth below which no precipitation + parameter(cprcon = 1.1e-3/g) ! - PARAMETER(ZDNOPRC =1.5E4) -!-------------------- - PARAMETER(orgen=1) ! Old organized entrainment rate -! PARAMETER(orgen=2) ! New organized entrainment rate - - PARAMETER(nturben=1) ! old deep turburent entrainment/detrainment rate -! PARAMETER(nturben=2) ! New deep turburent entrainment/detrainment rate - - PARAMETER(cutrigger=1) ! Old trigger function -! PARAMETER(cutrigger=2) ! New trigger function +! zdnoprc: the pressure depth below which no precipitation ! + parameter(zdnoprc = 1.5e4) !-------------------- - PARAMETER(RHC=0.80,RHM=1.0,ZBUO0=0.50) + parameter(rhc=0.80,rhm=1.0,zbuo0=0.50) !-------------------- - PARAMETER(CRIRH=0.70,fdbk = 1.0,ZTAU = 1800.0) + parameter(crirh=0.70,fdbk = 1.0,ztau = 2400.0) !-------------------- - LOGICAL :: LMFPEN,LMFMID,LMFSCV,LMFDD,LMFDUDV - PARAMETER(LMFPEN=.TRUE.,LMFMID=.TRUE.,LMFSCV=.TRUE.,LMFDD=.TRUE.,LMFDUDV=.TRUE.) + logical :: lmfpen,lmfmid,lmfscv,lmfdd,lmfdudv + parameter(lmfpen=.true.,lmfmid=.true.,lmfscv=.true.,lmfdd=.true.,lmfdudv=.true.) !-------------------- -!#################### END of Variables definition########################## +!#################### end of variables definition########################## !----------------------------------------------------------------------- ! 
-CONTAINS +contains !----------------------------------------------------------------------- - SUBROUTINE CU_TIEDTKE( & - DT,ITIMESTEP,STEPCU & - ,RAINCV,PRATEC,QFX,HFX,ZNU & - ,U3D,V3D,W,T3D,QV3D,QC3D,QI3D,PI3D,RHO3D & - ,QVFTEN,QVPBLTEN & - ,DZ8W,PCPS,P8W,XLAND,CU_ACT_FLAG & + subroutine cu_tiedtke( & + dt,itimestep,stepcu & + ,raincv,pratec,qfx,znu & + ,u3d,v3d,w,t3d,qv3d,qc3d,qi3d,pi3d,rho3d & + ,qvften,qvpblten & + ,dz8w,pcps,p8w,xland,cu_act_flag & ,ids,ide, jds,jde, kds,kde & ,ims,ime, jms,jme, kms,kme & ,its,ite, jts,jte, kts,kte & - ,RTHCUTEN,RQVCUTEN,RQCCUTEN,RQICUTEN & - ,RUCUTEN, RVCUTEN & - ,F_QV ,F_QC ,F_QR ,F_QI ,F_QS & + ,rthcuten,rqvcuten,rqccuten,rqicuten & + ,rucuten, rvcuten & + ,f_qv ,f_qc ,f_qr ,f_qi ,f_qs & ) + !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- -!-- U3D 3D u-velocity interpolated to theta points (m/s) -!-- V3D 3D v-velocity interpolated to theta points (m/s) -!-- TH3D 3D potential temperature (K) -!-- T3D temperature (K) -!-- QV3D 3D water vapor mixing ratio (Kg/Kg) -!-- QC3D 3D cloud mixing ratio (Kg/Kg) -!-- QI3D 3D ice mixing ratio (Kg/Kg) -!-- RHO3D 3D air density (kg/m^3) -!-- P8w 3D hydrostatic pressure at full levels (Pa) -!-- Pcps 3D hydrostatic pressure at half levels (Pa) -!-- PI3D 3D exner function (dimensionless) -!-- QVFTEN 3D water vapor advection tendency -!-- QVPBLTEN 3D water vapor tendency due to a PBL -!-- RTHCUTEN Theta tendency due to -! cumulus scheme precipitation (K/s) -!-- RUCUTEN U wind tendency due to -! cumulus scheme precipitation (K/s) -!-- RVCUTEN V wind tendency due to -! 
cumulus scheme precipitation (K/s) -!-- RQVCUTEN Qv tendency due to +!-- u3d 3d u-velocity interpolated to theta points (m/s) +!-- v3d 3d v-velocity interpolated to theta points (m/s) +!-- th3d 3d potential temperature (k) +!-- t3d temperature (k) +!-- qv3d 3d water vapor mixing ratio (kg/kg) +!-- qc3d 3d cloud mixing ratio (kg/kg) +!-- qi3d 3d ice mixing ratio (kg/kg) +!-- rho3d 3d air density (kg/m^3) +!-- p8w 3d hydrostatic pressure at full levels (pa) +!-- pcps 3d hydrostatic pressure at half levels (pa) +!-- pi3d 3d exner function (dimensionless) +!-- rthcuten theta tendency due to +! cumulus scheme precipitation (k/s) +!-- rucuten u wind tendency due to +! cumulus scheme precipitation (k/s) +!-- rvcuten v wind tendency due to +! cumulus scheme precipitation (k/s) +!-- rqvcuten qv tendency due to ! cumulus scheme precipitation (kg/kg/s) -!-- RQRCUTEN Qr tendency due to +!-- rqrcuten qr tendency due to ! cumulus scheme precipitation (kg/kg/s) -!-- RQCCUTEN Qc tendency due to +!-- rqccuten qc tendency due to ! cumulus scheme precipitation (kg/kg/s) -!-- RQSCUTEN Qs tendency due to +!-- rqscuten qs tendency due to ! cumulus scheme precipitation (kg/kg/s) -!-- RQICUTEN Qi tendency due to +!-- rqicuten qi tendency due to ! 
cumulus scheme precipitation (kg/kg/s) -!-- RAINC accumulated total cumulus scheme precipitation (mm) -!-- RAINCV cumulus scheme precipitation (mm) -!-- PRATEC precipitiation rate from cumulus scheme (mm/s) +!-- rainc accumulated total cumulus scheme precipitation (mm) +!-- raincv cumulus scheme precipitation (mm) +!-- pratec precipitiation rate from cumulus scheme (mm/s) !-- dz8w dz between full levels (m) -!-- QFX upward moisture flux at the surface (kg/m^2/s) -!-- DT time step (s) -!-- F_QV etc flag values for tendencies, not used +!-- qfx upward moisture flux at the surface (kg/m^2/s) +!-- dt time step (s) !-- ids start index for i in domain !-- ide end index for i in domain !-- jds start index for j in domain @@ -232,2010 +200,1601 @@ SUBROUTINE CU_TIEDTKE( & !-- kts start index for k in tile !-- kte end index for k in tile !------------------------------------------------------------------- - INTEGER, INTENT(IN) :: ids,ide, jds,jde, kds,kde, & + integer, intent(in) :: ids,ide, jds,jde, kds,kde, & ims,ime, jms,jme, kms,kme, & its,ite, jts,jte, kts,kte, & - ITIMESTEP, & - STEPCU + itimestep, & + stepcu - REAL, INTENT(IN) :: & - DT + real, intent(in) :: & + dt - REAL, DIMENSION(ims:ime, jms:jme), INTENT(IN) :: & - XLAND + real, dimension(ims:ime, jms:jme), intent(in) :: & + xland - REAL, DIMENSION(ims:ime, jms:jme), INTENT(INOUT) :: & - RAINCV, PRATEC + real, dimension(ims:ime, jms:jme), intent(inout) :: & + raincv, pratec - LOGICAL, DIMENSION(IMS:IME,JMS:JME), INTENT(INOUT) :: & - CU_ACT_FLAG + logical, dimension(ims:ime,jms:jme), intent(inout) :: & + cu_act_flag - REAL, DIMENSION(ims:ime, kms:kme, jms:jme), INTENT(IN) :: & - DZ8W, & - P8w, & - Pcps, & - PI3D, & - QC3D, & - QVFTEN, & - QVPBLTEN, & - QI3D, & - QV3D, & - RHO3D, & - T3D, & - U3D, & - V3D, & - W + real, dimension(ims:ime, kms:kme, jms:jme), intent(in) :: & + dz8w, & + p8w, & + pcps, & + pi3d, & + qc3d, & + qvften, & + qvpblten, & + qi3d, & + qv3d, & + rho3d, & + t3d, & + u3d, & + v3d, & + w 
-!--------------------------- OPTIONAL VARS ---------------------------- +!--------------------------- optional vars ---------------------------- - REAL, DIMENSION(ims:ime, kms:kme, jms:jme), & - OPTIONAL, INTENT(INOUT) :: & - RQCCUTEN, & - RQICUTEN, & - RQVCUTEN, & - RTHCUTEN, & - RUCUTEN, & - RVCUTEN + real, dimension(ims:ime, kms:kme, jms:jme), & + optional, intent(inout) :: & + rqccuten, & + rqicuten, & + rqvcuten, & + rthcuten, & + rucuten, & + rvcuten ! -! Flags relating to the optional tendency arrays declared above -! Models that carry the optional tendencies will provdide the +! flags relating to the optional tendency arrays declared above +! models that carry the optional tendencies will provdide the ! optional arguments at compile time; these flags all the model ! to determine at run-time whether a particular tracer is in ! use or not. ! - LOGICAL, OPTIONAL :: & - F_QV & - ,F_QC & - ,F_QR & - ,F_QI & - ,F_QS + logical, optional :: & + f_qv & + ,f_qc & + ,f_qr & + ,f_qi & + ,f_qs -!--------------------------- LOCAL VARS ------------------------------ - - REAL, DIMENSION(ims:ime, jms:jme) :: & - QFX, & - HFX - - REAL :: & - DELT, & - RDELT - - REAL , DIMENSION(its:ite) :: & - RCS, & - RN, & - EVAP, & - heatflux, & - rho2d - INTEGER , DIMENSION(its:ite) :: SLIMSK - +!--------------------------- local vars ------------------------------ + + real, dimension(ims:ime, jms:jme) :: & + qfx - REAL , DIMENSION(its:ite, kts:kte+1) :: & - PRSI - - REAL , DIMENSION(its:ite, kts:kte) :: & - DEL, & - DOT, & - PHIL, & - PRSL, & - Q1, & - Q2, & - Q3, & - Q1B, & - Q1BL, & - Q11, & - Q12, & - T1, & - U1, & - V1, & - ZI, & - ZL, & - OMG, & - GHT - - INTEGER, DIMENSION(its:ite) :: & - KBOT, & - KTOP - - INTEGER :: & - I, & - IM, & - J, & - K, & - KM, & - KP, & - KX + real :: & + delt, & + rdelt + real , dimension(its:ite) :: & + rcs, & + rn, & + evap + integer , dimension(its:ite) :: slimsk + + + real , dimension(its:ite, kts:kte+1) :: & + prsi + + real , dimension(its:ite, 
kts:kte) :: & + del, & + dot, & + phil, & + prsl, & + q1, & + q2, & + q3, & + q1b, & + q1bl, & + q11, & + q12, & + t1, & + u1, & + v1, & + zi, & + zl, & + omg, & + ght + + integer, dimension(its:ite) :: & + kbot, & + ktop + + integer :: & + i, & + im, & + j, & + k, & + km, & + kp, & + kx + + + logical :: run_param , doing_adapt_dt , decided !-------other local variables---- #if defined(mpas) !MPAS specific (Laura D. Fowler): - INTEGER,DIMENSION(its:ite):: KTYPE - REAL,DIMENSION(its:ite,kts:kte):: SIG1 - REAL,DIMENSION(ims:ime,kms:kme,jms:jme):: ZNU - INTEGER:: zz + real,intent(in),dimension(ims:ime,kms:kme,jms:jme):: znu + integer,dimension(its:ite) :: ktype + real,dimension(its:ite,kts:kte):: sig1 + integer :: zz #else - INTEGER,DIMENSION( its:ite ) :: KTYPE - REAL, DIMENSION( kts:kte ) :: sig1 ! half sigma levels - REAL, DIMENSION( kms:kme ) :: ZNU - INTEGER :: zz + integer,dimension( its:ite ) :: ktype + real, dimension( kts:kte ) :: sig1 ! half sigma levels + real, dimension( kms:kme ) :: znu + integer :: zz #endif !----------------------------------------------------------------------- -! - DO J=JTS,JTE - DO I=ITS,ITE - CU_ACT_FLAG(I,J)=.TRUE. - ENDDO - ENDDO + do j=jts,jte + do i=its,ite + cu_act_flag(i,j)=.true. + enddo + enddo - IM=ITE-ITS+1 - KX=KTE-KTS+1 - DELT=DT*STEPCU - RDELT=1./DELT + im=ite-its+1 + kx=kte-kts+1 + delt=dt*stepcu + rdelt=1./delt -!------------- J LOOP (OUTER) -------------------------------------------------- +!------------- j loop (outer) -------------------------------------------------- - DO J=jts,jte + do j=jts,jte ! 
--------------- compute zi and zl ----------------------------------------- - DO i=its,ite - ZI(I,KTS)=0.0 - ENDDO - - DO k=kts+1,kte - KM=k-1 - DO i=its,ite - ZI(I,K)=ZI(I,KM)+dz8w(i,km,j) - ENDDO - ENDDO - - DO k=kts+1,kte - KM=k-1 - DO i=its,ite - ZL(I,KM)=(ZI(I,K)+ZI(I,KM))*0.5 - ENDDO - ENDDO - - DO i=its,ite - ZL(I,KTE)=2.*ZI(I,KTE)-ZL(I,KTE-1) - ENDDO + do i=its,ite + zi(i,kts)=0.0 + enddo + + do k=kts+1,kte + km=k-1 + do i=its,ite + zi(i,k)=zi(i,km)+dz8w(i,km,j) + enddo + enddo + + do k=kts+1,kte + km=k-1 + do i=its,ite + zl(i,km)=(zi(i,k)+zi(i,km))*0.5 + enddo + enddo + + do i=its,ite + zl(i,kte)=2.*zi(i,kte)-zl(i,kte-1) + enddo ! --------------- end compute zi and zl ------------------------------------- - DO i=its,ite - SLIMSK(i)=int(ABS(XLAND(i,j)-2.)) - ENDDO + do i=its,ite + slimsk(i)=int(abs(xland(i,j)-2.)) + enddo - DO k=kts,kte + do k=kts,kte kp=k+1 - DO i=its,ite - DOT(i,k)=-0.5*g*rho3d(i,k,j)*(w(i,k,j)+w(i,kp,j)) - ENDDO - ENDDO + do i=its,ite + dot(i,k)=-0.5*g*rho3d(i,k,j)*(w(i,k,j)+w(i,kp,j)) + enddo + enddo -#if defined(mpas) - DO k=kts,kte + do k=kts,kte zz = kte+1-k - DO i=its,ite - U1(i,zz) = U3D(i,k,j) - V1(i,zz) = V3D(i,k,j) - T1(i,zz) = T3D(i,k,j) - Q1(i,zz) = QV3D(i,k,j) - Q1B(i,zz) = QVFTEN(i,k,j) - Q1BL(i,zz) = QVPBLTEN(i,k,j) - Q2(i,zz) = QC3D(i,k,j) - Q3(i,zz) = QI3D(i,k,j) - OMG(i,zz) = DOT(i,k) - GHT(i,zz) = ZL(i,k) - PRSL(i,zz) = Pcps(i,k,j) - ENDDO - ENDDO -#else - DO k=kts,kte - zz = kte+1-k - DO i=its,ite - U1(i,zz)=U3D(i,k,j) - V1(i,zz)=V3D(i,k,j) - T1(i,zz)=T3D(i,k,j) - Q1(i,zz)= QV3D(i,k,j) + do i=its,ite + u1(i,zz)=u3d(i,k,j) + v1(i,zz)=v3d(i,k,j) + t1(i,zz)=t3d(i,k,j) + q1(i,zz)= qv3d(i,k,j) if(itimestep == 1) then - Q1B(i,zz)=0. - Q1BL(i,zz)=0. + q1b(i,zz)=0. + q1bl(i,zz)=0. 
else - Q1B(i,zz)=QVFTEN(i,k,j) - Q1BL(i,zz)=QVPBLTEN(i,k,j) + q1b(i,zz)=qvften(i,k,j) + q1bl(i,zz)=qvpblten(i,k,j) endif - Q2(i,zz)=QC3D(i,k,j) - Q3(i,zz)=QI3D(i,k,j) - OMG(i,zz)=DOT(i,k) - GHT(i,zz)=ZL(i,k) - PRSL(i,zz) = Pcps(i,k,j) - ENDDO - ENDDO -#endif + q2(i,zz)=qc3d(i,k,j) + q3(i,zz)=qi3d(i,k,j) + omg(i,zz)=dot(i,k) + ght(i,zz)=zl(i,k) + prsl(i,zz) = pcps(i,k,j) + enddo + enddo - DO k=kts,kte+1 + do k=kts,kte+1 zz = kte+2-k - DO i=its,ite - PRSI(i,zz) = P8w(i,k,j) - ENDDO - ENDDO + do i=its,ite + prsi(i,zz) = p8w(i,k,j) + enddo + enddo #if defined(mpas) -!MPAS specific (Laura D. Fowler): - DO k=kts,kte + do k=kts,kte zz = kte+1-k - DO i=its,ite + do i=its,ite sig1(i,zz) = znu(i,k,j) - ENDDO - ENDDO + enddo + enddo #else - DO k=kts,kte + do k=kts,kte zz = kte+1-k - sig1(zz) = ZNU(k) - ENDDO + sig1(zz) = znu(k) + enddo #endif -!###############before call TIECNV, we need EVAP######################## -! EVAP is the vapor flux at the surface +!###############before call tiecnv, we need evap######################## +! evap is the vapor flux at the surface !######################################################################## ! 
- DO i=its,ite - EVAP(i) = QFX(i,j) - heatflux(i)=HFX(i,j) - rho2d(i) = rho3d(i,1,j) - ENDDO + do i=its,ite + evap(i) = qfx(i,j) + enddo !######################################################################## - CALL TIECNV(U1,V1,T1,Q1,Q2,Q3,Q1B,Q1BL,GHT,OMG,PRSL,PRSI,EVAP,heatflux,rho2d, & - RN,SLIMSK,KTYPE,IM,KX,KX+1,sig1,DELT) + call tiecnv(u1,v1,t1,q1,q2,q3,q1b,q1bl,ght,omg,prsl,prsi,evap, & + rn,slimsk,ktype,im,kx,kx+1,sig1,delt) - DO I=ITS,ITE - RAINCV(I,J)=RN(I)/STEPCU - PRATEC(I,J)=RN(I)/(STEPCU * DT) - ENDDO + do i=its,ite + raincv(i,j)=rn(i)/stepcu + pratec(i,j)=rn(i)/(stepcu * dt) + enddo - DO K=KTS,KTE + do k=kts,kte zz = kte+1-k - DO I=ITS,ITE - RTHCUTEN(I,K,J)=(T1(I,zz)-T3D(I,K,J))/PI3D(I,K,J)*RDELT - RQVCUTEN(I,K,J)=(Q1(I,zz)-QV3D(I,K,J))*RDELT - RUCUTEN(I,K,J) =(U1(I,zz)-U3D(I,K,J))*RDELT - RVCUTEN(I,K,J) =(V1(I,zz)-V3D(I,K,J))*RDELT - ENDDO - ENDDO - - IF(PRESENT(RQCCUTEN))THEN - IF ( F_QC ) THEN - DO K=KTS,KTE + do i=its,ite + rthcuten(i,k,j)=(t1(i,zz)-t3d(i,k,j))/pi3d(i,k,j)*rdelt + rqvcuten(i,k,j)=(q1(i,zz)-qv3d(i,k,j))*rdelt + rucuten(i,k,j) =(u1(i,zz)-u3d(i,k,j))*rdelt + rvcuten(i,k,j) =(v1(i,zz)-v3d(i,k,j))*rdelt + enddo + enddo + + if(present(rqccuten))then + if ( f_qc ) then + do k=kts,kte zz = kte+1-k - DO I=ITS,ITE - RQCCUTEN(I,K,J)=(Q2(I,zz)-QC3D(I,K,J))*RDELT - ENDDO - ENDDO - ENDIF - ENDIF - - IF(PRESENT(RQICUTEN))THEN - IF ( F_QI ) THEN - DO K=KTS,KTE + do i=its,ite + rqccuten(i,k,j)=(q2(i,zz)-qc3d(i,k,j))*rdelt + enddo + enddo + endif + endif + + if(present(rqicuten))then + if ( f_qi ) then + do k=kts,kte zz = kte+1-k - DO I=ITS,ITE - RQICUTEN(I,K,J)=(Q3(I,zz)-QI3D(I,K,J))*RDELT - ENDDO - ENDDO - ENDIF - ENDIF + do i=its,ite + rqicuten(i,k,j)=(q3(i,zz)-qi3d(i,k,j))*rdelt + enddo + enddo + endif + endif - ENDDO + enddo - END SUBROUTINE CU_TIEDTKE + end subroutine cu_tiedtke !==================================================================== - SUBROUTINE tiedtkeinit(RTHCUTEN,RQVCUTEN,RQCCUTEN,RQICUTEN, & - RUCUTEN,RVCUTEN, & - 
RESTART,P_QC,P_QI,P_FIRST_SCALAR, & + subroutine tiedtkeinit(rthcuten,rqvcuten,rqccuten,rqicuten, & + rucuten,rvcuten, & + restart,p_qc,p_qi,p_first_scalar, & allowed_to_read, & ids, ide, jds, jde, kds, kde, & ims, ime, jms, jme, kms, kme, & its, ite, jts, jte, kts, kte) !-------------------------------------------------------------------- - IMPLICIT NONE + implicit none !-------------------------------------------------------------------- - LOGICAL , INTENT(IN) :: allowed_to_read,restart - INTEGER , INTENT(IN) :: ids, ide, jds, jde, kds, kde, & + logical , intent(in) :: allowed_to_read,restart + integer , intent(in) :: ids, ide, jds, jde, kds, kde, & ims, ime, jms, jme, kms, kme, & its, ite, jts, jte, kts, kte - INTEGER , INTENT(IN) :: P_FIRST_SCALAR, P_QI, P_QC + integer , intent(in) :: p_first_scalar, p_qi, p_qc - REAL, DIMENSION( ims:ime , kms:kme , jms:jme ) , INTENT(OUT) :: & - RTHCUTEN, & - RQVCUTEN, & - RQCCUTEN, & - RQICUTEN, & - RUCUTEN,RVCUTEN + real, dimension( ims:ime , kms:kme , jms:jme ) , intent(out) :: & + rthcuten, & + rqvcuten, & + rqccuten, & + rqicuten, & + rucuten,rvcuten - INTEGER :: i, j, k, itf, jtf, ktf + integer :: i, j, k, itf, jtf, ktf jtf=min0(jte,jde-1) ktf=min0(kte,kde-1) itf=min0(ite,ide-1) - IF(.not.restart)THEN - DO j=jts,jtf - DO k=kts,ktf - DO i=its,itf - RTHCUTEN(i,k,j)=0. - RQVCUTEN(i,k,j)=0. - RUCUTEN(i,k,j)=0. - RVCUTEN(i,k,j)=0. - ENDDO - ENDDO - ENDDO - - IF (P_QC .ge. P_FIRST_SCALAR) THEN - DO j=jts,jtf - DO k=kts,ktf - DO i=its,itf - RQCCUTEN(i,k,j)=0. - ENDDO - ENDDO - ENDDO - ENDIF - - IF (P_QI .ge. P_FIRST_SCALAR) THEN - DO j=jts,jtf - DO k=kts,ktf - DO i=its,itf - RQICUTEN(i,k,j)=0. - ENDDO - ENDDO - ENDDO - ENDIF - ENDIF - - END SUBROUTINE tiedtkeinit + if(.not.restart)then + do j=jts,jtf + do k=kts,ktf + do i=its,itf + rthcuten(i,k,j)=0. + rqvcuten(i,k,j)=0. + rucuten(i,k,j)=0. + rvcuten(i,k,j)=0. + enddo + enddo + enddo + + if (p_qc .ge. 
p_first_scalar) then + do j=jts,jtf + do k=kts,ktf + do i=its,itf + rqccuten(i,k,j)=0. + enddo + enddo + enddo + endif + + if (p_qi .ge. p_first_scalar) then + do j=jts,jtf + do k=kts,ktf + do i=its,itf + rqicuten(i,k,j)=0. + enddo + enddo + enddo + endif + endif + + end subroutine tiedtkeinit ! ------------------------------------------------------------------------ -!------------This is the combined version for tiedtke--------------- +!------------this is the combined version for tiedtke--------------- !---------------------------------------------------------------- -! In this module only the mass flux convection scheme of the ECMWF is included +! in this module only the mass flux convection scheme of the ecmwf is included !+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ !############################################################# ! -! LEVEL 1 SUBROUTINEs +! level 1 subroutines ! !############################################################# !******************************************************** -! subroutine TIECNV +! subroutine tiecnv !******************************************************** - SUBROUTINE TIECNV(pu,pv,pt,pqv,pqc,pqi,pqvf,pqvbl,poz,pomg, & - pap,paph,evap,hfx,rho,zprecc,lndj,KTYPE,lq,km,km1,sig1,dt) + subroutine tiecnv(pu,pv,pt,pqv,pqc,pqi,pqvf,pqvbl,poz,pomg, & + pap,paph,evap,zprecc,lndj,ktype,lq,km,km1,sig1,dt) !----------------------------------------------------------------- -! This is the interface between the meso-scale model and the mass +! this is the interface between the meso-scale model and the mass ! 
flux convection module !----------------------------------------------------------------- implicit none real pu(lq,km),pv(lq,km),pt(lq,km),pqv(lq,km),pqvf(lq,km) real poz(lq,km),pomg(lq,km),evap(lq),zprecc(lq),pqvbl(lq,km) - real PHHFL(lq),RHO(lq),hfx(lq) - REAL PUM1(lq,km), PVM1(lq,km), & - PTTE(lq,km), PQTE(lq,km), PVOM(lq,km), PVOL(lq,km), & - PVERV(lq,km), PGEO(lq,km), PAP(lq,km), PAPH(lq,km1) - REAL PQHFL(lq), ZQQ(lq,km), PAPRC(lq), PAPRS(lq), & - PRSFC(lq), PSSFC(lq), PAPRSM(lq), PCTE(lq,km) - REAL ZTP1(lq,km), ZQP1(lq,km), ZTU(lq,km), ZQU(lq,km), & - ZLU(lq,km), ZLUDE(lq,km), ZMFU(lq,km), ZMFD(lq,km), & - ZQSAT(lq,km), pqc(lq,km), pqi(lq,km), ZRAIN(lq) + + real pum1(lq,km), pvm1(lq,km), & + ptte(lq,km), pqte(lq,km), pvom(lq,km), pvol(lq,km), & + pverv(lq,km), pgeo(lq,km), pap(lq,km), paph(lq,km1) + real pqhfl(lq), zqq(lq,km), paprc(lq), paprs(lq), & + prsfc(lq), pssfc(lq), paprsm(lq), pcte(lq,km) + real ztp1(lq,km), zqp1(lq,km), ztu(lq,km), zqu(lq,km), & + zlu(lq,km), zlude(lq,km), zmfu(lq,km), zmfd(lq,km), & + zqsat(lq,km), pqc(lq,km), pqi(lq,km), zrain(lq) #if defined(mpas) -!MPAS specific (Laura D. Fowler): - REAL sig(km1) - REAL sig1(lq,km) +!mpas specific (Laura D. Fowler/2016-08-18): + real sig(km1) + real sig1(lq,km) #else - REAL sig(km1),sig1(km) + real sig(km1),sig1(km) #endif - INTEGER ICBOT(lq), ICTOP(lq), KTYPE(lq), lndj(lq) - REAL dt - LOGICAL LOCUM(lq) + integer icbot(lq), ictop(lq), ktype(lq), lndj(lq) + real dt + logical locum(lq) - real PSHEAT,PSRAIN,PSEVAP,PSMELT,PSDISS,TT - real ZTMST,ZTPP1,fliq,fice,ZTC,ZALF + real psheat,psrain,psevap,psmelt,psdiss,tt + real ztmst,ztpp1,fliq,fice,ztc,zalf integer i,j,k,lq,lp,km,km1 -! real TLUCUA -! external TLUCUA - - ZTMST=dt -! Masv flux diagnostics. - - PSHEAT=0.0 - PSRAIN=0.0 - PSEVAP=0.0 - PSMELT=0.0 - PSDISS=0.0 - DO 8 j=1,lq - ZRAIN(j)=0.0 - LOCUM(j)=.FALSE. - PRSFC(j)=0.0 - PSSFC(j)=0.0 - PAPRC(j)=0.0 - PAPRS(j)=0.0 - PAPRSM(j)=0.0 - PQHFL(j)=evap(j) - PHHFL(j)=hfx(j) - 8 CONTINUE - -! 
CONVERT MODEL VARIABLES FOR MFLUX SCHEME - - DO 10 k=1,km - DO 10 j=1,lq - PTTE(j,k)=0.0 - PCTE(j,k)=0.0 - PVOM(j,k)=0.0 - PVOL(j,k)=0.0 - ZTP1(j,k)=pt(j,k) - ZQP1(j,k)=pqv(j,k)/(1.0+pqv(j,k)) - PUM1(j,k)=pu(j,k) - PVM1(j,k)=pv(j,k) - PVERV(j,k)=pomg(j,k) - PGEO(j,k)=G*poz(j,k) - TT=ZTP1(j,k) - ZQSAT(j,k)=TLUCUA(TT)/PAP(j,k) - ZQSAT(j,k)=MIN(0.5,ZQSAT(j,k)) - ZQSAT(j,k)=ZQSAT(j,k)/(1.-VTMPC1*ZQSAT(j,k)) - PQTE(j,k)=pqvf(j,k)+pqvbl(j,k) - ZQQ(j,k)=PQTE(j,k) - 10 CONTINUE +! real tlucua +! external tlucua + + ztmst=dt +! masv flux diagnostics. + + psheat=0.0 + psrain=0.0 + psevap=0.0 + psmelt=0.0 + psdiss=0.0 + do j=1,lq + zrain(j)=0.0 + locum(j)=.false. + prsfc(j)=0.0 + pssfc(j)=0.0 + paprc(j)=0.0 + paprs(j)=0.0 + paprsm(j)=0.0 + pqhfl(j)=evap(j) + end do + +! convert model variables for mflux scheme + + do k=1,km + do j=1,lq + ptte(j,k)=0.0 + pcte(j,k)=0.0 + pvom(j,k)=0.0 + pvol(j,k)=0.0 + ztp1(j,k)=pt(j,k) + zqp1(j,k)=pqv(j,k)/(1.0+pqv(j,k)) + pum1(j,k)=pu(j,k) + pvm1(j,k)=pv(j,k) + pverv(j,k)=pomg(j,k) + pgeo(j,k)=g*poz(j,k) + tt=ztp1(j,k) + zqsat(j,k)=tlucua(tt)/pap(j,k) + zqsat(j,k)=min(0.5,zqsat(j,k)) + zqsat(j,k)=zqsat(j,k)/(1.-vtmpc1*zqsat(j,k)) + pqte(j,k)=pqvf(j,k)+pqvbl(j,k) + zqq(j,k)=pqte(j,k) + end do + end do ! !----------------------------------------------------------------------- -!* 2. CALL 'CUMASTR'(MASTER-ROUTINE FOR CUMULUS PARAMETERIZATION) -! - CALL CUMASTR_NEW & - (lq, km, km1, km-1, ZTP1, & - ZQP1, PUM1, PVM1, PVERV, ZQSAT, & - PQHFL, ZTMST, PAP, PAPH, PGEO, & - PTTE, PQTE, PVOM, PVOL, PRSFC, & - PSSFC, PAPRC, PAPRSM, PAPRS, LOCUM, & - KTYPE, ICBOT, ICTOP, ZTU, ZQU, & - ZLU, ZLUDE, ZMFU, ZMFD, ZRAIN, & - PSRAIN, PSEVAP, PSHEAT, PSDISS, PSMELT, & - PCTE, PHHFL, RHO, sig1, lndj) -! -! TO INCLUDE THE CLOUD WATER AND CLOUD ICE DETRAINED FROM CONVECTION -! - IF(fdbk.ge.1.0e-9) THEN - DO 20 K=1,km - DO 20 j=1,lq - If(PCTE(j,k).GT.0.0) then - ZTPP1=pt(j,k)+PTTE(j,k)*ZTMST - if(ZTPP1.ge.t000) then +!* 2. 
call 'cumastr'(master-routine for cumulus parameterization) +! + call cumastr_new & + (lq, km, km1, km-1, ztp1, & + zqp1, pum1, pvm1, pverv, zqsat, & + pqhfl, ztmst, pap, paph, pgeo, & + ptte, pqte, pvom, pvol, prsfc, & + pssfc, paprc, paprsm, paprs, locum, & + ktype, icbot, ictop, ztu, zqu, & + zlu, zlude, zmfu, zmfd, zrain, & + psrain, psevap, psheat, psdiss, psmelt, & + pcte, sig1, lndj) +! +! to include the cloud water and cloud ice detrained from convection +! + if(fdbk.ge.1.0e-9) then + do k=1,km + do j=1,lq + if(pcte(j,k).gt.0.0) then + ztpp1=pt(j,k)+ptte(j,k)*ztmst + if(ztpp1.ge.t000) then fliq=1.0 - ZALF=0.0 - else if(ZTPP1.le.hgfr) then + zalf=0.0 + else if(ztpp1.le.hgfr) then fliq=0.0 - ZALF=ALF + zalf=alf else - ZTC=ZTPP1-t000 - fliq=0.0059+0.9941*exp(-0.003102*ZTC*ZTC) - ZALF=ALF + ztc=ztpp1-t000 + fliq=0.0059+0.9941*exp(-0.003102*ztc*ztc) + zalf=alf endif fice=1.0-fliq - pqc(j,k)=pqc(j,k)+fliq*PCTE(j,k)*ZTMST - pqi(j,k)=pqi(j,k)+fice*PCTE(j,k)*ZTMST - PTTE(j,k)=PTTE(j,k)-ZALF*RCPD*fliq*PCTE(j,k) - Endif - 20 CONTINUE - ENDIF -! - DO 75 k=1,km - DO 75 j=1,lq - pt(j,k)=ZTP1(j,k)+PTTE(j,k)*ZTMST - ZQP1(j,k)=ZQP1(j,k)+(PQTE(j,k)-ZQQ(j,k))*ZTMST - pqv(j,k)=ZQP1(j,k)/(1.0-ZQP1(j,k)) - 75 CONTINUE - DO 85 j=1,lq - zprecc(j)=amax1(0.0,(PRSFC(j)+PSSFC(j))*ZTMST) - 85 CONTINUE - IF (LMFDUDV) THEN - DO 100 k=1,km - DO 100 j=1,lq - pu(j,k)=pu(j,k)+PVOM(j,k)*ZTMST - pv(j,k)=pv(j,k)+PVOL(j,k)*ZTMST - 100 CONTINUE - ENDIF -! - RETURN - END SUBROUTINE TIECNV + pqc(j,k)=pqc(j,k)+fliq*pcte(j,k)*ztmst + pqi(j,k)=pqi(j,k)+fice*pcte(j,k)*ztmst + ptte(j,k)=ptte(j,k)-zalf*rcpd*fliq*pcte(j,k) + endif + end do + end do + endif +! 
+ do k=1,km + do j=1,lq + pt(j,k)=ztp1(j,k)+ptte(j,k)*ztmst + zqp1(j,k)=zqp1(j,k)+(pqte(j,k)-zqq(j,k))*ztmst + pqv(j,k)=zqp1(j,k)/(1.0-zqp1(j,k)) + end do + end do + do j=1,lq + zprecc(j)=amax1(0.0,(prsfc(j)+pssfc(j))*ztmst) + end do + if (lmfdudv) then + do k=1,km + do j=1,lq + pu(j,k)=pu(j,k)+pvom(j,k)*ztmst + pv(j,k)=pv(j,k)+pvol(j,k)*ztmst + end do + end do + endif +! + return + end subroutine tiecnv !############################################################# ! -! LEVEL 2 SUBROUTINEs +! level 2 subroutines ! !############################################################# !*********************************************************** -! SUBROUTINE CUMASTR_NEW +! subroutine cumastr_new !*********************************************************** - SUBROUTINE CUMASTR_NEW & - (KLON, KLEV, KLEVP1, KLEVM1, PTEN, & - PQEN, PUEN, PVEN, PVERV, PQSEN, & - PQHFL, ZTMST, PAP, PAPH, PGEO, & - PTTE, PQTE, PVOM, PVOL, PRSFC, & - PSSFC, PAPRC, PAPRSM, PAPRS, LDCUM, & - KTYPE, KCBOT, KCTOP, PTU, PQU, & - PLU, PLUDE, PMFU, PMFD, PRAIN, & - PSRAIN, PSEVAP, PSHEAT, PSDISS, PSMELT,& - PCTE, PHHFL, RHO, sig1, lndj) -! -!***CUMASTR* MASTER ROUTINE FOR CUMULUS MASSFLUX-SCHEME -! M.TIEDTKE E.C.M.W.F. 1986/1987/1989 -!***PURPOSE + subroutine cumastr_new & + (klon, klev, klevp1, klevm1, pten, & + pqen, puen, pven, pverv, pqsen, & + pqhfl, ztmst, pap, paph, pgeo, & + ptte, pqte, pvom, pvol, prsfc, & + pssfc, paprc, paprsm, paprs, ldcum, & + ktype, kcbot, kctop, ptu, pqu, & + plu, plude, pmfu, pmfd, prain, & + psrain, psevap, psheat, psdiss, psmelt,& + pcte, sig1, lndj) +! +!***cumastr* master routine for cumulus massflux-scheme +! m.tiedtke e.c.m.w.f. 1986/1987/1989 +!***purpose ! ------- -! THIS ROUTINE COMPUTES THE PHYSICAL TENDENCIES OF THE -! PROGNOSTIC VARIABLES T,Q,U AND V DUE TO CONVECTIVE PROCESSES. -! PROCESSES CONSIDERED ARE: CONVECTIVE FLUXES, FORMATION OF -! PRECIPITATION, EVAPORATION OF FALLING RAIN BELOW CLOUD BASE, -! SATURATED CUMULUS DOWNDRAFTS. -!***INTERFACE. +! 
this routine computes the physical tendencies of the +! prognostic variables t,q,u and v due to convective processes. +! processes considered are: convective fluxes, formation of +! precipitation, evaporation of falling rain below cloud base, +! saturated cumulus downdrafts. +!***interface. ! ---------- -! *CUMASTR* IS CALLED FROM *MSSFLX* -! THE ROUTINE TAKES ITS INPUT FROM THE LONG-TERM STORAGE -! T,Q,U,V,PHI AND P AND MOISTURE TENDENCIES. -! IT RETURNS ITS OUTPUT TO THE SAME SPACE -! 1.MODIFIED TENDENCIES OF MODEL VARIABLES -! 2.RATES OF CONVECTIVE PRECIPITATION -! (USED IN SUBROUTINE SURF) -! 3.CLOUD BASE, CLOUD TOP AND PRECIP FOR RADIATION -! (USED IN SUBROUTINE CLOUD) -!***METHOD +! *cumastr* is called from *mssflx* +! the routine takes its input from the long-term storage +! t,q,u,v,phi and p and moisture tendencies. +! it returns its output to the same space +! 1.modified tendencies of model variables +! 2.rates of convective precipitation +! (used in subroutine surf) +! 3.cloud base, cloud top and precip for radiation +! (used in subroutine cloud) +!***method ! ------ -! PARAMETERIZATION IS DONE USING A MASSFLUX-SCHEME. -! (1) DEFINE CONSTANTS AND PARAMETERS -! (2) SPECIFY VALUES (T,Q,QS...) AT HALF LEVELS AND -! INITIALIZE UPDRAFT- AND DOWNDRAFT-VALUES IN 'CUINI' -! (3) CALCULATE CLOUD BASE IN 'CUBASE' -! AND SPECIFY CLOUD BASE MASSFLUX FROM PBL MOISTURE BUDGET -! (4) DO CLOUD ASCENT IN 'CUASC' IN ABSENCE OF DOWNDRAFTS -! (5) DO DOWNDRAFT CALCULATIONS: -! (A) DETERMINE VALUES AT LFS IN 'CUDLFS' -! (B) DETERMINE MOIST DESCENT IN 'CUDDRAF' -! (C) RECALCULATE CLOUD BASE MASSFLUX CONSIDERING THE -! EFFECT OF CU-DOWNDRAFTS -! (6) DO FINAL CLOUD ASCENT IN 'CUASC' -! (7) DO FINAL ADJUSMENTS TO CONVECTIVE FLUXES IN 'CUFLX', -! DO EVAPORATION IN SUBCLOUD LAYER -! (8) CALCULATE INCREMENTS OF T AND Q IN 'CUDTDQ' -! (9) CALCULATE INCREMENTS OF U AND V IN 'CUDUDV' -!***EXTERNALS. +! parameterization is done using a massflux-scheme. +! 
(1) define constants and parameters +! (2) specify values (t,q,qs...) at half levels and +! initialize updraft- and downdraft-values in 'cuini' +! (3) calculate cloud base in 'cubase' +! and specify cloud base massflux from pbl moisture budget +! (4) do cloud ascent in 'cuasc' in absence of downdrafts +! (5) do downdraft calculations: +! (a) determine values at lfs in 'cudlfs' +! (b) determine moist descent in 'cuddraf' +! (c) recalculate cloud base massflux considering the +! effect of cu-downdrafts +! (6) do final cloud ascent in 'cuasc' +! (7) do final adjusments to convective fluxes in 'cuflx', +! do evaporation in subcloud layer +! (8) calculate increments of t and q in 'cudtdq' +! (9) calculate increments of u and v in 'cududv' +!***externals. ! ---------- -! CUINI: INITIALIZES VALUES AT VERTICAL GRID USED IN CU-PARAMETR. -! CUBASE: CLOUD BASE CALCULATION FOR PENETR.AND SHALLOW CONVECTION -! CUASC: CLOUD ASCENT FOR ENTRAINING PLUME -! CUDLFS: DETERMINES VALUES AT LFS FOR DOWNDRAFTS -! CUDDRAF:DOES MOIST DESCENT FOR CUMULUS DOWNDRAFTS -! CUFLX: FINAL ADJUSTMENTS TO CONVECTIVE FLUXES (ALSO IN PBL) -! CUDQDT: UPDATES TENDENCIES FOR T AND Q -! CUDUDV: UPDATES TENDENCIES FOR U AND V -!***SWITCHES. +! cuini: initializes values at vertical grid used in cu-parametr. +! cubase: cloud base calculation for penetr.and shallow convection +! cuasc: cloud ascent for entraining plume +! cudlfs: determines values at lfs for downdrafts +! cuddraf:does moist descent for cumulus downdrafts +! cuflx: final adjustments to convective fluxes (also in pbl) +! cudqdt: updates tendencies for t and q +! cududv: updates tendencies for u and v +!***switches. ! -------- -! LMFPEN=.T. PENETRATIVE CONVECTION IS SWITCHED ON -! LMFSCV=.T. SHALLOW CONVECTION IS SWITCHED ON -! LMFMID=.T. MIDLEVEL CONVECTION IS SWITCHED ON -! LMFDD=.T. CUMULUS DOWNDRAFTS SWITCHED ON -! LMFDUDV=.T. CUMULUS FRICTION SWITCHED ON +! lmfpen=.t. penetrative convection is switched on +! lmfscv=.t. 
shallow convection is switched on +! lmfmid=.t. midlevel convection is switched on +! lmfdd=.t. cumulus downdrafts switched on +! lmfdudv=.t. cumulus friction switched on !*** -! MODEL PARAMETERS (DEFINED IN SUBROUTINE CUPARAM) +! model parameters (defined in subroutine cuparam) ! ------------------------------------------------ -! ENTRPEN ENTRAINMENT RATE FOR PENETRATIVE CONVECTION -! ENTRSCV ENTRAINMENT RATE FOR SHALLOW CONVECTION -! ENTRMID ENTRAINMENT RATE FOR MIDLEVEL CONVECTION -! ENTRDD ENTRAINMENT RATE FOR CUMULUS DOWNDRAFTS -! CMFCTOP RELATIVE CLOUD MASSFLUX AT LEVEL ABOVE NONBUOYANCY -! LEVEL -! CMFCMAX MAXIMUM MASSFLUX VALUE ALLOWED FOR -! CMFCMIN MINIMUM MASSFLUX VALUE (FOR SAFETY) -! CMFDEPS FRACTIONAL MASSFLUX FOR DOWNDRAFTS AT LFS -! CPRCON COEFFICIENT FOR CONVERSION FROM CLOUD WATER TO RAIN -!***REFERENCE. +! entrpen entrainment rate for penetrative convection +! entrscv entrainment rate for shallow convection +! entrmid entrainment rate for midlevel convection +! entrdd entrainment rate for cumulus downdrafts +! cmfctop relative cloud massflux at level above nonbuoyancy +! level +! cmfcmax maximum massflux value allowed for +! cmfcmin minimum massflux value (for safety) +! cmfdeps fractional massflux for downdrafts at lfs +! cprcon coefficient for conversion from cloud water to rain +!***reference. ! ---------- -! PAPER ON MASSFLUX SCHEME (TIEDTKE,1989) +! 
paper on massflux scheme (tiedtke,1989) !----------------------------------------------------------------- !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER KLEVM1 - REAL ZTMST - REAL PSRAIN, PSEVAP, PSHEAT, PSDISS, PSMELT, ZCONS2 - INTEGER JK,JL,IKB - REAL ZQUMQE, ZDQMIN, ZMFMAX, ZALVDCP, ZQALV - REAL ZHSAT, ZGAM, ZZZ, ZHHAT, ZBI, ZRO, ZDZ, ZDHDZ, ZDEPTH - REAL ZFAC, ZRH, ZPBMPT, DEPT, ZHT, ZEPS - INTEGER ICUM, ITOPM2 - REAL PTEN(KLON,KLEV), PQEN(KLON,KLEV), & - PUEN(KLON,KLEV), PVEN(KLON,KLEV), & - PTTE(KLON,KLEV), PQTE(KLON,KLEV), & - PVOM(KLON,KLEV), PVOL(KLON,KLEV), & - PQSEN(KLON,KLEV), PGEO(KLON,KLEV), & - PAP(KLON,KLEV), PAPH(KLON,KLEVP1),& - PVERV(KLON,KLEV), PQHFL(KLON), & - PHHFL(KLON), RHO(KLON) - REAL PTU(KLON,KLEV), PQU(KLON,KLEV), & - PLU(KLON,KLEV), PLUDE(KLON,KLEV), & - PMFU(KLON,KLEV), PMFD(KLON,KLEV), & - PAPRC(KLON), PAPRS(KLON), & - PAPRSM(KLON), PRAIN(KLON), & - PRSFC(KLON), PSSFC(KLON) - REAL ZTENH(KLON,KLEV), ZQENH(KLON,KLEV),& - ZGEOH(KLON,KLEV), ZQSENH(KLON,KLEV),& - ZTD(KLON,KLEV), ZQD(KLON,KLEV), & - ZMFUS(KLON,KLEV), ZMFDS(KLON,KLEV), & - ZMFUQ(KLON,KLEV), ZMFDQ(KLON,KLEV), & - ZDMFUP(KLON,KLEV), ZDMFDP(KLON,KLEV),& - ZMFUL(KLON,KLEV), ZRFL(KLON), & - ZUU(KLON,KLEV), ZVU(KLON,KLEV), & - ZUD(KLON,KLEV), ZVD(KLON,KLEV) - REAL ZENTR(KLON), ZHCBASE(KLON), & - ZMFUB(KLON), ZMFUB1(KLON), & - ZDQPBL(KLON), ZDQCV(KLON) - REAL ZSFL(KLON), ZDPMEL(KLON,KLEV), & - PCTE(KLON,KLEV), ZCAPE(KLON), & - ZHEAT(KLON), ZHHATT(KLON,KLEV), & - ZHMIN(KLON), ZRELH(KLON) + integer klon, klev, klevp1 + integer klevm1 + real ztmst + real psrain, psevap, psheat, psdiss, psmelt, zcons2 + integer jk,jl,ikb + real zqumqe, zdqmin, zmfmax, zalvdcp, zqalv + real zhsat, zgam, zzz, zhhat, zbi, zro, zdz, zdhdz, zdepth + real zfac, zrh, zpbmpt, dept, zht, zeps + integer icum, itopm2 + real pten(klon,klev), 
pqen(klon,klev), & + puen(klon,klev), pven(klon,klev), & + ptte(klon,klev), pqte(klon,klev), & + pvom(klon,klev), pvol(klon,klev), & + pqsen(klon,klev), pgeo(klon,klev), & + pap(klon,klev), paph(klon,klevp1),& + pverv(klon,klev), pqhfl(klon) + real ptu(klon,klev), pqu(klon,klev), & + plu(klon,klev), plude(klon,klev), & + pmfu(klon,klev), pmfd(klon,klev), & + paprc(klon), paprs(klon), & + paprsm(klon), prain(klon), & + prsfc(klon), pssfc(klon) + real ztenh(klon,klev), zqenh(klon,klev),& + zgeoh(klon,klev), zqsenh(klon,klev),& + ztd(klon,klev), zqd(klon,klev), & + zmfus(klon,klev), zmfds(klon,klev), & + zmfuq(klon,klev), zmfdq(klon,klev), & + zdmfup(klon,klev), zdmfdp(klon,klev),& + zmful(klon,klev), zrfl(klon), & + zuu(klon,klev), zvu(klon,klev), & + zud(klon,klev), zvd(klon,klev) + real zentr(klon), zhcbase(klon), & + zmfub(klon), zmfub1(klon), & + zdqpbl(klon), zdqcv(klon) + real zsfl(klon), zdpmel(klon,klev), & + pcte(klon,klev), zcape(klon), & + zheat(klon), zhhatt(klon,klev), & + zhmin(klon), zrelh(klon) #if defined(mpas) -!MPAS specific (Laura D. Fowler): - REAL sig1(KLON,KLEV) +!mpas specific (Laura D. Fowler/2016-08-18): + real sig1(klon,klev) #else - REAL sig1(KLEV) + real sig1(klev) #endif - INTEGER ILAB(KLON,KLEV), IDTOP(KLON), & - ICTOP0(KLON), ILWMIN(KLON) - INTEGER KCBOT(KLON), KCTOP(KLON), & - KTYPE(KLON), IHMIN(KLON), & - KTOP0, lndj(KLON) - LOGICAL LDCUM(KLON) - LOGICAL LODDRAF(KLON), LLO1 - REAL CRIRH1 + integer ilab(klon,klev), idtop(klon), & + ictop0(klon), ilwmin(klon) + integer kcbot(klon), kctop(klon), & + ktype(klon), ihmin(klon), & + ktop0, lndj(klon) + logical ldcum(klon) + logical loddraf(klon), llo1 !------------------------------------------- -! 1. SPECIFY CONSTANTS AND PARAMETERS +! 1. specify constants and parameters !------------------------------------------- - 100 CONTINUE - ZCONS2=1./(G*ZTMST) + zcons2=1./(g*ztmst) !-------------------------------------------------------------- -!* 2. 
INITIALIZE VALUES AT VERTICAL GRID POINTS IN 'CUINI' +!* 2. initialize values at vertical grid points in 'cuini' !-------------------------------------------------------------- - 200 CONTINUE - CALL CUINI & - (KLON, KLEV, KLEVP1, KLEVM1, PTEN, & - PQEN, PQSEN, PUEN, PVEN, PVERV, & - PGEO, PAPH, ZGEOH, ZTENH, ZQENH, & - ZQSENH, ILWMIN, PTU, PQU, ZTD, & - ZQD, ZUU, ZVU, ZUD, ZVD, & - PMFU, PMFD, ZMFUS, ZMFDS, ZMFUQ, & - ZMFDQ, ZDMFUP, ZDMFDP, ZDPMEL, PLU, & - PLUDE, ILAB) + call cuini & + (klon, klev, klevp1, klevm1, pten, & + pqen, pqsen, puen, pven, pverv, & + pgeo, paph, zgeoh, ztenh, zqenh, & + zqsenh, ilwmin, ptu, pqu, ztd, & + zqd, zuu, zvu, zud, zvd, & + pmfu, pmfd, zmfus, zmfds, zmfuq, & + zmfdq, zdmfup, zdmfdp, zdpmel, plu, & + plude, ilab) !---------------------------------- -!* 3.0 CLOUD BASE CALCULATIONS +!* 3.0 cloud base calculations !---------------------------------- - 300 CONTINUE -!* (A) DETERMINE CLOUD BASE VALUES IN 'CUBASE' +!* (a) determine cloud base values in 'cubase' ! ------------------------------------------- - CALL CUBASE & - (KLON, KLEV, KLEVP1, KLEVM1, ZTENH, & - ZQENH, ZGEOH, PAPH, PTU, PQU, & - PLU, PUEN, PVEN, ZUU, ZVU, & - LDCUM, KCBOT, ILAB) -!* (B) DETERMINE TOTAL MOISTURE CONVERGENCE AND -!* THEN DECIDE ON TYPE OF CUMULUS CONVECTION + call cubase & + (klon, klev, klevp1, klevm1, ztenh, & + zqenh, zgeoh, paph, ptu, pqu, & + plu, puen, pven, zuu, zvu, & + ldcum, kcbot, ilab) +!* (b) determine total moisture convergence and +!* then decide on type of cumulus convection ! ----------------------------------------- - JK=1 - DO 310 JL=1,KLON - ZDQCV(JL) =PQTE(JL,JK)*(PAPH(JL,JK+1)-PAPH(JL,JK)) - ZDQPBL(JL)=0.0 - IDTOP(JL)=0 - 310 CONTINUE - DO 320 JK=2,KLEV - DO 315 JL=1,KLON - ZDQCV(JL)=ZDQCV(JL)+PQTE(JL,JK)*(PAPH(JL,JK+1)-PAPH(JL,JK)) - IF(JK.GE.KCBOT(JL)) ZDQPBL(JL)=ZDQPBL(JL)+PQTE(JL,JK) & - *(PAPH(JL,JK+1)-PAPH(JL,JK)) - 315 CONTINUE - 320 CONTINUE - - if(cutrigger .eq. 
1) then - DO JL=1,KLON - KTYPE(JL)=0 - IF(ZDQCV(JL).GT.MAX(0.,1.1*PQHFL(JL)*G)) THEN - KTYPE(JL)=1 - ELSE - KTYPE(JL)=2 - ENDIF - END DO - else if(cutrigger .eq. 2) then - CALL CUTYPE & - ( KLON, KLEV, KLEVP1, KLEVM1, & - ZTENH, ZQENH, ZQSENH, ZGEOH, PAPH, & - RHO, PHHFL, PQHFL, KTYPE, lndj ) - end if -!* (C) DETERMINE MOISTURE SUPPLY FOR BOUNDARY LAYER -!* AND DETERMINE CLOUD BASE MASSFLUX IGNORING -!* THE EFFECTS OF DOWNDRAFTS AT THIS STAGE + jk=1 + do jl=1,klon + zdqcv(jl) =pqte(jl,jk)*(paph(jl,jk+1)-paph(jl,jk)) + zdqpbl(jl)=0.0 + idtop(jl)=0 + end do + + do jk=2,klev + do jl=1,klon + zdqcv(jl)=zdqcv(jl)+pqte(jl,jk)*(paph(jl,jk+1)-paph(jl,jk)) + if(jk.ge.kcbot(jl)) zdqpbl(jl)=zdqpbl(jl)+pqte(jl,jk) & + *(paph(jl,jk+1)-paph(jl,jk)) + end do + end do + + do jl=1,klon + ktype(jl)=0 + if(zdqcv(jl).gt.max(0.,1.1*pqhfl(jl)*g)) then + ktype(jl)=1 + else + ktype(jl)=2 + endif + +!* (c) determine moisture supply for boundary layer +!* and determine cloud base massflux ignoring +!* the effects of downdrafts at this stage ! ------------------------------------------ -! do jl=1,klon -! if(ktype(jl) .ge. 1 ) then -! write(6,*)"ktype=", KTYPE(jl) -! end if -! end do - - DO 340 JL=1,KLON - IKB=KCBOT(JL) - ZQUMQE=PQU(JL,IKB)+PLU(JL,IKB)-ZQENH(JL,IKB) - ZDQMIN=MAX(0.01*ZQENH(JL,IKB),1.E-10) - IF(ZDQPBL(JL).GT.0..AND.ZQUMQE.GT.ZDQMIN.AND.LDCUM(JL)) THEN - ZMFUB(JL)=ZDQPBL(JL)/(G*MAX(ZQUMQE,ZDQMIN)) - ELSE - ZMFUB(JL)=0.01 - LDCUM(JL)=.FALSE. - ENDIF - ZMFMAX=(PAPH(JL,IKB)-PAPH(JL,IKB-1))*ZCONS2 - ZMFUB(JL)=MIN(ZMFUB(JL),ZMFMAX) + ikb=kcbot(jl) + zqumqe=pqu(jl,ikb)+plu(jl,ikb)-zqenh(jl,ikb) + zdqmin=max(0.01*zqenh(jl,ikb),1.e-10) + if(zdqpbl(jl).gt.0..and.zqumqe.gt.zdqmin.and.ldcum(jl)) then + zmfub(jl)=zdqpbl(jl)/(g*max(zqumqe,zdqmin)) + else + zmfub(jl)=0.01 + ldcum(jl)=.false. 
+ endif + zmfmax=(paph(jl,ikb)-paph(jl,ikb-1))*zcons2 + zmfub(jl)=min(zmfub(jl),zmfmax) !------------------------------------------------------ -!* 4.0 DETERMINE CLOUD ASCENT FOR ENTRAINING PLUME +!* 4.0 determine cloud ascent for entraining plume !------------------------------------------------------ - 400 CONTINUE -!* (A) ESTIMATE CLOUD HEIGHT FOR ENTRAINMENT/DETRAINMENT -!* CALCULATIONS IN CUASC (MAX.POSSIBLE CLOUD HEIGHT -!* FOR NON-ENTRAINING PLUME, FOLLOWING A.-S.,1974) +!* (a) estimate cloud height for entrainment/detrainment +!* calculations in cuasc (max.possible cloud height +!* for non-entraining plume, following a.-s.,1974) ! ------------------------------------------------------------- - IKB=KCBOT(JL) - ZHCBASE(JL)=CPD*PTU(JL,IKB)+ZGEOH(JL,IKB)+ALV*PQU(JL,IKB) - ICTOP0(JL)=KCBOT(JL)-1 - 340 CONTINUE - ZALVDCP=ALV/CPD - ZQALV=1./ALV - DO 420 JK=KLEVM1,3,-1 - DO 420 JL=1,KLON - ZHSAT=CPD*ZTENH(JL,JK)+ZGEOH(JL,JK)+ALV*ZQSENH(JL,JK) - ZGAM=C5LES*ZALVDCP*ZQSENH(JL,JK)/ & - ((1.-VTMPC1*ZQSENH(JL,JK))*(ZTENH(JL,JK)-C4LES)**2) - ZZZ=CPD*ZTENH(JL,JK)*0.608 - ZHHAT=ZHSAT-(ZZZ+ZGAM*ZZZ)/(1.+ZGAM*ZZZ*ZQALV)* & - MAX(ZQSENH(JL,JK)-ZQENH(JL,JK),0.) - ZHHATT(JL,JK)=ZHHAT - IF(JK.LT.ICTOP0(JL).AND.ZHCBASE(JL).GT.ZHHAT) ICTOP0(JL)=JK - 420 CONTINUE - DO 430 JL=1,KLON - JK=KCBOT(JL) - ZHSAT=CPD*ZTENH(JL,JK)+ZGEOH(JL,JK)+ALV*ZQSENH(JL,JK) - ZGAM=C5LES*ZALVDCP*ZQSENH(JL,JK)/ & - ((1.-VTMPC1*ZQSENH(JL,JK))*(ZTENH(JL,JK)-C4LES)**2) - ZZZ=CPD*ZTENH(JL,JK)*0.608 - ZHHAT=ZHSAT-(ZZZ+ZGAM*ZZZ)/(1.+ZGAM*ZZZ*ZQALV)* & - MAX(ZQSENH(JL,JK)-ZQENH(JL,JK),0.) - ZHHATT(JL,JK)=ZHHAT - 430 CONTINUE -! -! Find lowest possible org. detrainment level -! - DO 440 JL = 1, KLON - ZHMIN(JL) = 0. - IF( LDCUM(JL).AND.KTYPE(JL).EQ.1 ) THEN - IHMIN(JL) = KCBOT(JL) - ELSE - IHMIN(JL) = -1 - END IF - 440 CONTINUE -! 
- ZBI = 1./(25.*G) - DO 450 JK = KLEV, 1, -1 - DO 450 JL = 1, KLON - LLO1 = LDCUM(JL).AND.KTYPE(JL).EQ.1.AND.IHMIN(JL).EQ.KCBOT(JL) - IF (LLO1.AND.JK.LT.KCBOT(JL).AND.JK.GE.ICTOP0(JL)) THEN - IKB = KCBOT(JL) - ZRO = RD*ZTENH(JL,JK)/(G*PAPH(JL,JK)) - ZDZ = (PAPH(JL,JK)-PAPH(JL,JK-1))*ZRO - ZDHDZ=(CPD*(PTEN(JL,JK-1)-PTEN(JL,JK))+ALV*(PQEN(JL,JK-1)- & - PQEN(JL,JK))+(PGEO(JL,JK-1)-PGEO(JL,JK)))*G/(PGEO(JL, & - JK-1)-PGEO(JL,JK)) - ZDEPTH = ZGEOH(JL,JK) - ZGEOH(JL,IKB) - ZFAC = SQRT(1.+ZDEPTH*ZBI) - ZHMIN(JL) = ZHMIN(JL) + ZDHDZ*ZFAC*ZDZ - ZRH = -ALV*(ZQSENH(JL,JK)-ZQENH(JL,JK))*ZFAC - IF (ZHMIN(JL).GT.ZRH) IHMIN(JL) = JK - END IF - 450 CONTINUE - DO 460 JL = 1, KLON - IF (LDCUM(JL).AND.KTYPE(JL).EQ.1) THEN - IF (IHMIN(JL).LT.ICTOP0(JL)) IHMIN(JL) = ICTOP0(JL) - END IF - IF(KTYPE(JL).EQ.1) THEN - ZENTR(JL)=ENTRPEN - ELSE - ZENTR(JL)=ENTRSCV - ENDIF - if(lndj(JL).eq.1) ZENTR(JL)=ZENTR(JL)*1.05 - 460 CONTINUE -!* (B) DO ASCENT IN 'CUASC'IN ABSENCE OF DOWNDRAFTS + ikb=kcbot(jl) + zhcbase(jl)=cpd*ptu(jl,ikb)+zgeoh(jl,ikb)+alv*pqu(jl,ikb) + ictop0(jl)=kcbot(jl)-1 + end do + + zalvdcp=alv/cpd + zqalv=1./alv + do jk=klevm1,3,-1 + do jl=1,klon + zhsat=cpd*ztenh(jl,jk)+zgeoh(jl,jk)+alv*zqsenh(jl,jk) + zgam=c5les*zalvdcp*zqsenh(jl,jk)/ & + ((1.-vtmpc1*zqsenh(jl,jk))*(ztenh(jl,jk)-c4les)**2) + zzz=cpd*ztenh(jl,jk)*0.608 + zhhat=zhsat-(zzz+zgam*zzz)/(1.+zgam*zzz*zqalv)* & + max(zqsenh(jl,jk)-zqenh(jl,jk),0.) + zhhatt(jl,jk)=zhhat + if(jk.lt.ictop0(jl).and.zhcbase(jl).gt.zhhat) ictop0(jl)=jk + end do + end do + + do jl=1,klon + jk=kcbot(jl) + zhsat=cpd*ztenh(jl,jk)+zgeoh(jl,jk)+alv*zqsenh(jl,jk) + zgam=c5les*zalvdcp*zqsenh(jl,jk)/ & + ((1.-vtmpc1*zqsenh(jl,jk))*(ztenh(jl,jk)-c4les)**2) + zzz=cpd*ztenh(jl,jk)*0.608 + zhhat=zhsat-(zzz+zgam*zzz)/(1.+zgam*zzz*zqalv)* & + max(zqsenh(jl,jk)-zqenh(jl,jk),0.) + zhhatt(jl,jk)=zhhat + end do +! +! find lowest possible org. detrainment level +! + do jl = 1, klon + zhmin(jl) = 0. 
+ if( ldcum(jl).and.ktype(jl).eq.1 ) then + ihmin(jl) = kcbot(jl) + else + ihmin(jl) = -1 + end if + end do +! + zbi = 1./(25.*g) + do jk = klev, 1, -1 + do jl = 1, klon + llo1 = ldcum(jl).and.ktype(jl).eq.1.and.ihmin(jl).eq.kcbot(jl) + if (llo1.and.jk.lt.kcbot(jl).and.jk.ge.ictop0(jl)) then + ikb = kcbot(jl) + zro = rd*ztenh(jl,jk)/(g*paph(jl,jk)) + zdz = (paph(jl,jk)-paph(jl,jk-1))*zro + zdhdz=(cpd*(pten(jl,jk-1)-pten(jl,jk))+alv*(pqen(jl,jk-1)- & + pqen(jl,jk))+(pgeo(jl,jk-1)-pgeo(jl,jk)))*g/(pgeo(jl, & + jk-1)-pgeo(jl,jk)) + zdepth = zgeoh(jl,jk) - zgeoh(jl,ikb) + zfac = sqrt(1.+zdepth*zbi) + zhmin(jl) = zhmin(jl) + zdhdz*zfac*zdz + zrh = -alv*(zqsenh(jl,jk)-zqenh(jl,jk))*zfac + if (zhmin(jl).gt.zrh) ihmin(jl) = jk + end if + end do + end do + + do jl = 1, klon + if (ldcum(jl).and.ktype(jl).eq.1) then + if (ihmin(jl).lt.ictop0(jl)) ihmin(jl) = ictop0(jl) + end if + if(ktype(jl).eq.1) then + zentr(jl)=entrpen + else + zentr(jl)=entrscv + endif + if(lndj(jl).eq.1) zentr(jl)=zentr(jl)*1.1 + end do +!* (b) do ascent in 'cuasc'in absence of downdrafts !---------------------------------------------------------- - CALL CUASC_NEW & - (KLON, KLEV, KLEVP1, KLEVM1, ZTENH, & - ZQENH, PUEN, PVEN, PTEN, PQEN, & - PQSEN, PGEO, ZGEOH, PAP, PAPH, & - PQTE, PVERV, ILWMIN, LDCUM, ZHCBASE, & - KTYPE, ILAB, PTU, PQU, PLU, & - ZUU, ZVU, PMFU, ZMFUB, ZENTR, & - ZMFUS, ZMFUQ, ZMFUL, PLUDE, ZDMFUP, & - KCBOT, KCTOP, ICTOP0, ICUM, ZTMST, & - IHMIN, ZHHATT, ZQSENH) - IF(ICUM.EQ.0) GO TO 1000 -!* (C) CHECK CLOUD DEPTH AND CHANGE ENTRAINMENT RATE ACCORDINGLY -! 
CALCULATE PRECIPITATION RATE (FOR DOWNDRAFT CALCULATION) + call cuasc_new & + (klon, klev, klevp1, klevm1, ztenh, & + zqenh, puen, pven, pten, pqen, & + pqsen, pgeo, zgeoh, pap, paph, & + pqte, pverv, ilwmin, ldcum, zhcbase, & + ktype, ilab, ptu, pqu, plu, & + zuu, zvu, pmfu, zmfub, zentr, & + zmfus, zmfuq, zmful, plude, zdmfup, & + kcbot, kctop, ictop0, icum, ztmst, & + ihmin, zhhatt, zqsenh) + if(icum.eq.0) return +!* (c) check cloud depth and change entrainment rate accordingly +! calculate precipitation rate (for downdraft calculation) !------------------------------------------------------------------ - DO 480 JL=1,KLON - ZPBMPT=PAPH(JL,KCBOT(JL))-PAPH(JL,KCTOP(JL)) - IF(LDCUM(JL)) ICTOP0(JL)=KCTOP(JL) - IF(LDCUM(JL).AND.KTYPE(JL).EQ.1.AND.ZPBMPT.LT.ZDNOPRC) KTYPE(JL)=2 - IF(KTYPE(JL).EQ.2) then - ZENTR(JL)=ENTRSCV - if(lndj(JL).eq.1) ZENTR(JL)=ZENTR(JL)*1.05 + do jl=1,klon + zpbmpt=paph(jl,kcbot(jl))-paph(jl,kctop(jl)) + if(ldcum(jl)) ictop0(jl)=kctop(jl) + if(ldcum(jl).and.ktype(jl).eq.1.and.zpbmpt.lt.zdnoprc) ktype(jl)=2 + if(ktype(jl).eq.2) then + zentr(jl)=entrscv + if(lndj(jl).eq.1) zentr(jl)=zentr(jl)*1.1 endif - ZRFL(JL)=ZDMFUP(JL,1) - 480 CONTINUE - DO 490 JK=2,KLEV - DO 490 JL=1,KLON - ZRFL(JL)=ZRFL(JL)+ZDMFUP(JL,JK) - 490 CONTINUE + zrfl(jl)=zdmfup(jl,1) + end do + + do jk=2,klev + do jl=1,klon + zrfl(jl)=zrfl(jl)+zdmfup(jl,jk) + end do + end do !----------------------------------------- -!* 5.0 CUMULUS DOWNDRAFT CALCULATIONS +!* 5.0 cumulus downdraft calculations !----------------------------------------- - 500 CONTINUE - IF(LMFDD) THEN -!* (A) DETERMINE LFS IN 'CUDLFS' + if(lmfdd) then +!* (a) determine lfs in 'cudlfs' !-------------------------------------- - CALL CUDLFS & - (KLON, KLEV, KLEVP1, ZTENH, ZQENH, & - PUEN, PVEN, ZGEOH, PAPH, PTU, & - PQU, ZUU, ZVU, LDCUM, KCBOT, & - KCTOP, ZMFUB, ZRFL, ZTD, ZQD, & - ZUD, ZVD, PMFD, ZMFDS, ZMFDQ, & - ZDMFDP, IDTOP, LODDRAF) -!* (B) DETERMINE DOWNDRAFT T,Q AND FLUXES IN 'CUDDRAF' + call cudlfs & + 
(klon, klev, klevp1, ztenh, zqenh, & + puen, pven, zgeoh, paph, ptu, & + pqu, zuu, zvu, ldcum, kcbot, & + kctop, zmfub, zrfl, ztd, zqd, & + zud, zvd, pmfd, zmfds, zmfdq, & + zdmfdp, idtop, loddraf) +!* (b) determine downdraft t,q and fluxes in 'cuddraf' !------------------------------------------------------------ - CALL CUDDRAF & - (KLON, KLEV, KLEVP1, ZTENH, ZQENH, & - PUEN, PVEN, ZGEOH, PAPH, ZRFL, & - LODDRAF, ZTD, ZQD, ZUD, ZVD, & - PMFD, ZMFDS, ZMFDQ, ZDMFDP) -!* (C) RECALCULATE CONVECTIVE FLUXES DUE TO EFFECT OF -! DOWNDRAFTS ON BOUNDARY LAYER MOISTURE BUDGET + call cuddraf & + (klon, klev, klevp1, ztenh, zqenh, & + puen, pven, zgeoh, paph, zrfl, & + loddraf, ztd, zqd, zud, zvd, & + pmfd, zmfds, zmfdq, zdmfdp) +!* (c) recalculate convective fluxes due to effect of +! downdrafts on boundary layer moisture budget !----------------------------------------------------------- - END IF + end if ! -!-- 5.1 Recalculate cloud base massflux from a cape closure -! for deep convection (ktype=1) and by PBL equilibrium +!-- 5.1 recalculate cloud base massflux from a cape closure +! for deep convection (ktype=1) and by pbl equilibrium ! taking downdrafts into account for shallow convection ! (ktype=2) -! implemented by Y. WANG based on ECHAM4 in Nov. 2001. -! - DO 510 JL=1,KLON - ZHEAT(JL)=0.0 - ZCAPE(JL)=0.0 - ZRELH(JL)=0.0 - ZMFUB1(JL)=ZMFUB(JL) - 510 CONTINUE -! - DO 511 JL=1,KLON - IF(LDCUM(JL).AND.KTYPE(JL).EQ.1) THEN - do jk=KLEVM1,2,-1 - if(abs(paph(jl,jk)*0.01 - 300) .lt. 50.) then - KTOP0=MAX(jk,KCTOP(JL)) - exit - end if +! implemented by y. wang based on echam4 in nov. 2001. +! + do jl=1,klon + zheat(jl)=0.0 + zcape(jl)=0.0 + zrelh(jl)=0.0 + zmfub1(jl)=zmfub(jl) end do -! 
KTOP0=MAX(12,KCTOP(JL)) - DO JK=2,KLEV - IF(JK.LE.KCBOT(JL).AND.JK.GT.KCTOP(JL)) THEN - ZRO=PAPH(JL,JK)/(RD*ZTENH(JL,JK)) - ZDZ=(PAPH(JL,JK)-PAPH(JL,JK-1))/(G*ZRO) - ZHEAT(JL)=ZHEAT(JL)+((PTEN(JL,JK-1)-PTEN(JL,JK) & - +G*ZDZ/CPD)/ZTENH(JL,JK)+0.608*(PQEN(JL,JK-1)- & - PQEN(JL,JK)))*(PMFU(JL,JK)+PMFD(JL,JK))*G/ZRO - ZCAPE(JL)=ZCAPE(JL)+G*((PTU(JL,JK)*(1.+.608*PQU(JL,JK) & - -PLU(JL,JK)))/(ZTENH(JL,JK)*(1.+.608*ZQENH(JL,JK))) & - -1.0)*ZDZ - ENDIF - IF(JK.LE.KCBOT(JL).AND.JK.GT.KTOP0) THEN - dept=(PAPH(JL,JK)-PAPH(JL,JK-1))/(PAPH(JL,KCBOT(JL))- & - PAPH(JL,KTOP0)) - ZRELH(JL)=ZRELH(JL)+dept*PQEN(JL,JK)/PQSEN(JL,JK) - ENDIF - ENDDO -! - - if(cutrigger .eq. 1 ) then - IF(lndj(JL).EQ.1) then - CRIRH1=CRIRH*0.8 - ELSE - CRIRH1=CRIRH - ENDIF +! + do jl=1,klon + if(ldcum(jl).and.ktype(jl).eq.1) then + ktop0=max(12,kctop(jl)) + ikb = kcbot(jl) + do jk=2,klev + if(jk.le.kcbot(jl).and.jk.gt.kctop(jl)) then + zro=paph(jl,jk)/(rd*ztenh(jl,jk)) + zdz=(paph(jl,jk)-paph(jl,jk-1))/(g*zro) + zheat(jl)=zheat(jl)+((pten(jl,jk-1)-pten(jl,jk) & + +g*zdz/cpd)/ztenh(jl,jk)+0.608*(pqen(jl,jk-1)- & + pqen(jl,jk)))*(pmfu(jl,jk)+pmfd(jl,jk))*g/zro + zcape(jl)=zcape(jl)+g*((ptu(jl,jk)*(1.+.608*pqu(jl,jk) & + -plu(jl,jk)))/(ztenh(jl,jk)*(1.+.608*zqenh(jl,jk))) & + -1.0)*zdz + endif + if(jk.le.kcbot(jl).and.jk.gt.ktop0) then + dept=(paph(jl,jk+1)-paph(jl,jk))/(paph(jl,ikb+1)- & + paph(jl,ktop0+1)) + zrelh(jl)=zrelh(jl)+dept*pqen(jl,jk)/pqsen(jl,jk) + endif + enddo +! + if(zrelh(jl).ge.crirh) then + zht=max(0.0,(zcape(jl)-0.0))/(ztau*zheat(jl)) + zmfub1(jl)=max(zmfub(jl)*zht,0.01) + zmfmax=(paph(jl,ikb)-paph(jl,ikb-1))*zcons2 + zmfub1(jl)=min(zmfub1(jl),zmfmax) else - CRIRH1=0. - end if - - IF(ZRELH(JL).GE.CRIRH1 .AND. ZCAPE(JL) .GT. 100.) THEN - IKB=KCBOT(JL) - ZHT=ZCAPE(JL)/(ZTAU*ZHEAT(JL)) - ZMFUB1(JL)=MAX(ZMFUB(JL)*ZHT,0.01) - ZMFMAX=(PAPH(JL,IKB)-PAPH(JL,IKB-1))*ZCONS2 - ZMFUB1(JL)=MIN(ZMFUB1(JL),ZMFMAX) - ELSE - ZMFUB1(JL)=0.01 - ZMFUB(JL)=0.01 - LDCUM(JL)=.FALSE. 
- ENDIF - ENDIF - 511 CONTINUE -! -!* 5.2 RECALCULATE CONVECTIVE FLUXES DUE TO EFFECT OF -! DOWNDRAFTS ON BOUNDARY LAYER MOISTURE BUDGET + zmfub1(jl)=0.01 + zmfub(jl)=0.01 + ldcum(jl)=.false. + endif + endif + end do +! +!* 5.2 recalculate convective fluxes due to effect of +! downdrafts on boundary layer moisture budget !-------------------------------------------------------- - DO 512 JL=1,KLON - IF(KTYPE(JL).NE.1) THEN - IKB=KCBOT(JL) - IF(PMFD(JL,IKB).LT.0.0.AND.LODDRAF(JL)) THEN - ZEPS=CMFDEPS - ELSE - ZEPS=0. - ENDIF - ZQUMQE=PQU(JL,IKB)+PLU(JL,IKB)- & - ZEPS*ZQD(JL,IKB)-(1.-ZEPS)*ZQENH(JL,IKB) - ZDQMIN=MAX(0.01*ZQENH(JL,IKB),1.E-10) - ZMFMAX=(PAPH(JL,IKB)-PAPH(JL,IKB-1))*ZCONS2 - IF(ZDQPBL(JL).GT.0..AND.ZQUMQE.GT.ZDQMIN.AND.LDCUM(JL) & - .AND.ZMFUB(JL).LT.ZMFMAX) THEN - ZMFUB1(JL)=ZDQPBL(JL)/(G*MAX(ZQUMQE,ZDQMIN)) - ELSE - ZMFUB1(JL)=ZMFUB(JL) - ENDIF - LLO1=(KTYPE(JL).EQ.2).AND.ABS(ZMFUB1(JL) & - -ZMFUB(JL)).LT.0.2*ZMFUB(JL) - IF(.NOT.LLO1) ZMFUB1(JL)=ZMFUB(JL) - ZMFUB1(JL)=MIN(ZMFUB1(JL),ZMFMAX) - END IF - 512 CONTINUE - DO 530 JK=1,KLEV - DO 530 JL=1,KLON - IF(LDCUM(JL)) THEN - ZFAC=ZMFUB1(JL)/MAX(ZMFUB(JL),1.E-10) - PMFD(JL,JK)=PMFD(JL,JK)*ZFAC - ZMFDS(JL,JK)=ZMFDS(JL,JK)*ZFAC - ZMFDQ(JL,JK)=ZMFDQ(JL,JK)*ZFAC - ZDMFDP(JL,JK)=ZDMFDP(JL,JK)*ZFAC - ELSE - PMFD(JL,JK)=0.0 - ZMFDS(JL,JK)=0.0 - ZMFDQ(JL,JK)=0.0 - ZDMFDP(JL,JK)=0.0 - ENDIF - 530 CONTINUE - DO 538 JL=1,KLON - IF(LDCUM(JL)) THEN - ZMFUB(JL)=ZMFUB1(JL) - ELSE - ZMFUB(JL)=0.0 - ENDIF - 538 CONTINUE + do jl=1,klon + if(ktype(jl).ne.1) then + ikb=kcbot(jl) + if(pmfd(jl,ikb).lt.0.0.and.loddraf(jl)) then + zeps=cmfdeps + else + zeps=0. 
+ endif + zqumqe=pqu(jl,ikb)+plu(jl,ikb)- & + zeps*zqd(jl,ikb)-(1.-zeps)*zqenh(jl,ikb) + zdqmin=max(0.01*zqenh(jl,ikb),1.e-10) + zmfmax=(paph(jl,ikb)-paph(jl,ikb-1))*zcons2 + if(zdqpbl(jl).gt.0..and.zqumqe.gt.zdqmin.and.ldcum(jl) & + .and.zmfub(jl).lt.zmfmax) then + zmfub1(jl)=zdqpbl(jl)/(g*max(zqumqe,zdqmin)) + else + zmfub1(jl)=zmfub(jl) + endif + llo1=(ktype(jl).eq.2).and.abs(zmfub1(jl) & + -zmfub(jl)).lt.0.2*zmfub(jl) + if(.not.llo1) zmfub1(jl)=zmfub(jl) + zmfub1(jl)=min(zmfub1(jl),zmfmax) + end if + end do + + do jk=1,klev + do jl=1,klon + if(ldcum(jl)) then + zfac=zmfub1(jl)/max(zmfub(jl),1.e-10) + pmfd(jl,jk)=pmfd(jl,jk)*zfac + zmfds(jl,jk)=zmfds(jl,jk)*zfac + zmfdq(jl,jk)=zmfdq(jl,jk)*zfac + zdmfdp(jl,jk)=zdmfdp(jl,jk)*zfac + else + pmfd(jl,jk)=0.0 + zmfds(jl,jk)=0.0 + zmfdq(jl,jk)=0.0 + zdmfdp(jl,jk)=0.0 + endif + end do + end do + + do jl=1,klon + if(ldcum(jl)) then + zmfub(jl)=zmfub1(jl) + else + zmfub(jl)=0.0 + endif + end do ! !--------------------------------------------------------------- -!* 6.0 DETERMINE FINAL CLOUD ASCENT FOR ENTRAINING PLUME -!* FOR PENETRATIVE CONVECTION (TYPE=1), -!* FOR SHALLOW TO MEDIUM CONVECTION (TYPE=2) -!* AND FOR MID-LEVEL CONVECTION (TYPE=3). +!* 6.0 determine final cloud ascent for entraining plume +!* for penetrative convection (type=1), +!* for shallow to medium convection (type=2) +!* and for mid-level convection (type=3). 
!--------------------------------------------------------------- - 600 CONTINUE - CALL CUASC_NEW & - (KLON, KLEV, KLEVP1, KLEVM1, ZTENH, & - ZQENH, PUEN, PVEN, PTEN, PQEN, & - PQSEN, PGEO, ZGEOH, PAP, PAPH, & - PQTE, PVERV, ILWMIN, LDCUM, ZHCBASE,& - KTYPE, ILAB, PTU, PQU, PLU, & - ZUU, ZVU, PMFU, ZMFUB, ZENTR, & - ZMFUS, ZMFUQ, ZMFUL, PLUDE, ZDMFUP, & - KCBOT, KCTOP, ICTOP0, ICUM, ZTMST, & - IHMIN, ZHHATT, ZQSENH) + call cuasc_new & + (klon, klev, klevp1, klevm1, ztenh, & + zqenh, puen, pven, pten, pqen, & + pqsen, pgeo, zgeoh, pap, paph, & + pqte, pverv, ilwmin, ldcum, zhcbase,& + ktype, ilab, ptu, pqu, plu, & + zuu, zvu, pmfu, zmfub, zentr, & + zmfus, zmfuq, zmful, plude, zdmfup, & + kcbot, kctop, ictop0, icum, ztmst, & + ihmin, zhhatt, zqsenh) !---------------------------------------------------------- -!* 7.0 DETERMINE FINAL CONVECTIVE FLUXES IN 'CUFLX' +!* 7.0 determine final convective fluxes in 'cuflx' !---------------------------------------------------------- - 700 CONTINUE - CALL CUFLX & - (KLON, KLEV, KLEVP1, PQEN, PQSEN, & - ZTENH, ZQENH, PAPH, ZGEOH, KCBOT, & - KCTOP, IDTOP, KTYPE, LODDRAF, LDCUM, & - PMFU, PMFD, ZMFUS, ZMFDS, ZMFUQ, & - ZMFDQ, ZMFUL, PLUDE, ZDMFUP, ZDMFDP, & - ZRFL, PRAIN, PTEN, ZSFL, ZDPMEL, & - ITOPM2, ZTMST, sig1) + call cuflx & + (klon, klev, klevp1, pqen, pqsen, & + ztenh, zqenh, paph, zgeoh, kcbot, & + kctop, idtop, ktype, loddraf, ldcum, & + pmfu, pmfd, zmfus, zmfds, zmfuq, & + zmfdq, zmful, plude, zdmfup, zdmfdp, & + zrfl, prain, pten, zsfl, zdpmel, & + itopm2, ztmst, sig1) !---------------------------------------------------------------- -!* 8.0 UPDATE TENDENCIES FOR T AND Q IN SUBROUTINE CUDTDQ +!* 8.0 update tendencies for t and q in subroutine cudtdq !---------------------------------------------------------------- - 800 CONTINUE - CALL CUDTDQ & - (KLON, KLEV, KLEVP1, ITOPM2, PAPH, & - LDCUM, PTEN, PTTE, PQTE, ZMFUS, & - ZMFDS, ZMFUQ, ZMFDQ, ZMFUL, ZDMFUP, & - ZDMFDP, ZTMST, ZDPMEL, PRAIN, ZRFL, & - ZSFL, PSRAIN, PSEVAP, 
PSHEAT, PSMELT, & - PRSFC, PSSFC, PAPRC, PAPRSM, PAPRS, & - PQEN, PQSEN, PLUDE, PCTE) + call cudtdq & + (klon, klev, klevp1, itopm2, paph, & + ldcum, pten, ptte, pqte, zmfus, & + zmfds, zmfuq, zmfdq, zmful, zdmfup, & + zdmfdp, ztmst, zdpmel, prain, zrfl, & + zsfl, psrain, psevap, psheat, psmelt, & + prsfc, pssfc, paprc, paprsm, paprs, & + pqen, pqsen, plude, pcte) !---------------------------------------------------------------- -!* 9.0 UPDATE TENDENCIES FOR U AND U IN SUBROUTINE CUDUDV +!* 9.0 update tendencies for u and u in subroutine cududv !---------------------------------------------------------------- - 900 CONTINUE - IF(LMFDUDV) THEN - CALL CUDUDV & - (KLON, KLEV, KLEVP1, ITOPM2, KTYPE, & - KCBOT, PAPH, LDCUM, PUEN, PVEN, & - PVOM, PVOL, ZUU, ZUD, ZVU, & - ZVD, PMFU, PMFD, PSDISS) - END IF - 1000 CONTINUE - RETURN - END SUBROUTINE CUMASTR_NEW + if(lmfdudv) then + call cududv & + (klon, klev, klevp1, itopm2, ktype, & + kcbot, paph, ldcum, puen, pven, & + pvom, pvol, zuu, zud, zvu, & + zvd, pmfu, pmfd, psdiss) + end if + return + end subroutine cumastr_new ! !############################################################# ! -! LEVEL 3 SUBROUTINEs +! level 3 subroutines ! !############################################################# !********************************************** -! SUBROUTINE CUINI +! subroutine cuini !********************************************** ! - SUBROUTINE CUINI & - (KLON, KLEV, KLEVP1, KLEVM1, PTEN, & - PQEN, PQSEN, PUEN, PVEN, PVERV, & - PGEO, PAPH, PGEOH, PTENH, PQENH, & - PQSENH, KLWMIN, PTU, PQU, PTD, & - PQD, PUU, PVU, PUD, PVD, & - PMFU, PMFD, PMFUS, PMFDS, PMFUQ, & - PMFDQ, PDMFUP, PDMFDP, PDPMEL, PLU, & - PLUDE, KLAB) -! M.TIEDTKE E.C.M.W.F. 
12/89 -!***PURPOSE + subroutine cuini & + (klon, klev, klevp1, klevm1, pten, & + pqen, pqsen, puen, pven, pverv, & + pgeo, paph, pgeoh, ptenh, pqenh, & + pqsenh, klwmin, ptu, pqu, ptd, & + pqd, puu, pvu, pud, pvd, & + pmfu, pmfd, pmfus, pmfds, pmfuq, & + pmfdq, pdmfup, pdmfdp, pdpmel, plu, & + plude, klab) +! m.tiedtke e.c.m.w.f. 12/89 +!***purpose ! ------- -! THIS ROUTINE INTERPOLATES LARGE-SCALE FIELDS OF T,Q ETC. -! TO HALF LEVELS (I.E. GRID FOR MASSFLUX SCHEME), -! AND INITIALIZES VALUES FOR UPDRAFTS AND DOWNDRAFTS -!***INTERFACE +! this routine interpolates large-scale fields of t,q etc. +! to half levels (i.e. grid for massflux scheme), +! and initializes values for updrafts and downdrafts +!***interface ! --------- -! THIS ROUTINE IS CALLED FROM *CUMASTR*. -!***METHOD. +! this routine is called from *cumastr*. +!***method. ! -------- -! FOR EXTRAPOLATION TO HALF LEVELS SEE TIEDTKE(1989) -!***EXTERNALS +! for extrapolation to half levels see tiedtke(1989) +!***externals ! --------- -! *CUADJTQ* TO SPECIFY QS AT HALF LEVELS +! *cuadjtq* to specify qs at half levels ! 
---------------------------------------------------------------- !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER klevm1 - INTEGER JK,JL,IK, ICALL - REAL ZDP, ZZS - REAL PTEN(KLON,KLEV), PQEN(KLON,KLEV), & - PUEN(KLON,KLEV), PVEN(KLON,KLEV), & - PQSEN(KLON,KLEV), PVERV(KLON,KLEV), & - PGEO(KLON,KLEV), PGEOH(KLON,KLEV), & - PAPH(KLON,KLEVP1), PTENH(KLON,KLEV), & - PQENH(KLON,KLEV), PQSENH(KLON,KLEV) - REAL PTU(KLON,KLEV), PQU(KLON,KLEV), & - PTD(KLON,KLEV), PQD(KLON,KLEV), & - PUU(KLON,KLEV), PUD(KLON,KLEV), & - PVU(KLON,KLEV), PVD(KLON,KLEV), & - PMFU(KLON,KLEV), PMFD(KLON,KLEV), & - PMFUS(KLON,KLEV), PMFDS(KLON,KLEV), & - PMFUQ(KLON,KLEV), PMFDQ(KLON,KLEV), & - PDMFUP(KLON,KLEV), PDMFDP(KLON,KLEV), & - PLU(KLON,KLEV), PLUDE(KLON,KLEV) - REAL ZWMAX(KLON), ZPH(KLON), & - PDPMEL(KLON,KLEV) - INTEGER KLAB(KLON,KLEV), KLWMIN(KLON) - LOGICAL LOFLAG(KLON) + integer klon, klev, klevp1 + integer klevm1 + integer jk,jl,ik, icall + real zdp, zzs + real pten(klon,klev), pqen(klon,klev), & + puen(klon,klev), pven(klon,klev), & + pqsen(klon,klev), pverv(klon,klev), & + pgeo(klon,klev), pgeoh(klon,klev), & + paph(klon,klevp1), ptenh(klon,klev), & + pqenh(klon,klev), pqsenh(klon,klev) + real ptu(klon,klev), pqu(klon,klev), & + ptd(klon,klev), pqd(klon,klev), & + puu(klon,klev), pud(klon,klev), & + pvu(klon,klev), pvd(klon,klev), & + pmfu(klon,klev), pmfd(klon,klev), & + pmfus(klon,klev), pmfds(klon,klev), & + pmfuq(klon,klev), pmfdq(klon,klev), & + pdmfup(klon,klev), pdmfdp(klon,klev), & + plu(klon,klev), plude(klon,klev) + real zwmax(klon), zph(klon), & + pdpmel(klon,klev) + integer klab(klon,klev), klwmin(klon) + logical loflag(klon) !------------------------------------------------------------ -!* 1. 
SPECIFY LARGE SCALE PARAMETERS AT HALF LEVELS -!* ADJUST TEMPERATURE FIELDS IF STATICLY UNSTABLE -!* FIND LEVEL OF MAXIMUM VERTICAL VELOCITY +!* 1. specify large scale parameters at half levels +!* adjust temperature fields if staticly unstable +!* find level of maximum vertical velocity ! ----------------------------------------------------------- - 100 CONTINUE - ZDP=0.5 - DO 130 JK=2,KLEV - DO 110 JL=1,KLON - PGEOH(JL,JK)=PGEO(JL,JK)+(PGEO(JL,JK-1)-PGEO(JL,JK))*ZDP - PTENH(JL,JK)=(MAX(CPD*PTEN(JL,JK-1)+PGEO(JL,JK-1), & - CPD*PTEN(JL,JK)+PGEO(JL,JK))-PGEOH(JL,JK))*RCPD - PQSENH(JL,JK)=PQSEN(JL,JK-1) - ZPH(JL)=PAPH(JL,JK) - LOFLAG(JL)=.TRUE. - 110 CONTINUE - IK=JK - ICALL=0 - CALL CUADJTQ(KLON,KLEV,IK,ZPH,PTENH,PQSENH,LOFLAG,ICALL) - DO 120 JL=1,KLON - PQENH(JL,JK)=MIN(PQEN(JL,JK-1),PQSEN(JL,JK-1)) & - +(PQSENH(JL,JK)-PQSEN(JL,JK-1)) - PQENH(JL,JK)=MAX(PQENH(JL,JK),0.) - 120 CONTINUE - 130 CONTINUE - DO 140 JL=1,KLON - PTENH(JL,KLEV)=(CPD*PTEN(JL,KLEV)+PGEO(JL,KLEV)- & - PGEOH(JL,KLEV))*RCPD - PQENH(JL,KLEV)=PQEN(JL,KLEV) - PTENH(JL,1)=PTEN(JL,1) - PQENH(JL,1)=PQEN(JL,1) - PGEOH(JL,1)=PGEO(JL,1) - KLWMIN(JL)=KLEV - ZWMAX(JL)=0. - 140 CONTINUE - DO 160 JK=KLEVM1,2,-1 - DO 150 JL=1,KLON - ZZS=MAX(CPD*PTENH(JL,JK)+PGEOH(JL,JK), & - CPD*PTENH(JL,JK+1)+PGEOH(JL,JK+1)) - PTENH(JL,JK)=(ZZS-PGEOH(JL,JK))*RCPD - 150 CONTINUE - 160 CONTINUE - DO 190 JK=KLEV,3,-1 - DO 180 JL=1,KLON - IF(PVERV(JL,JK).LT.ZWMAX(JL)) THEN - ZWMAX(JL)=PVERV(JL,JK) - KLWMIN(JL)=JK - END IF - 180 CONTINUE - 190 CONTINUE -!----------------------------------------------------------- -!* 2.0 INITIALIZE VALUES FOR UPDRAFTS AND DOWNDRAFTS -!----------------------------------------------------------- - 200 CONTINUE - DO 230 JK=1,KLEV - IK=JK-1 - IF(JK.EQ.1) IK=1 - DO 220 JL=1,KLON - PTU(JL,JK)=PTENH(JL,JK) - PTD(JL,JK)=PTENH(JL,JK) - PQU(JL,JK)=PQENH(JL,JK) - PQD(JL,JK)=PQENH(JL,JK) - PLU(JL,JK)=0. 
- PUU(JL,JK)=PUEN(JL,IK) - PUD(JL,JK)=PUEN(JL,IK) - PVU(JL,JK)=PVEN(JL,IK) - PVD(JL,JK)=PVEN(JL,IK) - PMFU(JL,JK)=0. - PMFD(JL,JK)=0. - PMFUS(JL,JK)=0. - PMFDS(JL,JK)=0. - PMFUQ(JL,JK)=0. - PMFDQ(JL,JK)=0. - PDMFUP(JL,JK)=0. - PDMFDP(JL,JK)=0. - PDPMEL(JL,JK)=0. - PLUDE(JL,JK)=0. - KLAB(JL,JK)=0 - 220 CONTINUE - 230 CONTINUE - RETURN - END SUBROUTINE CUINI - -!********************************************** -! SUBROUTINE CUBASE -!********************************************** - SUBROUTINE CUBASE & - (KLON, KLEV, KLEVP1, KLEVM1, PTENH, & - PQENH, PGEOH, PAPH, PTU, PQU, & - PLU, PUEN, PVEN, PUU, PVU, & - LDCUM, KCBOT, KLAB) -! THIS ROUTINE CALCULATES CLOUD BASE VALUES (T AND Q) -! FOR CUMULUS PARAMETERIZATION -! M.TIEDTKE E.C.M.W.F. 7/86 MODIF. 12/89 -!***PURPOSE. -! -------- -! TO PRODUCE CLOUD BASE VALUES FOR CU-PARAMETRIZATION -!***INTERFACE -! --------- -! THIS ROUTINE IS CALLED FROM *CUMASTR*. -! INPUT ARE ENVIRONM. VALUES OF T,Q,P,PHI AT HALF LEVELS. -! IT RETURNS CLOUD BASE VALUES AND FLAGS AS FOLLOWS; -! KLAB=1 FOR SUBCLOUD LEVELS -! KLAB=2 FOR CONDENSATION LEVEL -!***METHOD. -! -------- -! LIFT SURFACE AIR DRY-ADIABATICALLY TO CLOUD BASE -! (NON ENTRAINING PLUME,I.E.CONSTANT MASSFLUX) -!***EXTERNALS -! --------- -! *CUADJTQ* FOR ADJUSTING T AND Q DUE TO CONDENSATION IN ASCENT -! ---------------------------------------------------------------- -!------------------------------------------------------------------- - IMPLICIT NONE -!------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER klevm1 - INTEGER JL,JK,IS,IK,ICALL,IKB - REAL ZBUO,ZZ - REAL PTENH(KLON,KLEV), PQENH(KLON,KLEV), & - PGEOH(KLON,KLEV), PAPH(KLON,KLEVP1) - REAL PTU(KLON,KLEV), PQU(KLON,KLEV), & - PLU(KLON,KLEV) - REAL PUEN(KLON,KLEV), PVEN(KLON,KLEV), & - PUU(KLON,KLEV), PVU(KLON,KLEV) - REAL ZQOLD(KLON,KLEV), ZPH(KLON) - INTEGER KLAB(KLON,KLEV), KCBOT(KLON) - LOGICAL LDCUM(KLON), LOFLAG(KLON) -!***INPUT VARIABLES: -! 
PTENH [ZTENH] - Environment Temperature on half levels. (CUINI) -! PQENH [ZQENH] - Env. specific humidity on half levels. (CUINI) -! PGEOH [ZGEOH] - Geopotential on half levels, (MSSFLX) -! PAPH - Pressure of half levels. (MSSFLX) -!***VARIABLES MODIFIED BY CUBASE: -! LDCUM - Logical denoting profiles. (CUBASE) -! KTYPE - Convection type - 1: Penetrative (CUMASTR) -! 2: Stratocumulus (CUMASTR) -! 3: Mid-level (CUASC) -! PTU - Cloud Temperature. -! PQU - Cloud specific Humidity. -! PLU - Cloud Liquid Water (Moisture condensed out) -! KCBOT - Cloud Base Level. (CUBASE) -! KLAB [ILAB] - Level Label - 1: Sub-cloud layer (CUBASE) -!------------------------------------------------ -! 1. INITIALIZE VALUES AT LIFTING LEVEL -!------------------------------------------------ - 100 CONTINUE - DO 110 JL=1,KLON - KLAB(JL,KLEV)=1 - KCBOT(JL)=KLEVM1 - LDCUM(JL)=.FALSE. - PUU(JL,KLEV)=PUEN(JL,KLEV)*(PAPH(JL,KLEVP1)-PAPH(JL,KLEV)) - PVU(JL,KLEV)=PVEN(JL,KLEV)*(PAPH(JL,KLEVP1)-PAPH(JL,KLEV)) - 110 CONTINUE -!------------------------------------------------------- -! 2.0 DO ASCENT IN SUBCLOUD LAYER, -! CHECK FOR EXISTENCE OF CONDENSATION LEVEL, -! ADJUST T,Q AND L ACCORDINGLY IN *CUADJTQ*, -! CHECK FOR BUOYANCY AND SET FLAGS -!------------------------------------------------------- - DO 200 JK=1,KLEV - DO 200 JL=1,KLON - ZQOLD(JL,JK)=0.0 - 200 CONTINUE - DO 290 JK=KLEVM1,2,-1 - IS=0 - DO 210 JL=1,KLON - IF(KLAB(JL,JK+1).EQ.1) THEN - IS=IS+1 - LOFLAG(JL)=.TRUE. - ELSE - LOFLAG(JL)=.FALSE. - ENDIF - ZPH(JL)=PAPH(JL,JK) - 210 CONTINUE - IF(IS.EQ.0) GO TO 290 - DO 220 JL=1,KLON - IF(LOFLAG(JL)) THEN - PQU(JL,JK)=PQU(JL,JK+1) - PTU(JL,JK)=(CPD*PTU(JL,JK+1)+PGEOH(JL,JK+1) & - -PGEOH(JL,JK))*RCPD - ZBUO=PTU(JL,JK)*(1.+VTMPC1*PQU(JL,JK))- & - PTENH(JL,JK)*(1.+VTMPC1*PQENH(JL,JK))+ZBUO0 - IF(ZBUO.GT.0.) 
KLAB(JL,JK)=1 - ZQOLD(JL,JK)=PQU(JL,JK) - END IF - 220 CONTINUE - IK=JK - ICALL=1 - CALL CUADJTQ(KLON,KLEV,IK,ZPH,PTU,PQU,LOFLAG,ICALL) - DO 240 JL=1,KLON - IF(LOFLAG(JL).AND.PQU(JL,JK).NE.ZQOLD(JL,JK)) THEN - KLAB(JL,JK)=2 - PLU(JL,JK)=PLU(JL,JK)+ZQOLD(JL,JK)-PQU(JL,JK) - ZBUO=PTU(JL,JK)*(1.+VTMPC1*PQU(JL,JK))- & - PTENH(JL,JK)*(1.+VTMPC1*PQENH(JL,JK))+ZBUO0 - IF(ZBUO.GT.0.) THEN - KCBOT(JL)=JK - LDCUM(JL)=.TRUE. - END IF - END IF - 240 CONTINUE -! CALCULATE AVERAGES OF U AND V FOR SUBCLOUD ARA,. -! THE VALUES WILL BE USED TO DEFINE CLOUD BASE VALUES. - IF(LMFDUDV) THEN - DO 250 JL=1,KLON - IF(JK.GE.KCBOT(JL)) THEN - PUU(JL,KLEV)=PUU(JL,KLEV)+ & - PUEN(JL,JK)*(PAPH(JL,JK+1)-PAPH(JL,JK)) - PVU(JL,KLEV)=PVU(JL,KLEV)+ & - PVEN(JL,JK)*(PAPH(JL,JK+1)-PAPH(JL,JK)) - END IF - 250 CONTINUE - END IF - 290 CONTINUE - IF(LMFDUDV) THEN - DO 310 JL=1,KLON - IF(LDCUM(JL)) THEN - IKB=KCBOT(JL) - ZZ=1./(PAPH(JL,KLEVP1)-PAPH(JL,IKB)) - PUU(JL,KLEV)=PUU(JL,KLEV)*ZZ - PVU(JL,KLEV)=PVU(JL,KLEV)*ZZ - ELSE - PUU(JL,KLEV)=PUEN(JL,KLEVM1) - PVU(JL,KLEV)=PVEN(JL,KLEVM1) - END IF - 310 CONTINUE - END IF - RETURN - END SUBROUTINE CUBASE - -!********************************************** -! SUBROUTINE CUTYPE -!********************************************** - SUBROUTINE CUTYPE & - ( KLON, KLEV, KLEVP1, KLEVM1,& - PTENH, PQENH, PQSENH, PGEOH, PAPH,& - RHO, HFX, QFX, KTYPE, lndj ) -! THIS ROUTINE CALCULATES CLOUD BASE and TOP -! AND RETURN CLOUD TYPES -! ZHANG & WANG IPRC 12/2010 -!***PURPOSE. -! -------- -! TO PRODUCE CLOUD TYPE for CU-PARAMETERIZATIONS -!***INTERFACE -! --------- -! THIS ROUTINE IS CALLED FROM *CUMASTR*. -! INPUT ARE ENVIRONM. VALUES OF T,Q,P,PHI AT HALF LEVELS. -! IT RETURNS CLOUD TYPES AS FOLLOWS; -! KTYPE=1 FOR deep cumulus -! KTYPE=2 FOR shallow cumulus -!***METHOD. -! -------- -! based on a simplified updraught equation -! partial(Hup)/partial(z)=eta(H - Hup) -! eta is the entrainment rate for test parcel -! 
H stands for dry static energy or the total water specific humidity -! references: Christian Jakob, 2003: A new subcloud model for mass-flux convection schemes -! influence on triggering, updraft properties, and model climate, Mon.Wea.Rev. -! 131, 2765-2778 -! and -! IFS Documentation - Cy33r1 -! -!***EXTERNALS -! --------- -! *CUADJTQ* FOR ADJUSTING T AND Q DUE TO CONDENSATION IN ASCENT -! ---------------------------------------------------------------- -!------------------------------------------------------------------- - IMPLICIT NONE -!------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER klevm1 - INTEGER JL,JK,IS,IK,ICALL,IKB,LEVELS - REAL PTENH(KLON,KLEV), PQENH(KLON,KLEV), & - PQSENH(KLON,KLEV),& - PGEOH(KLON,KLEV), PAPH(KLON,KLEVP1) - REAL ZRELH(KLON) - REAL QFX(KLON),RHO(KLON),HFX(KLON) - REAL ZQOLD(KLON,KLEV), ZPH(KLON) - INTEGER KCTOP(KLON),KCBOT(KLON) - INTEGER KTYPE(KLON),LCLFLAG(KLON) - LOGICAL TOPFLAG(KLON),DEEPFLAG(KLON),MYFLAG(KLON) - - REAL part1(klon), part2(klon), root(klon) - REAL conw(klon),deltT(klon),deltQ(klon) - REAL eta(klon),dz(klon),coef(klon) - REAL dhen(KLON,KLEV), dh(KLON,KLEV),qh(KLON,KLEV) - REAL Tup(KLON,KLEV),Qup(KLON,KLEV),ql(KLON,KLEV) - REAL ww(KLON,KLEV),Kup(KLON,KLEV) - REAL Vtup(KLON,KLEV),Vten(KLON,KLEV),buoy(KLON,KLEV) - - INTEGER lndj(KLON) - REAL CRIRH1 -!***INPUT VARIABLES: -! PTENH [ZTENH] - Environment Temperature on half levels. (CUINI) -! PQENH [ZQENH] - Env. specific humidity on half levels. (CUINI) -! PGEOH [ZGEOH] - Geopotential on half levels, (MSSFLX) -! PAPH - Pressure of half levels. (MSSFLX) -! RHO - Density of the lowest Model level -! QFX - net upward moisture flux at the surface (kg/m^2/s) -! HFX - net upward heat flux at the surface (W/m^2) -!***VARIABLES OUTPUT BY CUTYPE: -! KTYPE - Convection type - 1: Penetrative (CUMASTR) -! 2: Stratocumulus (CUMASTR) -! 
3: Mid-level (CUASC) -!-------------------------------------------------------------- - DO JL=1,KLON - KCBOT(JL)=KLEVM1 - KCTOP(JL)=KLEVM1 - KTYPE(JL)=0 - END DO -!----------------------------------------------------------- -! let's do test,and check the shallow convection first -! the first level is JK+1 -! define deltaT and deltaQ -!----------------------------------------------------------- - DO JK=1,KLEV - DO JL=1,KLON - ZQOLD(JL,JK)=0.0 - ql(jl,jk)=0.0 ! parcel liquid water - Tup(jl,jk)=0.0 ! parcel temperature - Qup(jl,jk)=0.0 ! parcel specific humidity - dh(jl,jk)=0.0 ! parcel dry static energy - qh(jl,jk)=0.0 ! parcel total water specific humidity - ww(jl,jk)=0.0 ! parcel vertical speed (m/s) - dhen(jl,jk)=0.0 ! environment dry static energy - Kup(jl,jk)=0.0 ! updraught kinetic energy for parcel - Vtup(jl,jk)=0.0 ! parcel virtual temperature considering water-loading - Vten(jl,jk)=0.0 ! environment virtual temperature - buoy(jl,jk)=0.0 ! parcel buoyancy - END DO - END DO - + zdp=0.5 + do jk=2,klev do jl=1,klon - lclflag(jl) = 0 ! flag for the condensation level - conw(jl) = 0.0 ! convective-scale velocity,also used for the vertical speed at the first level - myflag(jl) = .true. ! just as input for cuadjqt subroutine - topflag(jl) = .false.! flag for whether the cloud top is found + pgeoh(jl,jk)=pgeo(jl,jk)+(pgeo(jl,jk-1)-pgeo(jl,jk))*zdp + ptenh(jl,jk)=(max(cpd*pten(jl,jk-1)+pgeo(jl,jk-1), & + cpd*pten(jl,jk)+pgeo(jl,jk))-pgeoh(jl,jk))*rcpd + pqsenh(jl,jk)=pqsen(jl,jk-1) + zph(jl)=paph(jl,jk) + loflag(jl)=.true. end do -! check the levels from lowest level to second top level - do JK=KLEVM1,2,-1 - DO JL=1,KLON - ZPH(JL)=PAPH(JL,JK) - END DO - -! define the variables at the first level - if(jk .eq. 
KLEVM1) then + ik=jk + icall=0 + call cuadjtq(klon,klev,ik,zph,ptenh,pqsenh,loflag,icall) do jl=1,klon - part1(jl) = 1.5*0.4*pgeoh(jl,jk+1)/(rho(jl)*ptenh(jl,jk+1)) - part2(jl) = hfx(jl)/cpd+0.61*ptenh(jl,jk+1)*qfx(jl) - root(jl) = 0.001-part1(jl)*part2(jl) - if(root(jl) .gt. 0) then - conw(jl) = 1.2*(root(jl))**(1.0/3.0) - else - conw(jl) = -1.2*(-root(jl))**(1.0/3.0) - end if - deltT(jl) = -1.5*hfx(jl)/(rho(jl)*cpd*conw(jl)) - deltQ(jl) = -1.5*qfx(jl)/(rho(jl)*conw(jl)) - - Tup(jl,jk+1) = ptenh(jl,jk+1) + deltT(jl) - Qup(jl,jk+1) = pqenh(jl,jk+1) + deltQ(jl) - ql(jl,jk+1) = 0. - dh(jl,jk+1) = pgeoh(jl,jk+1) + Tup(jl,jk+1)*cpd - qh(jl,jk+1) = pqenh(jl,jk+1) + deltQ(jl) + ql(jl,jk+1) - ww(jl,jk+1) = conw(jl) + pqenh(jl,jk)=min(pqen(jl,jk-1),pqsen(jl,jk-1)) & + +(pqsenh(jl,jk)-pqsen(jl,jk-1)) + pqenh(jl,jk)=max(pqenh(jl,jk),0.) end do - end if - -! the next levels, we use the variables at the first level as initial values - do jl=1,klon - if(.not. topflag(jl)) then - eta(jl) = 0.5*(0.55/(pgeoh(jl,jk)*zrg)+1.0e-3) - dz(jl) = (pgeoh(jl,jk)-pgeoh(jl,jk+1))*zrg - coef(jl)= eta(jl)*dz(jl) - dhen(jl,jk) = pgeoh(jl,jk) + cpd*ptenh(jl,jk) - dh(jl,jk) = (coef(jl)*dhen(jl,jk) + dh(jl,jk+1))/(1+coef(jl)) - qh(jl,jk) = (coef(jl)*pqenh(jl,jk)+ qh(jl,jk+1))/(1+coef(jl)) - Tup(jl,jk) = (dh(jl,jk)-pgeoh(jl,jk))*RCPD - Qup(jl,jk) = qh(jl,jk) - ql(jl,jk+1) - zqold(jl,jk) = Qup(jl,jk) - end if end do -! check if the parcel is saturated - ik=jk - icall=1 - call CUADJTQ(klon,klev,ik,zph,Tup,Qup,myflag,icall) + do jl=1,klon - if( .not. topflag(jl) .and. zqold(jl,jk) .ne. Qup(jl,jk) ) then - lclflag(jl) = lclflag(jl) + 1 - ql(jl,jk) = ql(jl,jk+1) + zqold(jl,jk) - Qup(jl,jk) - dh(jl,jk) = pgeoh(jl,jk) + cpd*Tup(jl,jk) - end if + ptenh(jl,klev)=(cpd*pten(jl,klev)+pgeo(jl,klev)- & + pgeoh(jl,klev))*rcpd + pqenh(jl,klev)=pqen(jl,klev) + ptenh(jl,1)=pten(jl,1) + pqenh(jl,1)=pqen(jl,1) + pgeoh(jl,1)=pgeo(jl,1) + klwmin(jl)=klev + zwmax(jl)=0. end do -! 
compute the updraft speed + do jk=klevm1,2,-1 do jl=1,klon - if(.not. topflag(jl))then - Kup(jl,jk+1) = 0.5*ww(jl,jk+1)**2 - Vtup(jl,jk) = Tup(jl,jk)*(1.+VTMPC1*Qup(jl,jk)-ql(jl,jk)) - Vten(jl,jk) = ptenh(jl,jk)*(1.+VTMPC1*pqenh(jl,jk)) - buoy(jl,jk) = (Vtup(jl,jk) - Vten(jl,jk))/Vten(jl,jk)*g - Kup(jl,jk) = (Kup(jl,jk+1) + 0.333*dz(jl)*buoy(jl,jk))/ & - (1+2*2*eta(jl)*dz(jl)) - if(Kup(jl,jk) .gt. 0 ) then - ww(jl,jk) = sqrt(2*Kup(jl,jk)) - if(lclflag(jl) .eq. 1 ) kcbot(jl) = jk - if(jk .eq. 2) then - kctop(jl) = jk - topflag(jl)= .true. - end if - else - ww(jl,jk) = 0 - kctop(jl) = jk + 1 - topflag(jl) = .true. - end if - end if + zzs=max(cpd*ptenh(jl,jk)+pgeoh(jl,jk), & + cpd*ptenh(jl,jk+1)+pgeoh(jl,jk+1)) + ptenh(jl,jk)=(zzs-pgeoh(jl,jk))*rcpd + end do end do - end do ! end all the levels + do jk=klev,3,-1 do jl=1,klon - if(paph(jl,kcbot(jl)) - paph(jl,kctop(jl)) .lt. ZDNOPRC .and. & - paph(jl,kcbot(jl)) - paph(jl,kctop(jl)) .gt. 0 & - .and. lclflag(jl) .gt. 0) then - ktype(jl) = 2 - end if + if(pverv(jl,jk).lt.zwmax(jl)) then + zwmax(jl)=pverv(jl,jk) + klwmin(jl)=jk + end if + end do end do - !----------------------------------------------------------- -! Next, let's check the deep convection -! the first level is JK -! define deltaT and deltaQ -!---------------------------------------------------------- -! we check the parcel starting level by level (from the second lowest level to the next 12th level, -! usually, the 12th level around 700 hPa for common eta levels) - do levels=KLEVM1-1,KLEVM1-12,-1 - DO JK=1,KLEV - DO JL=1,KLON - ZQOLD(JL,JK)=0.0 - ql(jl,jk)=0.0 ! parcel liquid water - Tup(jl,jk)=0.0 ! parcel temperature - Qup(jl,jk)=0.0 ! parcel specific humidity - dh(jl,jk)=0.0 ! parcel dry static energy - qh(jl,jk)=0.0 ! parcel total water specific humidity - ww(jl,jk)=0.0 ! parcel vertical speed (m/s) - dhen(jl,jk)=0.0 ! environment dry static energy - Kup(jl,jk)=0.0 ! updraught kinetic energy for parcel - Vtup(jl,jk)=0.0 ! 
parcel virtual temperature considering water-loading - Vten(jl,jk)=0.0 ! environment virtual temperature - buoy(jl,jk)=0.0 ! parcel buoyancy - END DO - END DO - +!* 2.0 initialize values for updrafts and downdrafts +!----------------------------------------------------------- + do jk=1,klev + ik=jk-1 + if(jk.eq.1) ik=1 do jl=1,klon - lclflag(jl) = 0 ! flag for the condensation level - kctop(jl) = levels - kcbot(jl) = levels - myflag(jl) = .true. ! just as input for cuadjqt subroutine - topflag(jl) = .false.! flag for whether the cloud top is found + ptu(jl,jk)=ptenh(jl,jk) + ptd(jl,jk)=ptenh(jl,jk) + pqu(jl,jk)=pqenh(jl,jk) + pqd(jl,jk)=pqenh(jl,jk) + plu(jl,jk)=0. + puu(jl,jk)=puen(jl,ik) + pud(jl,jk)=puen(jl,ik) + pvu(jl,jk)=pven(jl,ik) + pvd(jl,jk)=pven(jl,ik) + pmfu(jl,jk)=0. + pmfd(jl,jk)=0. + pmfus(jl,jk)=0. + pmfds(jl,jk)=0. + pmfuq(jl,jk)=0. + pmfdq(jl,jk)=0. + pdmfup(jl,jk)=0. + pdmfdp(jl,jk)=0. + pdpmel(jl,jk)=0. + plude(jl,jk)=0. + klab(jl,jk)=0 end do - -! check the levels from lowest level to second top level - do JK=levels,2,-1 - DO JL=1,KLON - ZPH(JL)=PAPH(JL,JK) - END DO - -! define the variables at the first level - if(jk .eq. levels) then - do jl=1,klon - deltT(jl) = 0.2 - deltQ(jl) = 1.0e-4 - - if(paph(jl,KLEVM1-1)-paph(jl,jk) .le. 6.e3) then - ql(jl,jk+1) = 0. - Tup(jl,jk+1) = 0.25*(ptenh(jl,jk+1)+ptenh(jl,jk)+ & - ptenh(jl,jk-1)+ptenh(jl,jk-2)) + & - deltT(jl) - dh(jl,jk+1) = 0.25*(pgeoh(jl,jk+1)+pgeoh(jl,jk)+ & - pgeoh(jl,jk-1)+pgeoh(jl,jk-2)) + & - Tup(jl,jk+1)*cpd - qh(jl,jk+1) = 0.25*(pqenh(jl,jk+1)+pqenh(jl,jk)+ & - pqenh(jl,jk-1)+pqenh(jl,jk-2))+ & - deltQ(jl) + ql(jl,jk+1) - Qup(jl,jk+1) = qh(jl,jk+1) - ql(jl,jk+1) - else - ql(jl,jk+1) = 0. - Tup(jl,jk+1) = ptenh(jl,jk+1) + deltT(jl) - dh(jl,jk+1) = pgeoh(jl,jk+1) + Tup(jl,jk+1)*cpd - qh(jl,jk+1) = pqenh(jl,jk+1) + deltQ(jl) - Qup(jl,jk+1) = qh(jl,jk+1) - ql(jl,jk+1) - end if - ww(jl,jk+1) = 1.0 - end do - end if + return + end subroutine cuini -! 
the next levels, we use the variables at the first level as initial values +!********************************************** +! subroutine cubase +!********************************************** + subroutine cubase & + (klon, klev, klevp1, klevm1, ptenh, & + pqenh, pgeoh, paph, ptu, pqu, & + plu, puen, pven, puu, pvu, & + ldcum, kcbot, klab) +! this routine calculates cloud base values (t and q) +! for cumulus parameterization +! m.tiedtke e.c.m.w.f. 7/86 modif. 12/89 +!***purpose. +! -------- +! to produce cloud base values for cu-parametrization +!***interface +! --------- +! this routine is called from *cumastr*. +! input are environm. values of t,q,p,phi at half levels. +! it returns cloud base values and flags as follows; +! klab=1 for subcloud levels +! klab=2 for condensation level +!***method. +! -------- +! lift surface air dry-adiabatically to cloud base +! (non entraining plume,i.e.constant massflux) +!***externals +! --------- +! *cuadjtq* for adjusting t and q due to condensation in ascent +! ---------------------------------------------------------------- +!------------------------------------------------------------------- + implicit none +!------------------------------------------------------------------- + integer klon, klev, klevp1 + integer klevm1 + integer jl,jk,is,ik,icall,ikb + real zbuo,zz + real ptenh(klon,klev), pqenh(klon,klev), & + pgeoh(klon,klev), paph(klon,klevp1) + real ptu(klon,klev), pqu(klon,klev), & + plu(klon,klev) + real puen(klon,klev), pven(klon,klev), & + puu(klon,klev), pvu(klon,klev) + real zqold(klon,klev), zph(klon) + integer klab(klon,klev), kcbot(klon) + logical ldcum(klon), loflag(klon) + logical ldbase(klon) + logical llo1 +!***input variables: +! ptenh [ztenh] - environment temperature on half levels. (cuini) +! pqenh [zqenh] - env. specific humidity on half levels. (cuini) +! pgeoh [zgeoh] - geopotential on half levels, (mssflx) +! paph - pressure of half levels. (mssflx) +!***variables modified by cubase: +! 
ldcum - logical denoting profiles. (cubase) +! ktype - convection type - 1: penetrative (cumastr) +! 2: stratocumulus (cumastr) +! 3: mid-level (cuasc) +! ptu - cloud temperature. +! pqu - cloud specific humidity. +! plu - cloud liquid water (moisture condensed out) +! kcbot - cloud base level. (cubase) +! klab [ilab] - level label - 1: sub-cloud layer (cubase) +!------------------------------------------------ +! 1. initialize values at lifting level +!------------------------------------------------ do jl=1,klon - if(.not. topflag(jl)) then - eta(jl) = 1.1e-4 - dz(jl) = (pgeoh(jl,jk)-pgeoh(jl,jk+1))*zrg - coef(jl)= eta(jl)*dz(jl) - dhen(jl,jk) = pgeoh(jl,jk) + cpd*ptenh(jl,jk) - dh(jl,jk) = (coef(jl)*dhen(jl,jk) + dh(jl,jk+1))/(1+coef(jl)) - qh(jl,jk) = (coef(jl)*pqenh(jl,jk)+ qh(jl,jk+1))/(1+coef(jl)) - Tup(jl,jk) = (dh(jl,jk)-pgeoh(jl,jk))*RCPD - Qup(jl,jk) = qh(jl,jk) - ql(jl,jk+1) - zqold(jl,jk) = Qup(jl,jk) - end if + klab(jl,klev)=1 + kcbot(jl)=klevm1 + ldcum(jl)=.false. + ldbase(jl)=.false. + puu(jl,klev)=puen(jl,klev)*(paph(jl,klevp1)-paph(jl,klev)) + pvu(jl,klev)=pven(jl,klev)*(paph(jl,klevp1)-paph(jl,klev)) end do -! check if the parcel is saturated - ik=jk - icall=1 - call CUADJTQ(klon,klev,ik,zph,Tup,Qup,myflag,icall) +!------------------------------------------------------- +! 2.0 do ascent in subcloud layer, +! check for existence of condensation level, +! adjust t,q and l accordingly in *cuadjtq*, +! check for buoyancy and set flags +!------------------------------------------------------- + do jk=1,klev do jl=1,klon - if( .not. topflag(jl) .and. zqold(jl,jk) .ne. Qup(jl,jk) ) then - lclflag(jl) = lclflag(jl) + 1 - ql(jl,jk) = ql(jl,jk+1) + zqold(jl,jk) - Qup(jl,jk) - dh(jl,jk) = pgeoh(jl,jk) + cpd*Tup(jl,jk) - end if + zqold(jl,jk)=0.0 + end do end do -! compute the updraft speed - do jl=1,klon - if(.not. 
topflag(jl))then - Kup(jl,jk+1) = 0.5*ww(jl,jk+1)**2 - Vtup(jl,jk) = Tup(jl,jk)*(1.+VTMPC1*Qup(jl,jk)-ql(jl,jk)) - Vten(jl,jk) = ptenh(jl,jk)*(1.+VTMPC1*pqenh(jl,jk)) - buoy(jl,jk) = (Vtup(jl,jk) - Vten(jl,jk))/Vten(jl,jk)*g - Kup(jl,jk) = (Kup(jl,jk+1) + 0.333*dz(jl)*buoy(jl,jk))/ & - (1+2*2*eta(jl)*dz(jl)) - if(Kup(jl,jk) .gt. 0 ) then - ww(jl,jk) = sqrt(2*Kup(jl,jk)) - if(lclflag(jl) .eq. 1 ) kcbot(jl) = jk - if(jk .eq. 2) then - kctop(jl) = jk - topflag(jl)= .true. - end if + do jk=klevm1,2,-1 + is=0 + do jl=1,klon + if(klab(jl,jk+1).eq.1 .or.(ldcum(jl).and.kcbot(jl).eq.jk+1)) then + is=is+1 + loflag(jl)=.true. else - ww(jl,jk) = 0 - kctop(jl) = jk + 1 - topflag(jl) = .true. + loflag(jl)=.false. + endif + zph(jl)=paph(jl,jk) + end do + if(is.eq.0) cycle + +! calculate averages of u and v for subcloud area, +! the values will be used to define cloud base values. + + if(lmfdudv) then + do jl=1,klon + if(.not.ldbase(jl)) then + puu(jl,klev)=puu(jl,klev)+ & + puen(jl,jk)*(paph(jl,jk+1)-paph(jl,jk)) + pvu(jl,klev)=pvu(jl,klev)+ & + pven(jl,jk)*(paph(jl,jk+1)-paph(jl,jk)) + endif + enddo + endif + + do jl=1,klon + if(loflag(jl)) then + pqu(jl,jk)=pqu(jl,jk+1) + ptu(jl,jk)=(cpd*ptu(jl,jk+1)+pgeoh(jl,jk+1) & + -pgeoh(jl,jk))*rcpd + zqold(jl,jk)=pqu(jl,jk) end if - end if + end do + + ik=jk + icall=1 + call cuadjtq(klon,klev,ik,zph,ptu,pqu,loflag,icall) + + do jl=1,klon + if(loflag(jl)) then + if(pqu(jl,jk).eq.zqold(jl,jk)) then + zbuo=ptu(jl,jk)*(1.+vtmpc1*pqu(jl,jk))- & + ptenh(jl,jk)*(1.+vtmpc1*pqenh(jl,jk))+zbuo0 + if(zbuo.gt.0.) klab(jl,jk)=1 + else + klab(jl,jk)=2 + plu(jl,jk)=plu(jl,jk)+zqold(jl,jk)-pqu(jl,jk) + zbuo=ptu(jl,jk)*(1.+vtmpc1*pqu(jl,jk))- & + ptenh(jl,jk)*(1.+vtmpc1*pqenh(jl,jk))+zbuo0 + llo1=zbuo.gt.0..and.klab(jl,jk+1).eq.1 + if(llo1) then + kcbot(jl)=jk + ldcum(jl)=.true. + ldbase(jl)=.true. + end if + end if + end if + end do end do - end do ! end all the levels - do jl = 1, klon - if(paph(jl,kcbot(jl)) - paph(jl,kctop(jl)) .gt. ZDNOPRC .and. 
& - lclflag(jl) .gt. 0 ) then - ZRELH(JL) = 0. - do jk=kcbot(jl),kctop(jl),-1 - ZRELH(JL)=ZRELH(JL)+ PQENH(JL,JK)/PQSENH(JL,JK) - end do - ZRELH(JL) = ZRELH(JL)/(kcbot(jl)-kctop(jl)+1) - - if(lndj(JL) .eq. 1) then - CRIRH1 = CRIRH*0.8 + if(lmfdudv) then + do jl=1,klon + if(ldcum(jl)) then + ikb=kcbot(jl) + zz=1./(paph(jl,klevp1)-paph(jl,ikb)) + puu(jl,klev)=puu(jl,klev)*zz + pvu(jl,klev)=pvu(jl,klev)*zz else - CRIRH1 = CRIRH + puu(jl,klev)=puen(jl,klevm1) + pvu(jl,klev)=pven(jl,klevm1) end if - if(ZRELH(JL) .ge. CRIRH1) ktype(jl) = 1 - end if - end do - - end do ! end all cycles - - END SUBROUTINE CUTYPE - + end do + end if + return + end subroutine cubase ! !********************************************** -! SUBROUTINE CUASC_NEW +! subroutine cuasc_new !********************************************** - SUBROUTINE CUASC_NEW & - (KLON, KLEV, KLEVP1, KLEVM1, PTENH, & - PQENH, PUEN, PVEN, PTEN, PQEN, & - PQSEN, PGEO, PGEOH, PAP, PAPH, & - PQTE, PVERV, KLWMIN, LDCUM, PHCBASE,& - KTYPE, KLAB, PTU, PQU, PLU, & - PUU, PVU, PMFU, PMFUB, PENTR, & - PMFUS, PMFUQ, PMFUL, PLUDE, PDMFUP, & - KCBOT, KCTOP, KCTOP0, KCUM, ZTMST, & - KHMIN, PHHATT, PQSENH) -! THIS ROUTINE DOES THE CALCULATIONS FOR CLOUD ASCENTS -! FOR CUMULUS PARAMETERIZATION -! M.TIEDTKE E.C.M.W.F. 7/86 MODIF. 12/89 -! Y.WANG IPRC 11/01 MODIF. -!***PURPOSE. + subroutine cuasc_new & + (klon, klev, klevp1, klevm1, ptenh, & + pqenh, puen, pven, pten, pqen, & + pqsen, pgeo, pgeoh, pap, paph, & + pqte, pverv, klwmin, ldcum, phcbase,& + ktype, klab, ptu, pqu, plu, & + puu, pvu, pmfu, pmfub, pentr, & + pmfus, pmfuq, pmful, plude, pdmfup, & + kcbot, kctop, kctop0, kcum, ztmst, & + khmin, phhatt, pqsenh) +! this routine does the calculations for cloud ascents +! for cumulus parameterization +! m.tiedtke e.c.m.w.f. 7/86 modif. 12/89 +! y.wang iprc 11/01 modif. +!***purpose. ! -------- -! TO PRODUCE CLOUD ASCENTS FOR CU-PARAMETRIZATION -! (VERTICAL PROFILES OF T,Q,L,U AND V AND CORRESPONDING -! 
FLUXES AS WELL AS PRECIPITATION RATES) -!***INTERFACE +! to produce cloud ascents for cu-parametrization +! (vertical profiles of t,q,l,u and v and corresponding +! fluxes as well as precipitation rates) +!***interface ! --------- -! THIS ROUTINE IS CALLED FROM *CUMASTR*. -!***METHOD. +! this routine is called from *cumastr*. +!***method. ! -------- -! LIFT SURFACE AIR DRY-ADIABATICALLY TO CLOUD BASE -! AND THEN CALCULATE MOIST ASCENT FOR -! ENTRAINING/DETRAINING PLUME. -! ENTRAINMENT AND DETRAINMENT RATES DIFFER FOR -! SHALLOW AND DEEP CUMULUS CONVECTION. -! IN CASE THERE IS NO PENETRATIVE OR SHALLOW CONVECTION -! CHECK FOR POSSIBILITY OF MID LEVEL CONVECTION -! (CLOUD BASE VALUES CALCULATED IN *CUBASMC*) -!***EXTERNALS +! lift surface air dry-adiabatically to cloud base +! and then calculate moist ascent for +! entraining/detraining plume. +! entrainment and detrainment rates differ for +! shallow and deep cumulus convection. +! in case there is no penetrative or shallow convection +! check for possibility of mid level convection +! (cloud base values calculated in *cubasmc*) +!***externals ! --------- -! *CUADJTQ* ADJUST T AND Q DUE TO CONDENSATION IN ASCENT -! *CUENTR_NEW* CALCULATE ENTRAINMENT/DETRAINMENT RATES -! *CUBASMC* CALCULATE CLOUD BASE VALUES FOR MIDLEVEL CONVECTION -!***REFERENCE +! *cuadjtq* adjust t and q due to condensation in ascent +! *cuentr_new* calculate entrainment/detrainment rates +! *cubasmc* calculate cloud base values for midlevel convection +!***reference ! --------- -! (TIEDTKE,1989) -!***INPUT VARIABLES: -! PTENH [ZTENH] - Environ Temperature on half levels. (CUINI) -! PQENH [ZQENH] - Env. specific humidity on half levels. (CUINI) -! PUEN - Environment wind u-component. (MSSFLX) -! PVEN - Environment wind v-component. (MSSFLX) -! PTEN - Environment Temperature. (MSSFLX) -! PQEN - Environment Specific Humidity. (MSSFLX) -! PQSEN - Environment Saturation Specific Humidity. (MSSFLX) -! PGEO - Geopotential. (MSSFLX) -! 
PGEOH [ZGEOH] - Geopotential on half levels, (MSSFLX) -! PAP - Pressure in Pa. (MSSFLX) -! PAPH - Pressure of half levels. (MSSFLX) -! PQTE - Moisture convergence (Delta q/Delta t). (MSSFLX) -! PVERV - Large Scale Vertical Velocity (Omega). (MSSFLX) -! KLWMIN [ILWMIN] - Level of Minimum Omega. (CUINI) -! KLAB [ILAB] - Level Label - 1: Sub-cloud layer. -! 2: Condensation Level (Cloud Base) -! PMFUB [ZMFUB] - Updraft Mass Flux at Cloud Base. (CUMASTR) -!***VARIABLES MODIFIED BY CUASC: -! LDCUM - Logical denoting profiles. (CUBASE) -! KTYPE - Convection type - 1: Penetrative (CUMASTR) -! 2: Stratocumulus (CUMASTR) -! 3: Mid-level (CUASC) -! PTU - Cloud Temperature. -! PQU - Cloud specific Humidity. -! PLU - Cloud Liquid Water (Moisture condensed out) -! PUU [ZUU] - Cloud Momentum U-Component. -! PVU [ZVU] - Cloud Momentum V-Component. -! PMFU - Updraft Mass Flux. -! PENTR [ZENTR] - Entrainment Rate. (CUMASTR ) (CUBASMC) -! PMFUS [ZMFUS] - Updraft Flux of Dry Static Energy. (CUBASMC) -! PMFUQ [ZMFUQ] - Updraft Flux of Specific Humidity. -! PMFUL [ZMFUL] - Updraft Flux of Cloud Liquid Water. -! PLUDE - Liquid Water Returned to Environment by Detrainment. -! PDMFUP [ZMFUP] - FLUX DIFFERENCE OF PRECIP. IN UPDRAFTS -! KCBOT - Cloud Base Level. (CUBASE) -! KCTOP - -! KCTOP0 [ICTOP0] - Estimate of Cloud Top. (CUMASTR) -! KCUM [ICUM] - +! (tiedtke,1989) +!***input variables: +! ptenh [ztenh] - environ temperature on half levels. (cuini) +! pqenh [zqenh] - env. specific humidity on half levels. (cuini) +! puen - environment wind u-component. (mssflx) +! pven - environment wind v-component. (mssflx) +! pten - environment temperature. (mssflx) +! pqen - environment specific humidity. (mssflx) +! pqsen - environment saturation specific humidity. (mssflx) +! pgeo - geopotential. (mssflx) +! pgeoh [zgeoh] - geopotential on half levels, (mssflx) +! pap - pressure in pa. (mssflx) +! paph - pressure of half levels. (mssflx) +! pqte - moisture convergence (delta q/delta t). (mssflx) +! 
pverv - large scale vertical velocity (omega). (mssflx) +! klwmin [ilwmin] - level of minimum omega. (cuini) +! klab [ilab] - level label - 1: sub-cloud layer. +! 2: condensation level (cloud base) +! pmfub [zmfub] - updraft mass flux at cloud base. (cumastr) +!***variables modified by cuasc: +! ldcum - logical denoting profiles. (cubase) +! ktype - convection type - 1: penetrative (cumastr) +! 2: stratocumulus (cumastr) +! 3: mid-level (cuasc) +! ptu - cloud temperature. +! pqu - cloud specific humidity. +! plu - cloud liquid water (moisture condensed out) +! puu [zuu] - cloud momentum u-component. +! pvu [zvu] - cloud momentum v-component. +! pmfu - updraft mass flux. +! pentr [zentr] - entrainment rate. (cumastr ) (cubasmc) +! pmfus [zmfus] - updraft flux of dry static energy. (cubasmc) +! pmfuq [zmfuq] - updraft flux of specific humidity. +! pmful [zmful] - updraft flux of cloud liquid water. +! plude - liquid water returned to environment by detrainment. +! pdmfup [zmfup] - +! kcbot - cloud base level. (cubase) +! kctop - +! kctop0 [ictop0] - estimate of cloud top. (cumastr) +! 
kcum [icum] - !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER klevm1,kcum - REAL ZTMST,ZCONS2,ZDZ,ZDRODZ - INTEGER JL,JK,IKB,IK,IS,IKT,ICALL - REAL ZMFMAX,ZFAC,ZMFTEST,ZDPRHO,ZMSE,ZNEVN,ZODMAX - REAL ZQEEN,ZSEEN,ZSCDE,ZGA,ZDT,ZSCOD - REAL ZQUDE,ZQCOD, ZMFUSK, ZMFUQK,ZMFULK - REAL ZBUO, ZPRCON, ZLNEW, ZZ, ZDMFEU, ZDMFDU - REAL ZBUOYZ,ZZDMF - REAL PTENH(KLON,KLEV), PQENH(KLON,KLEV), & - PUEN(KLON,KLEV), PVEN(KLON,KLEV), & - PTEN(KLON,KLEV), PQEN(KLON,KLEV), & - PGEO(KLON,KLEV), PGEOH(KLON,KLEV), & - PAP(KLON,KLEV), PAPH(KLON,KLEVP1), & - PQSEN(KLON,KLEV), PQTE(KLON,KLEV), & - PVERV(KLON,KLEV), PQSENH(KLON,KLEV) - REAL PTU(KLON,KLEV), PQU(KLON,KLEV), & - PUU(KLON,KLEV), PVU(KLON,KLEV), & - PMFU(KLON,KLEV), ZPH(KLON), & - PMFUB(KLON), PENTR(KLON), & - PMFUS(KLON,KLEV), PMFUQ(KLON,KLEV), & - PLU(KLON,KLEV), PLUDE(KLON,KLEV), & - PMFUL(KLON,KLEV), PDMFUP(KLON,KLEV) - REAL ZDMFEN(KLON), ZDMFDE(KLON), & - ZMFUU(KLON), ZMFUV(KLON), & - ZPBASE(KLON), ZQOLD(KLON), & - PHHATT(KLON,KLEV), ZODETR(KLON,KLEV), & - ZOENTR(KLON,KLEV), ZBUOY(KLON) - REAL PHCBASE(KLON) - INTEGER KLWMIN(KLON), KTYPE(KLON), & - KLAB(KLON,KLEV), KCBOT(KLON), & - KCTOP(KLON), KCTOP0(KLON), & - KHMIN(KLON) - LOGICAL LDCUM(KLON), LOFLAG(KLON) - integer leveltop,levelbot - real tt(klon),ttb(klon) - real zqsat(klon), zqsatb(klon) - real fscale(klon) - + integer klon, klev, klevp1 + integer klevm1,kcum + real ztmst,zcons2,zdz,zdrodz + integer jl,jk,ikb,ik,is,ikt,icall + real zmfmax,zfac,zmftest,zdprho,zmse,znevn,zodmax + real zqeen,zseen,zscde,zga,zdt,zscod + real zqude,zqcod, zmfusk, zmfuqk,zmfulk + real zbuo, zprcon, zlnew, zz, zdmfeu, zdmfdu + real zbuoyz,zzdmf + real ptenh(klon,klev), pqenh(klon,klev), & + puen(klon,klev), pven(klon,klev), & + pten(klon,klev), pqen(klon,klev), & + pgeo(klon,klev), pgeoh(klon,klev), & + pap(klon,klev), 
paph(klon,klevp1), & + pqsen(klon,klev), pqte(klon,klev), & + pverv(klon,klev), pqsenh(klon,klev) + real ptu(klon,klev), pqu(klon,klev), & + puu(klon,klev), pvu(klon,klev), & + pmfu(klon,klev), zph(klon), & + pmfub(klon), pentr(klon), & + pmfus(klon,klev), pmfuq(klon,klev), & + plu(klon,klev), plude(klon,klev), & + pmful(klon,klev), pdmfup(klon,klev) + real zdmfen(klon), zdmfde(klon), & + zmfuu(klon), zmfuv(klon), & + zpbase(klon), zqold(klon), & + phhatt(klon,klev), zodetr(klon,klev), & + zoentr(klon,klev), zbuoy(klon) + real phcbase(klon) + integer klwmin(klon), ktype(klon), & + klab(klon,klev), kcbot(klon), & + kctop(klon), kctop0(klon), & + khmin(klon) + logical ldcum(klon), loflag(klon) !-------------------------------- -!* 1. SPECIFY PARAMETERS +!* 1. specify parameters !-------------------------------- - 100 CONTINUE - ZCONS2=1./(G*ZTMST) + zcons2=1./(g*ztmst) !--------------------------------- -! 2. SET DEFAULT VALUES +! 2. set default values !--------------------------------- - 200 CONTINUE - DO 210 JL=1,KLON - ZMFUU(JL)=0. - ZMFUV(JL)=0. - ZBUOY(JL)=0. - IF(.NOT.LDCUM(JL)) KTYPE(JL)=0 - 210 CONTINUE - DO 230 JK=1,KLEV - DO 230 JL=1,KLON - PLU(JL,JK)=0. - PMFU(JL,JK)=0. - PMFUS(JL,JK)=0. - PMFUQ(JL,JK)=0. - PMFUL(JL,JK)=0. - PLUDE(JL,JK)=0. - PDMFUP(JL,JK)=0. - ZOENTR(JL,JK)=0. - ZODETR(JL,JK)=0. - IF(.NOT.LDCUM(JL).OR.KTYPE(JL).EQ.3) KLAB(JL,JK)=0 - IF(.NOT.LDCUM(JL).AND.PAPH(JL,JK).LT.4.E4) KCTOP0(JL)=JK - 230 CONTINUE + do jl=1,klon + zmfuu(jl)=0. + zmfuv(jl)=0. + zbuoy(jl)=0. + if(.not.ldcum(jl)) ktype(jl)=0 + end do + + do jk=1,klev + do jl=1,klon + plu(jl,jk)=0. + pmfu(jl,jk)=0. + pmfus(jl,jk)=0. + pmfuq(jl,jk)=0. + pmful(jl,jk)=0. + plude(jl,jk)=0. + pdmfup(jl,jk)=0. + zoentr(jl,jk)=0. + zodetr(jl,jk)=0. + if(.not.ldcum(jl).or.ktype(jl).eq.3) klab(jl,jk)=0 + if(.not.ldcum(jl).and.paph(jl,jk).lt.4.e4) kctop0(jl)=jk + end do + end do !------------------------------------------------ -! 3.0 INITIALIZE VALUES AT LIFTING LEVEL +! 
3.0 initialize values at lifting level !------------------------------------------------ - DO 310 JL=1,KLON - KCTOP(JL)=KLEVM1 - IF(.NOT.LDCUM(JL)) THEN - KCBOT(JL)=KLEVM1 - PMFUB(JL)=0. - PQU(JL,KLEV)=0. - END IF - PMFU(JL,KLEV)=PMFUB(JL) - PMFUS(JL,KLEV)=PMFUB(JL)*(CPD*PTU(JL,KLEV)+PGEOH(JL,KLEV)) - PMFUQ(JL,KLEV)=PMFUB(JL)*PQU(JL,KLEV) - IF(LMFDUDV) THEN - ZMFUU(JL)=PMFUB(JL)*PUU(JL,KLEV) - ZMFUV(JL)=PMFUB(JL)*PVU(JL,KLEV) - END IF - 310 CONTINUE -! -!-- 3.1 Find organized entrainment at cloud base -! - DO 322 JL=1,KLON - LDCUM(JL)=.FALSE. - IF (KTYPE(JL).EQ.1) THEN - IKB = KCBOT(JL) - if(orgen .eq. 1 ) then -! old scheme - ZBUOY(JL)=G*((PTU(JL,IKB)-PTENH(JL,IKB))/PTENH(JL,IKB)+ & - 0.608*(PQU(JL,IKB)-PQENH(JL,IKB))) - IF (ZBUOY(JL).GT.0.) THEN - ZDZ = (PGEO(JL,IKB-1)-PGEO(JL,IKB))*ZRG - ZDRODZ = -LOG(PTEN(JL,IKB-1)/PTEN(JL,IKB))/ZDZ - & - G/(RD*PTENH(JL,IKB)) - ZOENTR(JL,IKB-1)=ZBUOY(JL)*0.5/(1.+ZBUOY(JL)*ZDZ) & - +ZDRODZ - ZOENTR(JL,IKB-1) = MIN(ZOENTR(JL,IKB-1),1.E-3) - ZOENTR(JL,IKB-1) = MAX(ZOENTR(JL,IKB-1),0.) - END IF -! New scheme -! Let's define the fscale - else if(orgen .eq. 2 ) then - tt(jl) = ptenh(jl,ikb-1) - zqsat(jl) = TLUCUA(tt(jl))/paph(jl,ikb-1) - zqsat(jl) = zqsat(jl)/(1.-VTMPC1*zqsat(jl)) - ttb(jl) = ptenh(jl,ikb) - zqsatb(jl) = TLUCUA(ttb(jl))/paph(jl,ikb) - zqsatb(jl) = zqsatb(jl)/(1.-VTMPC1*zqsatb(jl)) - fscale(jl) = (zqsat(jl)/zqsatb(jl))**3 -! end of defining the fscale - zoentr(jl,ikb-1) = 1.E-3*(1.3-PQEN(jl,ikb-1)/PQSEN(jl,ikb-1))*fscale(jl) - zoentr(jl,ikb-1) = MIN(zoentr(jl,ikb-1),1.E-3) - zoentr(jl,ikb-1) = MAX(zoentr(jl,ikb-1),0.) + do jl=1,klon + kctop(jl)=klevm1 + if(.not.ldcum(jl)) then + kcbot(jl)=klevm1 + pmfub(jl)=0. + pqu(jl,klev)=0. + end if + pmfu(jl,klev)=pmfub(jl) + pmfus(jl,klev)=pmfub(jl)*(cpd*ptu(jl,klev)+pgeoh(jl,klev)) + pmfuq(jl,klev)=pmfub(jl)*pqu(jl,klev) + if(lmfdudv) then + zmfuu(jl)=pmfub(jl)*puu(jl,klev) + zmfuv(jl)=pmfub(jl)*pvu(jl,klev) + end if + end do +! 
+!-- 3.1 find organized entrainment at cloud base +! + do jl=1,klon + ldcum(jl)=.false. + if (ktype(jl).eq.1) then + ikb = kcbot(jl) + zbuoy(jl)=g*((ptu(jl,ikb)-ptenh(jl,ikb))/ptenh(jl,ikb)+ & + 0.608*(pqu(jl,ikb)-pqenh(jl,ikb))) + if (zbuoy(jl).gt.0.) then + zdz = (pgeo(jl,ikb-1)-pgeo(jl,ikb))*zrg + zdrodz = -log(pten(jl,ikb-1)/pten(jl,ikb))/zdz - & + g/(rd*ptenh(jl,ikb)) + zoentr(jl,ikb-1)=zbuoy(jl)*0.5/(1.+zbuoy(jl)*zdz) & + +zdrodz + zoentr(jl,ikb-1) = min(zoentr(jl,ikb-1),1.e-3) + zoentr(jl,ikb-1) = max(zoentr(jl,ikb-1),0.) end if - END IF - 322 CONTINUE + end if + end do ! !----------------------------------------------------------------- -! 4. DO ASCENT: SUBCLOUD LAYER (KLAB=1) ,CLOUDS (KLAB=2) -! BY DOING FIRST DRY-ADIABATIC ASCENT AND THEN -! BY ADJUSTING T,Q AND L ACCORDINGLY IN *CUADJTQ*, -! THEN CHECK FOR BUOYANCY AND SET FLAGS ACCORDINGLY +! 4. do ascent: subcloud layer (klab=1) ,clouds (klab=2) +! by doing first dry-adiabatic ascent and then +! by adjusting t,q and l accordingly in *cuadjtq*, +! then check for buoyancy and set flags accordingly !----------------------------------------------------------------- - 400 CONTINUE - -! let's define the levels in which the middle level convection could be activated - do jk=KLEVM1,2,-1 - if(abs(paph(1,jk)*0.01 - 250) .lt. 50.) then - leveltop = jk - exit - end if - end do - leveltop = min(KLEV-15,leveltop) - levelbot = KLEVM1 - 4 - - DO 480 JK=KLEVM1,2,-1 -! SPECIFY CLOUD BASE VALUES FOR MIDLEVEL CONVECTION -! IN *CUBASMC* IN CASE THERE IS NOT ALREADY CONVECTION + do jk=klevm1,2,-1 +! specify cloud base values for midlevel convection +! in *cubasmc* in case there is not already convection ! 
--------------------------------------------------------------------- - IK=JK - IF(LMFMID.AND.IK.LT.levelbot.AND.IK.GT.leveltop) THEN - CALL CUBASMC & - (KLON, KLEV, KLEVM1, IK, PTEN, & - PQEN, PQSEN, PUEN, PVEN, PVERV, & - PGEO, PGEOH, LDCUM, KTYPE, KLAB, & - PMFU, PMFUB, PENTR, KCBOT, PTU, & - PQU, PLU, PUU, PVU, PMFUS, & - PMFUQ, PMFUL, PDMFUP, ZMFUU, ZMFUV) - ENDIF - IS=0 - DO 410 JL=1,KLON - ZQOLD(JL)=0.0 - IS=IS+KLAB(JL,JK+1) - IF(KLAB(JL,JK+1).EQ.0) KLAB(JL,JK)=0 - LOFLAG(JL)=KLAB(JL,JK+1).GT.0 - ZPH(JL)=PAPH(JL,JK) - IF(KTYPE(JL).EQ.3.AND.JK.EQ.KCBOT(JL)) THEN - ZMFMAX=(PAPH(JL,JK)-PAPH(JL,JK-1))*ZCONS2 - IF(PMFUB(JL).GT.ZMFMAX) THEN - ZFAC=ZMFMAX/PMFUB(JL) - PMFU(JL,JK+1)=PMFU(JL,JK+1)*ZFAC - PMFUS(JL,JK+1)=PMFUS(JL,JK+1)*ZFAC - PMFUQ(JL,JK+1)=PMFUQ(JL,JK+1)*ZFAC - ZMFUU(JL)=ZMFUU(JL)*ZFAC - ZMFUV(JL)=ZMFUV(JL)*ZFAC - PMFUB(JL)=ZMFMAX - END IF - END IF - 410 CONTINUE - IF(IS.EQ.0) GO TO 480 -! -!* SPECIFY ENTRAINMENT RATES IN *CUENTR_NEW* + ik=jk + if(lmfmid.and.ik.lt.klevm1.and.ik.gt.klev-13) then + call cubasmc & + (klon, klev, klevm1, ik, pten, & + pqen, pqsen, puen, pven, pverv, & + pgeo, pgeoh, ldcum, ktype, klab, & + pmfu, pmfub, pentr, kcbot, ptu, & + pqu, plu, puu, pvu, pmfus, & + pmfuq, pmful, pdmfup, zmfuu, zmfuv) + endif + is=0 + do jl=1,klon + zqold(jl)=0.0 + is=is+klab(jl,jk+1) + if(klab(jl,jk+1).eq.0) klab(jl,jk)=0 + loflag(jl)=klab(jl,jk+1).gt.0 + zph(jl)=paph(jl,jk) + if(ktype(jl).eq.3.and.jk.eq.kcbot(jl)) then + zmfmax=(paph(jl,jk)-paph(jl,jk-1))*zcons2 + if(pmfub(jl).gt.zmfmax) then + zfac=zmfmax/pmfub(jl) + pmfu(jl,jk+1)=pmfu(jl,jk+1)*zfac + pmfus(jl,jk+1)=pmfus(jl,jk+1)*zfac + pmfuq(jl,jk+1)=pmfuq(jl,jk+1)*zfac + zmfuu(jl)=zmfuu(jl)*zfac + zmfuv(jl)=zmfuv(jl)*zfac + pmfub(jl)=zmfmax + end if + end if + end do + + if(is.eq.0) cycle +! +!* specify entrainment rates in *cuentr_new* ! 
------------------------------------- - IK=JK - CALL CUENTR_NEW & - (KLON, KLEV, KLEVP1, IK, PTENH,& - PAPH, PAP, PGEOH, KLWMIN, LDCUM,& - KTYPE, KCBOT, KCTOP0, ZPBASE, PMFU, & - PENTR, ZDMFEN, ZDMFDE, ZODETR, KHMIN) -! -! DO ADIABATIC ASCENT FOR ENTRAINING/DETRAINING PLUME + ik=jk + call cuentr_new & + (klon, klev, klevp1, ik, ptenh,& + paph, pap, pgeoh, klwmin, ldcum,& + ktype, kcbot, kctop0, zpbase, pmfu, & + pentr, zdmfen, zdmfde, zodetr, khmin) +! +! do adiabatic ascent for entraining/detraining plume ! ------------------------------------------------------- -! Do adiabatic ascent for entraining/detraining plume +! do adiabatic ascent for entraining/detraining plume ! the cloud ensemble entrains environmental values ! in turbulent detrainment cloud ensemble values are detrained ! in organized detrainment the dry static energy and ! moisture that are neutral compared to the ! environmental air are detrained ! - DO 420 JL=1,KLON - IF(LOFLAG(JL)) THEN - IF(JK.LT.KCBOT(JL)) THEN - ZMFTEST=PMFU(JL,JK+1)+ZDMFEN(JL)-ZDMFDE(JL) - ZMFMAX=MIN(ZMFTEST,(PAPH(JL,JK)-PAPH(JL,JK-1))*ZCONS2) - ZDMFEN(JL)=MAX(ZDMFEN(JL)-MAX(ZMFTEST-ZMFMAX,0.),0.) - END IF - ZDMFDE(JL)=MIN(ZDMFDE(JL),0.75*PMFU(JL,JK+1)) - PMFU(JL,JK)=PMFU(JL,JK+1)+ZDMFEN(JL)-ZDMFDE(JL) - IF (JK.LT.kcbot(jl)) THEN + do jl=1,klon + if(loflag(jl)) then + if(jk.lt.kcbot(jl)) then + zmftest=pmfu(jl,jk+1)+zdmfen(jl)-zdmfde(jl) + zmfmax=min(zmftest,(paph(jl,jk)-paph(jl,jk-1))*zcons2) + zdmfen(jl)=max(zdmfen(jl)-max(zmftest-zmfmax,0.),0.) + end if + zdmfde(jl)=min(zdmfde(jl),0.75*pmfu(jl,jk+1)) + pmfu(jl,jk)=pmfu(jl,jk+1)+zdmfen(jl)-zdmfde(jl) + if (jk.lt.kcbot(jl)) then zdprho = (pgeoh(jl,jk)-pgeoh(jl,jk+1))*zrg zoentr(jl,jk) = zoentr(jl,jk)*zdprho*pmfu(jl,jk+1) zmftest = pmfu(jl,jk) + zoentr(jl,jk)-zodetr(jl,jk) - zmfmax = MIN(zmftest,(paph(jl,jk)-paph(jl,jk-1))*zcons2) - zoentr(jl,jk) = MAX(zoentr(jl,jk)-MAX(zmftest-zmfmax,0.),0.) 
- END IF + zmfmax = min(zmftest,(paph(jl,jk)-paph(jl,jk-1))*zcons2) + zoentr(jl,jk) = max(zoentr(jl,jk)-max(zmftest-zmfmax,0.),0.) + end if ! ! limit organized detrainment to not allowing for too deep clouds ! - IF (ktype(jl).EQ.1.AND.jk.LT.kcbot(jl).AND.jk.LE.khmin(jl)) THEN + if (ktype(jl).eq.1.and.jk.lt.kcbot(jl).and.jk.le.khmin(jl)) then zmse = cpd*ptu(jl,jk+1) + alv*pqu(jl,jk+1) + pgeoh(jl,jk+1) ikt = kctop0(jl) znevn=(pgeoh(jl,ikt)-pgeoh(jl,jk+1))*(zmse-phhatt(jl, & jk+1))*zrg - IF (znevn.LE.0.) znevn = 1. + if (znevn.le.0.) znevn = 1. zdprho = (pgeoh(jl,jk)-pgeoh(jl,jk+1))*zrg zodmax = ((phcbase(jl)-zmse)/znevn)*zdprho*pmfu(jl,jk+1) - zodmax = MAX(zodmax,0.) - zodetr(jl,jk) = MIN(zodetr(jl,jk),zodmax) - END IF - zodetr(jl,jk) = MIN(zodetr(jl,jk),0.75*pmfu(jl,jk)) + zodmax = max(zodmax,0.) + zodetr(jl,jk) = min(zodetr(jl,jk),zodmax) + end if + zodetr(jl,jk) = min(zodetr(jl,jk),0.75*pmfu(jl,jk)) pmfu(jl,jk) = pmfu(jl,jk) + zoentr(jl,jk) - zodetr(jl,jk) - ZQEEN=PQENH(JL,JK+1)*ZDMFEN(JL) + zqeen=pqenh(jl,jk+1)*zdmfen(jl) zqeen=zqeen + pqenh(jl,jk+1)*zoentr(jl,jk) - ZSEEN=(CPD*PTENH(JL,JK+1)+PGEOH(JL,JK+1))*ZDMFEN(JL) + zseen=(cpd*ptenh(jl,jk+1)+pgeoh(jl,jk+1))*zdmfen(jl) zseen=zseen+(cpd*ptenh(jl,jk+1)+pgeoh(jl,jk+1))* & zoentr(jl,jk) - ZSCDE=(CPD*PTU(JL,JK+1)+PGEOH(JL,JK+1))*ZDMFDE(JL) + zscde=(cpd*ptu(jl,jk+1)+pgeoh(jl,jk+1))*zdmfde(jl) ! 
find moist static energy that give nonbuoyant air zga = alv*pqsenh(jl,jk+1)/(rv*(ptenh(jl,jk+1)**2)) zdt = (plu(jl,jk+1)-0.608*(pqsenh(jl,jk+1)-pqenh(jl, & @@ -2250,1313 +1809,1284 @@ SUBROUTINE CUASC_NEW & zmfusk = pmfus(jl,jk+1) + zseen - zscde zmfuqk = pmfuq(jl,jk+1) + zqeen - zqude zmfulk = pmful(jl,jk+1) - plude(jl,jk) - plu(jl,jk) = zmfulk*(1./MAX(cmfcmin,pmfu(jl,jk))) - pqu(jl,jk) = zmfuqk*(1./MAX(cmfcmin,pmfu(jl,jk))) - ptu(jl,jk)=(zmfusk*(1./MAX(cmfcmin,pmfu(jl,jk)))- & + plu(jl,jk) = zmfulk*(1./max(cmfcmin,pmfu(jl,jk))) + pqu(jl,jk) = zmfuqk*(1./max(cmfcmin,pmfu(jl,jk))) + ptu(jl,jk)=(zmfusk*(1./max(cmfcmin,pmfu(jl,jk)))- & pgeoh(jl,jk))*rcpd - ptu(jl,jk) = MAX(100.,ptu(jl,jk)) - ptu(jl,jk) = MIN(400.,ptu(jl,jk)) + ptu(jl,jk) = max(100.,ptu(jl,jk)) + ptu(jl,jk) = min(400.,ptu(jl,jk)) zqold(jl) = pqu(jl,jk) - END IF - 420 CONTINUE -!* DO CORRECTIONS FOR MOIST ASCENT -!* BY ADJUSTING T,Q AND L IN *CUADJTQ* + end if + end do +!* do corrections for moist ascent +!* by adjusting t,q and l in *cuadjtq* !------------------------------------------------ - IK=JK - ICALL=1 -! - CALL CUADJTQ(KLON,KLEV,IK,ZPH,PTU,PQU,LOFLAG,ICALL) -! - DO 440 JL=1,KLON - IF(LOFLAG(JL).AND.PQU(JL,JK).NE.ZQOLD(JL)) THEN - KLAB(JL,JK)=2 - PLU(JL,JK)=PLU(JL,JK)+ZQOLD(JL)-PQU(JL,JK) - ZBUO=PTU(JL,JK)*(1.+VTMPC1*PQU(JL,JK)-PLU(JL,JK))- & - PTENH(JL,JK)*(1.+VTMPC1*PQENH(JL,JK)) - IF(KLAB(JL,JK+1).EQ.1) ZBUO=ZBUO+ZBUO0 - IF(ZBUO.GT.0..AND.PMFU(JL,JK).GT.0.01*PMFUB(JL).AND. & - JK.GE.KCTOP0(JL)) THEN - KCTOP(JL)=JK - LDCUM(JL)=.TRUE. - IF(ZPBASE(JL)-PAPH(JL,JK).GE.ZDNOPRC) THEN - ZPRCON=CPRCON - ELSE - ZPRCON=0. - ENDIF - ZLNEW=PLU(JL,JK)/(1.+ZPRCON*(PGEOH(JL,JK)-PGEOH(JL,JK+1))) - PDMFUP(JL,JK)=MAX(0.,(PLU(JL,JK)-ZLNEW)*PMFU(JL,JK)) - PLU(JL,JK)=ZLNEW - ELSE - KLAB(JL,JK)=0 - PMFU(JL,JK)=0. - END IF - END IF - IF(LOFLAG(JL)) THEN - PMFUL(JL,JK)=PLU(JL,JK)*PMFU(JL,JK) - PMFUS(JL,JK)=(CPD*PTU(JL,JK)+PGEOH(JL,JK))*PMFU(JL,JK) - PMFUQ(JL,JK)=PQU(JL,JK)*PMFU(JL,JK) - END IF - 440 CONTINUE -! 
- IF(LMFDUDV) THEN -! - DO 460 JL=1,KLON + ik=jk + icall=1 +! + call cuadjtq(klon,klev,ik,zph,ptu,pqu,loflag,icall) +! + do jl=1,klon + if(loflag(jl).and.pqu(jl,jk).ne.zqold(jl)) then + klab(jl,jk)=2 + plu(jl,jk)=plu(jl,jk)+zqold(jl)-pqu(jl,jk) + zbuo=ptu(jl,jk)*(1.+vtmpc1*pqu(jl,jk)-plu(jl,jk))- & + ptenh(jl,jk)*(1.+vtmpc1*pqenh(jl,jk)) + if(klab(jl,jk+1).eq.1) zbuo=zbuo+zbuo0 + if(zbuo.gt.0..and.pmfu(jl,jk).gt.0.01*pmfub(jl).and. & + jk.ge.kctop0(jl)) then + kctop(jl)=jk + ldcum(jl)=.true. + if(zpbase(jl)-paph(jl,jk).ge.zdnoprc) then + zprcon=cprcon + else + zprcon=0. + endif + zlnew=plu(jl,jk)/(1.+zprcon*(pgeoh(jl,jk)-pgeoh(jl,jk+1))) + pdmfup(jl,jk)=max(0.,(plu(jl,jk)-zlnew)*pmfu(jl,jk)) + plu(jl,jk)=zlnew + else + klab(jl,jk)=0 + pmfu(jl,jk)=0. + end if + end if + if(loflag(jl)) then + pmful(jl,jk)=plu(jl,jk)*pmfu(jl,jk) + pmfus(jl,jk)=(cpd*ptu(jl,jk)+pgeoh(jl,jk))*pmfu(jl,jk) + pmfuq(jl,jk)=pqu(jl,jk)*pmfu(jl,jk) + end if + end do +! + if(lmfdudv) then +! + do jl=1,klon zdmfen(jl) = zdmfen(jl) + zoentr(jl,jk) zdmfde(jl) = zdmfde(jl) + zodetr(jl,jk) - IF(LOFLAG(JL)) THEN - IF(KTYPE(JL).EQ.1.OR.KTYPE(JL).EQ.3) THEN - IF(ZDMFEN(JL).LE.1.E-20) THEN - ZZ=3. - ELSE - ZZ=2. - ENDIF - ELSE - IF(ZDMFEN(JL).LE.1.0E-20) THEN - ZZ=1. - ELSE - ZZ=0. - ENDIF - END IF - ZDMFEU=ZDMFEN(JL)+ZZ*ZDMFDE(JL) - ZDMFDU=ZDMFDE(JL)+ZZ*ZDMFDE(JL) - ZDMFDU=MIN(ZDMFDU,0.75*PMFU(JL,JK+1)) - ZMFUU(JL)=ZMFUU(JL)+ & - ZDMFEU*PUEN(JL,JK)-ZDMFDU*PUU(JL,JK+1) - ZMFUV(JL)=ZMFUV(JL)+ & - ZDMFEU*PVEN(JL,JK)-ZDMFDU*PVU(JL,JK+1) - IF(PMFU(JL,JK).GT.0.) THEN - PUU(JL,JK)=ZMFUU(JL)*(1./PMFU(JL,JK)) - PVU(JL,JK)=ZMFUV(JL)*(1./PMFU(JL,JK)) - END IF - END IF - 460 CONTINUE -! - END IF -! -! Compute organized entrainment + if(loflag(jl)) then + if(ktype(jl).eq.1.or.ktype(jl).eq.3) then + if(zdmfen(jl).le.1.e-20) then + zz=3. + else + zz=2. + endif + else + if(zdmfen(jl).le.1.0e-20) then + zz=1. + else + zz=0. 
+ endif + end if + zdmfeu=zdmfen(jl)+zz*zdmfde(jl) + zdmfdu=zdmfde(jl)+zz*zdmfde(jl) + zdmfdu=min(zdmfdu,0.75*pmfu(jl,jk+1)) + zmfuu(jl)=zmfuu(jl)+ & + zdmfeu*puen(jl,jk)-zdmfdu*puu(jl,jk+1) + zmfuv(jl)=zmfuv(jl)+ & + zdmfeu*pven(jl,jk)-zdmfdu*pvu(jl,jk+1) + if(pmfu(jl,jk).gt.0.) then + puu(jl,jk)=zmfuu(jl)*(1./pmfu(jl,jk)) + pvu(jl,jk)=zmfuv(jl)*(1./pmfu(jl,jk)) + end if + end if + end do +! + end if +! +! compute organized entrainment ! for use at next level ! - DO 470 jl = 1, klon - IF (loflag(jl).AND.ktype(jl).EQ.1) THEN -! old scheme - if(orgen .eq. 1 ) then + do jl = 1, klon + if (loflag(jl).and.ktype(jl).eq.1) then zbuoyz=g*((ptu(jl,jk)-ptenh(jl,jk))/ptenh(jl,jk)+ & 0.608*(pqu(jl,jk)-pqenh(jl,jk))-plu(jl,jk)) - zbuoyz = MAX(zbuoyz,0.0) + zbuoyz = max(zbuoyz,0.0) zdz = (pgeo(jl,jk-1)-pgeo(jl,jk))*zrg - zdrodz = -LOG(pten(jl,jk-1)/pten(jl,jk))/zdz - & + zdrodz = -log(pten(jl,jk-1)/pten(jl,jk))/zdz - & g/(rd*ptenh(jl,jk)) zbuoy(jl) = zbuoy(jl) + zbuoyz*zdz zoentr(jl,jk-1) = zbuoyz*0.5/(1.+zbuoy(jl))+zdrodz - zoentr(jl,jk-1) = MIN(zoentr(jl,jk-1),1.E-3) - zoentr(jl,jk-1) = MAX(zoentr(jl,jk-1),0.) - else if(orgen .eq. 2 ) then -! Let's define the fscale - tt(jl) = ptenh(jl,jk-1) - zqsat(jl) = TLUCUA(tt(jl))/paph(jl,jk-1) - zqsat(jl) = zqsat(jl)/(1.-VTMPC1*zqsat(jl)) - ttb(jl) = ptenh(jl,kcbot(jl)) - zqsatb(jl) = TLUCUA(ttb(jl))/paph(jl,kcbot(jl)) - zqsatb(jl) = zqsatb(jl)/(1.-VTMPC1*zqsatb(jl)) - fscale(jl) = (zqsat(jl)/zqsatb(jl))**3 -! end of defining the fscale - zoentr(jl,jk-1) = 1.E-3*(1.3-PQEN(jl,jk-1)/PQSEN(jl,jk-1))*fscale(jl) - zoentr(jl,jk-1) = MIN(zoentr(jl,jk-1),1.E-3) - zoentr(jl,jk-1) = MAX(zoentr(jl,jk-1),0.) -! write(6,*) "zoentr=",zoentr(jl,jk-1) + zoentr(jl,jk-1) = min(zoentr(jl,jk-1),1.e-3) + zoentr(jl,jk-1) = max(zoentr(jl,jk-1),0.) end if - END IF - 470 CONTINUE + end do ! - 480 CONTINUE + end do ! end outer cycle ! ----------------------------------------------------------------- -! 5. DETERMINE CONVECTIVE FLUXES ABOVE NON-BUOYANCY LEVEL +! 
5. determine convective fluxes above non-buoyancy level ! ----------------------------------------------------------------- -! (NOTE: CLOUD VARIABLES LIKE T,Q AND L ARE NOT -! AFFECTED BY DETRAINMENT AND ARE ALREADY KNOWN -! FROM PREVIOUS CALCULATIONS ABOVE) - 500 CONTINUE - DO 510 JL=1,KLON - IF(KCTOP(JL).EQ.KLEVM1) LDCUM(JL)=.FALSE. - KCBOT(JL)=MAX(KCBOT(JL),KCTOP(JL)) - 510 CONTINUE - IS=0 - DO 520 JL=1,KLON - IF(LDCUM(JL)) THEN - IS=IS+1 - ENDIF - 520 CONTINUE - KCUM=IS - IF(IS.EQ.0) GO TO 800 - DO 530 JL=1,KLON - IF(LDCUM(JL)) THEN - JK=KCTOP(JL)-1 - ZZDMF=CMFCTOP - ZDMFDE(JL)=(1.-ZZDMF)*PMFU(JL,JK+1) - PLUDE(JL,JK)=ZDMFDE(JL)*PLU(JL,JK+1) - PMFU(JL,JK)=PMFU(JL,JK+1)-ZDMFDE(JL) - PMFUS(JL,JK)=(CPD*PTU(JL,JK)+PGEOH(JL,JK))*PMFU(JL,JK) - PMFUQ(JL,JK)=PQU(JL,JK)*PMFU(JL,JK) - PMFUL(JL,JK)=PLU(JL,JK)*PMFU(JL,JK) - PLUDE(JL,JK-1)=PMFUL(JL,JK) - PDMFUP(JL,JK)=0. - END IF - 530 CONTINUE - IF(LMFDUDV) THEN - DO 540 JL=1,KLON - IF(LDCUM(JL)) THEN - JK=KCTOP(JL)-1 - PUU(JL,JK)=PUU(JL,JK+1) - PVU(JL,JK)=PVU(JL,JK+1) - END IF - 540 CONTINUE - END IF - 800 CONTINUE - RETURN - END SUBROUTINE CUASC_NEW +! (note: cloud variables like t,q and l are not +! affected by detrainment and are already known +! from previous calculations above) + do jl=1,klon + if(kctop(jl).eq.klevm1) ldcum(jl)=.false. + kcbot(jl)=max(kcbot(jl),kctop(jl)) + end do + + is=0 + do jl=1,klon + if(ldcum(jl)) then + is=is+1 + endif + end do + kcum=is + if(is.eq.0) return + do jl=1,klon + if(ldcum(jl)) then + jk=kctop(jl)-1 + zzdmf=cmfctop + zdmfde(jl)=(1.-zzdmf)*pmfu(jl,jk+1) + plude(jl,jk)=zdmfde(jl)*plu(jl,jk+1) + pmfu(jl,jk)=pmfu(jl,jk+1)-zdmfde(jl) + pmfus(jl,jk)=(cpd*ptu(jl,jk)+pgeoh(jl,jk))*pmfu(jl,jk) + pmfuq(jl,jk)=pqu(jl,jk)*pmfu(jl,jk) + pmful(jl,jk)=plu(jl,jk)*pmfu(jl,jk) + plude(jl,jk-1)=pmful(jl,jk) + pdmfup(jl,jk)=0. 
+ end if + end do + + if(lmfdudv) then + do jl=1,klon + if(ldcum(jl)) then + jk=kctop(jl)-1 + puu(jl,jk)=puu(jl,jk+1) + pvu(jl,jk)=pvu(jl,jk+1) + end if + end do + end if + return + end subroutine cuasc_new ! !********************************************** -! SUBROUTINE CUDLFS +! subroutine cudlfs !********************************************** - SUBROUTINE CUDLFS & - (KLON, KLEV, KLEVP1, PTENH, PQENH, & - PUEN, PVEN, PGEOH, PAPH, PTU, & - PQU, PUU, PVU, LDCUM, KCBOT, & - KCTOP, PMFUB, PRFL, PTD, PQD, & - PUD, PVD, PMFD, PMFDS, PMFDQ, & - PDMFDP, KDTOP, LDDRAF) -! THIS ROUTINE CALCULATES LEVEL OF FREE SINKING FOR -! CUMULUS DOWNDRAFTS AND SPECIFIES T,Q,U AND V VALUES -! M.TIEDTKE E.C.M.W.F. 12/86 MODIF. 12/89 -!***PURPOSE. + subroutine cudlfs & + (klon, klev, klevp1, ptenh, pqenh, & + puen, pven, pgeoh, paph, ptu, & + pqu, puu, pvu, ldcum, kcbot, & + kctop, pmfub, prfl, ptd, pqd, & + pud, pvd, pmfd, pmfds, pmfdq, & + pdmfdp, kdtop, lddraf) +! this routine calculates level of free sinking for +! cumulus downdrafts and specifies t,q,u and v values +! m.tiedtke e.c.m.w.f. 12/86 modif. 12/89 +!***purpose. ! -------- -! TO PRODUCE LFS-VALUES FOR CUMULUS DOWNDRAFTS -! FOR MASSFLUX CUMULUS PARAMETERIZATION -!***INTERFACE +! to produce lfs-values for cumulus downdrafts +! for massflux cumulus parameterization +!***interface ! --------- -! THIS ROUTINE IS CALLED FROM *CUMASTR*. -! INPUT ARE ENVIRONMENTAL VALUES OF T,Q,U,V,P,PHI -! AND UPDRAFT VALUES T,Q,U AND V AND ALSO -! CLOUD BASE MASSFLUX AND CU-PRECIPITATION RATE. -! IT RETURNS T,Q,U AND V VALUES AND MASSFLUX AT LFS. -!***METHOD. +! this routine is called from *cumastr*. +! input are environmental values of t,q,u,v,p,phi +! and updraft values t,q,u and v and also +! cloud base massflux and cu-precipitation rate. +! it returns t,q,u and v values and massflux at lfs. +!***method. ! -------- -! CHECK FOR NEGATIVE BUOYANCY OF AIR OF EQUAL PARTS OF -! MOIST ENVIRONMENTAL AIR AND CLOUD AIR. -!***EXTERNALS +! 
check for negative buoyancy of air of equal parts of +! moist environmental air and cloud air. +!***externals ! --------- -! *CUADJTQ* FOR CALCULATING WET BULB T AND Q AT LFS +! *cuadjtq* for calculating wet bulb t and q at lfs ! ---------------------------------------------------------------- !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER JL,KE,JK,IS,IK,ICALL - REAL ZTTEST, ZQTEST, ZBUO, ZMFTOP - REAL PTENH(KLON,KLEV), PQENH(KLON,KLEV), & - PUEN(KLON,KLEV), PVEN(KLON,KLEV), & - PGEOH(KLON,KLEV), PAPH(KLON,KLEVP1), & - PTU(KLON,KLEV), PQU(KLON,KLEV), & - PUU(KLON,KLEV), PVU(KLON,KLEV), & - PMFUB(KLON), PRFL(KLON) - REAL PTD(KLON,KLEV), PQD(KLON,KLEV), & - PUD(KLON,KLEV), PVD(KLON,KLEV), & - PMFD(KLON,KLEV), PMFDS(KLON,KLEV), & - PMFDQ(KLON,KLEV), PDMFDP(KLON,KLEV) - REAL ZTENWB(KLON,KLEV), ZQENWB(KLON,KLEV), & - ZCOND(KLON), ZPH(KLON) - INTEGER KCBOT(KLON), KCTOP(KLON), & - KDTOP(KLON) - LOGICAL LDCUM(KLON), LLo2(KLON), & - LDDRAF(KLON) + integer klon, klev, klevp1 + integer jl,ke,jk,is,ik,icall + real zttest, zqtest, zbuo, zmftop + real ptenh(klon,klev), pqenh(klon,klev), & + puen(klon,klev), pven(klon,klev), & + pgeoh(klon,klev), paph(klon,klevp1), & + ptu(klon,klev), pqu(klon,klev), & + puu(klon,klev), pvu(klon,klev), & + pmfub(klon), prfl(klon) + real ptd(klon,klev), pqd(klon,klev), & + pud(klon,klev), pvd(klon,klev), & + pmfd(klon,klev), pmfds(klon,klev), & + pmfdq(klon,klev), pdmfdp(klon,klev) + real ztenwb(klon,klev), zqenwb(klon,klev), & + zcond(klon), zph(klon) + integer kcbot(klon), kctop(klon), & + kdtop(klon) + logical ldcum(klon), llo2(klon), & + lddraf(klon) !----------------------------------------------- -! 1. SET DEFAULT VALUES FOR DOWNDRAFTS +! 1. set default values for downdrafts !----------------------------------------------- - 100 CONTINUE - DO 110 JL=1,KLON - LDDRAF(JL)=.FALSE. 
- KDTOP(JL)=KLEVP1 - 110 CONTINUE - IF(.NOT.LMFDD) GO TO 300 + do jl=1,klon + lddraf(jl)=.false. + kdtop(jl)=klevp1 + end do + if(.not.lmfdd) return !------------------------------------------------------------ -! 2. DETERMINE LEVEL OF FREE SINKING BY -! DOING A SCAN FROM TOP TO BASE OF CUMULUS CLOUDS -! FOR EVERY POINT AND PROCEED AS FOLLOWS: -! (1) DETEMINE WET BULB ENVIRONMENTAL T AND Q -! (2) DO MIXING WITH CUMULUS CLOUD AIR -! (3) CHECK FOR NEGATIVE BUOYANCY -! THE ASSUMPTION IS THAT AIR OF DOWNDRAFTS IS MIXTURE -! OF 50% CLOUD AIR + 50% ENVIRONMENTAL AIR AT WET BULB -! TEMPERATURE (I.E. WHICH BECAME SATURATED DUE TO -! EVAPORATION OF RAIN AND CLOUD WATER) +! 2. determine level of free sinking by +! doing a scan from top to base of cumulus clouds +! for every point and proceed as follows: +! (1) detemine wet bulb environmental t and q +! (2) do mixing with cumulus cloud air +! (3) check for negative buoyancy +! the assumption is that air of downdrafts is mixture +! of 50% cloud air + 50% environmental air at wet bulb +! temperature (i.e. which became saturated due to +! evaporation of rain and cloud water) !------------------------------------------------------------------ - 200 CONTINUE - KE=KLEV-3 - DO 290 JK=3,KE -! 2.1 CALCULATE WET-BULB TEMPERATURE AND MOISTURE -! FOR ENVIRONMENTAL AIR IN *CUADJTQ* + ke=klev-3 + do jk=3,ke +! 2.1 calculate wet-bulb temperature and moisture +! for environmental air in *cuadjtq* ! ----------------------------------------------------- - 210 CONTINUE - IS=0 - DO 212 JL=1,KLON - ZTENWB(JL,JK)=PTENH(JL,JK) - ZQENWB(JL,JK)=PQENH(JL,JK) - ZPH(JL)=PAPH(JL,JK) - LLO2(JL)=LDCUM(JL).AND.PRFL(JL).GT.0..AND..NOT.LDDRAF(JL).AND. & - (JK.LT.KCBOT(JL).AND.JK.GT.KCTOP(JL)) - IF(LLO2(JL))THEN - IS=IS+1 - ENDIF - 212 CONTINUE - IF(IS.EQ.0) GO TO 290 - IK=JK - ICALL=2 - CALL CUADJTQ(KLON,KLEV,IK,ZPH,ZTENWB,ZQENWB,LLO2,ICALL) -! 2.2 DO MIXING OF CUMULUS AND ENVIRONMENTAL AIR -! AND CHECK FOR NEGATIVE BUOYANCY. -! 
THEN SET VALUES FOR DOWNDRAFT AT LFS. + is=0 + do jl=1,klon + ztenwb(jl,jk)=ptenh(jl,jk) + zqenwb(jl,jk)=pqenh(jl,jk) + zph(jl)=paph(jl,jk) + llo2(jl)=ldcum(jl).and.prfl(jl).gt.0..and..not.lddraf(jl).and. & + (jk.lt.kcbot(jl).and.jk.gt.kctop(jl)) + if(llo2(jl))then + is=is+1 + endif + end do + + if(is.eq.0) cycle + ik=jk + icall=2 + call cuadjtq(klon,klev,ik,zph,ztenwb,zqenwb,llo2,icall) +! 2.2 do mixing of cumulus and environmental air +! and check for negative buoyancy. +! then set values for downdraft at lfs. ! ----------------------------------------------------- - 220 CONTINUE - DO 222 JL=1,KLON - IF(LLO2(JL)) THEN - ZTTEST=0.5*(PTU(JL,JK)+ZTENWB(JL,JK)) - ZQTEST=0.5*(PQU(JL,JK)+ZQENWB(JL,JK)) - ZBUO=ZTTEST*(1.+VTMPC1*ZQTEST)- & - PTENH(JL,JK)*(1.+VTMPC1*PQENH(JL,JK)) - ZCOND(JL)=PQENH(JL,JK)-ZQENWB(JL,JK) - ZMFTOP=-CMFDEPS*PMFUB(JL) - IF(ZBUO.LT.0..AND.PRFL(JL).GT.10.*ZMFTOP*ZCOND(JL)) THEN - KDTOP(JL)=JK - LDDRAF(JL)=.TRUE. - PTD(JL,JK)=ZTTEST - PQD(JL,JK)=ZQTEST - PMFD(JL,JK)=ZMFTOP - PMFDS(JL,JK)=PMFD(JL,JK)*(CPD*PTD(JL,JK)+PGEOH(JL,JK)) - PMFDQ(JL,JK)=PMFD(JL,JK)*PQD(JL,JK) - PDMFDP(JL,JK-1)=-0.5*PMFD(JL,JK)*ZCOND(JL) - PRFL(JL)=PRFL(JL)+PDMFDP(JL,JK-1) - END IF - END IF - 222 CONTINUE - IF(LMFDUDV) THEN - DO 224 JL=1,KLON - IF(PMFD(JL,JK).LT.0.) THEN - PUD(JL,JK)=0.5*(PUU(JL,JK)+PUEN(JL,JK-1)) - PVD(JL,JK)=0.5*(PVU(JL,JK)+PVEN(JL,JK-1)) - END IF - 224 CONTINUE - END IF - 290 CONTINUE - 300 CONTINUE - RETURN - END SUBROUTINE CUDLFS + do jl=1,klon + if(llo2(jl)) then + zttest=0.5*(ptu(jl,jk)+ztenwb(jl,jk)) + zqtest=0.5*(pqu(jl,jk)+zqenwb(jl,jk)) + zbuo=zttest*(1.+vtmpc1*zqtest)- & + ptenh(jl,jk)*(1.+vtmpc1*pqenh(jl,jk)) + zcond(jl)=pqenh(jl,jk)-zqenwb(jl,jk) + zmftop=-cmfdeps*pmfub(jl) + if(zbuo.lt.0..and.prfl(jl).gt.10.*zmftop*zcond(jl)) then + kdtop(jl)=jk + lddraf(jl)=.true. 
+ ptd(jl,jk)=zttest + pqd(jl,jk)=zqtest + pmfd(jl,jk)=zmftop + pmfds(jl,jk)=pmfd(jl,jk)*(cpd*ptd(jl,jk)+pgeoh(jl,jk)) + pmfdq(jl,jk)=pmfd(jl,jk)*pqd(jl,jk) + pdmfdp(jl,jk-1)=-0.5*pmfd(jl,jk)*zcond(jl) + prfl(jl)=prfl(jl)+pdmfdp(jl,jk-1) + end if + end if + end do + + if(lmfdudv) then + do jl=1,klon + if(pmfd(jl,jk).lt.0.) then + pud(jl,jk)=0.5*(puu(jl,jk)+puen(jl,jk-1)) + pvd(jl,jk)=0.5*(pvu(jl,jk)+pven(jl,jk-1)) + end if + end do + end if + + end do + return + end subroutine cudlfs ! !********************************************** -! SUBROUTINE CUDDRAF +! subroutine cuddraf !********************************************** - SUBROUTINE CUDDRAF & - (KLON, KLEV, KLEVP1, PTENH, PQENH, & - PUEN, PVEN, PGEOH, PAPH, PRFL, & - LDDRAF, PTD, PQD, PUD, PVD, & - PMFD, PMFDS, PMFDQ, PDMFDP) -! THIS ROUTINE CALCULATES CUMULUS DOWNDRAFT DESCENT -! M.TIEDTKE E.C.M.W.F. 12/86 MODIF. 12/89 -!***PURPOSE. + subroutine cuddraf & + (klon, klev, klevp1, ptenh, pqenh, & + puen, pven, pgeoh, paph, prfl, & + lddraf, ptd, pqd, pud, pvd, & + pmfd, pmfds, pmfdq, pdmfdp) +! this routine calculates cumulus downdraft descent +! m.tiedtke e.c.m.w.f. 12/86 modif. 12/89 +!***purpose. ! -------- -! TO PRODUCE THE VERTICAL PROFILES FOR CUMULUS DOWNDRAFTS -! (I.E. T,Q,U AND V AND FLUXES) -!***INTERFACE +! to produce the vertical profiles for cumulus downdrafts +! (i.e. t,q,u and v and fluxes) +!***interface ! --------- -! THIS ROUTINE IS CALLED FROM *CUMASTR*. -! INPUT IS T,Q,P,PHI,U,V AT HALF LEVELS. -! IT RETURNS FLUXES OF S,Q AND EVAPORATION RATE -! AND U,V AT LEVELS WHERE DOWNDRAFT OCCURS -!***METHOD. +! this routine is called from *cumastr*. +! input is t,q,p,phi,u,v at half levels. +! it returns fluxes of s,q and evaporation rate +! and u,v at levels where downdraft occurs +!***method. ! -------- -! CALCULATE MOIST DESCENT FOR ENTRAINING/DETRAINING PLUME BY -! A) MOVING AIR DRY-ADIABATICALLY TO NEXT LEVEL BELOW AND -! B) CORRECTING FOR EVAPORATION TO OBTAIN SATURATED STATE. -!***EXTERNALS +! 
calculate moist descent for entraining/detraining plume by +! a) moving air dry-adiabatically to next level below and +! b) correcting for evaporation to obtain saturated state. +!***externals ! --------- -! *CUADJTQ* FOR ADJUSTING T AND Q DUE TO EVAPORATION IN -! SATURATED DESCENT -!***REFERENCE +! *cuadjtq* for adjusting t and q due to evaporation in +! saturated descent +!***reference ! --------- -! (TIEDTKE,1989) +! (tiedtke,1989) ! ---------------------------------------------------------------- !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER JK,IS,JL,ITOPDE, IK, ICALL - REAL ZENTR,ZSEEN, ZQEEN, ZSDDE, ZQDDE,ZMFDSK, ZMFDQK - REAL ZBUO, ZDMFDP, ZMFDUK, ZMFDVK - REAL PTENH(KLON,KLEV), PQENH(KLON,KLEV), & - PUEN(KLON,KLEV), PVEN(KLON,KLEV), & - PGEOH(KLON,KLEV), PAPH(KLON,KLEVP1) - REAL PTD(KLON,KLEV), PQD(KLON,KLEV), & - PUD(KLON,KLEV), PVD(KLON,KLEV), & - PMFD(KLON,KLEV), PMFDS(KLON,KLEV), & - PMFDQ(KLON,KLEV), PDMFDP(KLON,KLEV), & - PRFL(KLON) - REAL ZDMFEN(KLON), ZDMFDE(KLON), & - ZCOND(KLON), ZPH(KLON) - LOGICAL LDDRAF(KLON), LLO2(KLON) + integer klon, klev, klevp1 + integer jk,is,jl,itopde, ik, icall + real zentr,zseen, zqeen, zsdde, zqdde,zmfdsk, zmfdqk + real zbuo, zdmfdp, zmfduk, zmfdvk + real ptenh(klon,klev), pqenh(klon,klev), & + puen(klon,klev), pven(klon,klev), & + pgeoh(klon,klev), paph(klon,klevp1) + real ptd(klon,klev), pqd(klon,klev), & + pud(klon,klev), pvd(klon,klev), & + pmfd(klon,klev), pmfds(klon,klev), & + pmfdq(klon,klev), pdmfdp(klon,klev), & + prfl(klon) + real zdmfen(klon), zdmfde(klon), & + zcond(klon), zph(klon) + logical lddraf(klon), llo2(klon) !-------------------------------------------------------------- -! 1. CALCULATE MOIST DESCENT FOR CUMULUS DOWNDRAFT BY -! (A) CALCULATING ENTRAINMENT RATES, ASSUMING -! LINEAR DECREASE OF MASSFLUX IN PBL -! 
(B) DOING MOIST DESCENT - EVAPORATIVE COOLING -! AND MOISTENING IS CALCULATED IN *CUADJTQ* -! (C) CHECKING FOR NEGATIVE BUOYANCY AND -! SPECIFYING FINAL T,Q,U,V AND DOWNWARD FLUXES +! 1. calculate moist descent for cumulus downdraft by +! (a) calculating entrainment rates, assuming +! linear decrease of massflux in pbl +! (b) doing moist descent - evaporative cooling +! and moistening is calculated in *cuadjtq* +! (c) checking for negative buoyancy and +! specifying final t,q,u,v and downward fluxes ! ---------------------------------------------------------------- - 100 CONTINUE - DO 180 JK=3,KLEV - IS=0 - DO 110 JL=1,KLON - ZPH(JL)=PAPH(JL,JK) - LLO2(JL)=LDDRAF(JL).AND.PMFD(JL,JK-1).LT.0. - IF(LLO2(JL)) THEN - IS=IS+1 - ENDIF - 110 CONTINUE - IF(IS.EQ.0) GO TO 180 - DO 122 JL=1,KLON - IF(LLO2(JL)) THEN - ZENTR=ENTRDD*PMFD(JL,JK-1)*RD*PTENH(JL,JK-1)/ & - (G*PAPH(JL,JK-1))*(PAPH(JL,JK)-PAPH(JL,JK-1)) - ZDMFEN(JL)=ZENTR - ZDMFDE(JL)=ZENTR - END IF - 122 CONTINUE - ITOPDE=KLEV-2 - IF(JK.GT.ITOPDE) THEN - DO 124 JL=1,KLON - IF(LLO2(JL)) THEN - ZDMFEN(JL)=0. 
- ZDMFDE(JL)=PMFD(JL,ITOPDE)* & - (PAPH(JL,JK)-PAPH(JL,JK-1))/ & - (PAPH(JL,KLEVP1)-PAPH(JL,ITOPDE)) - END IF - 124 CONTINUE - END IF - DO 126 JL=1,KLON - IF(LLO2(JL)) THEN - PMFD(JL,JK)=PMFD(JL,JK-1)+ZDMFEN(JL)-ZDMFDE(JL) - ZSEEN=(CPD*PTENH(JL,JK-1)+PGEOH(JL,JK-1))*ZDMFEN(JL) - ZQEEN=PQENH(JL,JK-1)*ZDMFEN(JL) - ZSDDE=(CPD*PTD(JL,JK-1)+PGEOH(JL,JK-1))*ZDMFDE(JL) - ZQDDE=PQD(JL,JK-1)*ZDMFDE(JL) - ZMFDSK=PMFDS(JL,JK-1)+ZSEEN-ZSDDE - ZMFDQK=PMFDQ(JL,JK-1)+ZQEEN-ZQDDE - PQD(JL,JK)=ZMFDQK*(1./MIN(-CMFCMIN,PMFD(JL,JK))) - PTD(JL,JK)=(ZMFDSK*(1./MIN(-CMFCMIN,PMFD(JL,JK)))- & - PGEOH(JL,JK))*RCPD - PTD(JL,JK)=MIN(400.,PTD(JL,JK)) - PTD(JL,JK)=MAX(100.,PTD(JL,JK)) - ZCOND(JL)=PQD(JL,JK) - END IF - 126 CONTINUE - IK=JK - ICALL=2 - CALL CUADJTQ(KLON,KLEV,IK,ZPH,PTD,PQD,LLO2,ICALL) - DO 150 JL=1,KLON - IF(LLO2(JL)) THEN - ZCOND(JL)=ZCOND(JL)-PQD(JL,JK) - ZBUO=PTD(JL,JK)*(1.+VTMPC1*PQD(JL,JK))- & - PTENH(JL,JK)*(1.+VTMPC1*PQENH(JL,JK)) - IF(ZBUO.GE.0..OR.PRFL(JL).LE.(PMFD(JL,JK)*ZCOND(JL))) THEN - PMFD(JL,JK)=0. - ENDIF - PMFDS(JL,JK)=(CPD*PTD(JL,JK)+PGEOH(JL,JK))*PMFD(JL,JK) - PMFDQ(JL,JK)=PQD(JL,JK)*PMFD(JL,JK) - ZDMFDP=-PMFD(JL,JK)*ZCOND(JL) - PDMFDP(JL,JK-1)=ZDMFDP - PRFL(JL)=PRFL(JL)+ZDMFDP - END IF - 150 CONTINUE - IF(LMFDUDV) THEN - DO 160 JL=1,KLON - IF(LLO2(JL).AND.PMFD(JL,JK).LT.0.) THEN - ZMFDUK=PMFD(JL,JK-1)*PUD(JL,JK-1)+ & - ZDMFEN(JL)*PUEN(JL,JK-1)-ZDMFDE(JL)*PUD(JL,JK-1) - ZMFDVK=PMFD(JL,JK-1)*PVD(JL,JK-1)+ & - ZDMFEN(JL)*PVEN(JL,JK-1)-ZDMFDE(JL)*PVD(JL,JK-1) - PUD(JL,JK)=ZMFDUK*(1./MIN(-CMFCMIN,PMFD(JL,JK))) - PVD(JL,JK)=ZMFDVK*(1./MIN(-CMFCMIN,PMFD(JL,JK))) - END IF - 160 CONTINUE - END IF - 180 CONTINUE - RETURN - END SUBROUTINE CUDDRAF + do jk=3,klev + is=0 + do jl=1,klon + zph(jl)=paph(jl,jk) + llo2(jl)=lddraf(jl).and.pmfd(jl,jk-1).lt.0. 
+ if(llo2(jl)) then + is=is+1 + endif + end do + + if(is.eq.0) cycle + do jl=1,klon + if(llo2(jl)) then + zentr=entrdd*pmfd(jl,jk-1)*rd*ptenh(jl,jk-1)/ & + (g*paph(jl,jk-1))*(paph(jl,jk)-paph(jl,jk-1)) + zdmfen(jl)=zentr + zdmfde(jl)=zentr + end if + end do + + itopde=klev-2 + if(jk.gt.itopde) then + do jl=1,klon + if(llo2(jl)) then + zdmfen(jl)=0. + zdmfde(jl)=pmfd(jl,itopde)* & + (paph(jl,jk)-paph(jl,jk-1))/ & + (paph(jl,klevp1)-paph(jl,itopde)) + end if + end do + end if + + do jl=1,klon + if(llo2(jl)) then + pmfd(jl,jk)=pmfd(jl,jk-1)+zdmfen(jl)-zdmfde(jl) + zseen=(cpd*ptenh(jl,jk-1)+pgeoh(jl,jk-1))*zdmfen(jl) + zqeen=pqenh(jl,jk-1)*zdmfen(jl) + zsdde=(cpd*ptd(jl,jk-1)+pgeoh(jl,jk-1))*zdmfde(jl) + zqdde=pqd(jl,jk-1)*zdmfde(jl) + zmfdsk=pmfds(jl,jk-1)+zseen-zsdde + zmfdqk=pmfdq(jl,jk-1)+zqeen-zqdde + pqd(jl,jk)=zmfdqk*(1./min(-cmfcmin,pmfd(jl,jk))) + ptd(jl,jk)=(zmfdsk*(1./min(-cmfcmin,pmfd(jl,jk)))- & + pgeoh(jl,jk))*rcpd + ptd(jl,jk)=min(400.,ptd(jl,jk)) + ptd(jl,jk)=max(100.,ptd(jl,jk)) + zcond(jl)=pqd(jl,jk) + end if + end do + + ik=jk + icall=2 + call cuadjtq(klon,klev,ik,zph,ptd,pqd,llo2,icall) + do jl=1,klon + if(llo2(jl)) then + zcond(jl)=zcond(jl)-pqd(jl,jk) + zbuo=ptd(jl,jk)*(1.+vtmpc1*pqd(jl,jk))- & + ptenh(jl,jk)*(1.+vtmpc1*pqenh(jl,jk)) + if(zbuo.ge.0..or.prfl(jl).le.(pmfd(jl,jk)*zcond(jl))) then + pmfd(jl,jk)=0. + endif + pmfds(jl,jk)=(cpd*ptd(jl,jk)+pgeoh(jl,jk))*pmfd(jl,jk) + pmfdq(jl,jk)=pqd(jl,jk)*pmfd(jl,jk) + zdmfdp=-pmfd(jl,jk)*zcond(jl) + pdmfdp(jl,jk-1)=zdmfdp + prfl(jl)=prfl(jl)+zdmfdp + end if + end do + + if(lmfdudv) then + do jl=1,klon + if(llo2(jl).and.pmfd(jl,jk).lt.0.) then + zmfduk=pmfd(jl,jk-1)*pud(jl,jk-1)+ & + zdmfen(jl)*puen(jl,jk-1)-zdmfde(jl)*pud(jl,jk-1) + zmfdvk=pmfd(jl,jk-1)*pvd(jl,jk-1)+ & + zdmfen(jl)*pven(jl,jk-1)-zdmfde(jl)*pvd(jl,jk-1) + pud(jl,jk)=zmfduk*(1./min(-cmfcmin,pmfd(jl,jk))) + pvd(jl,jk)=zmfdvk*(1./min(-cmfcmin,pmfd(jl,jk))) + end if + end do + end if + + end do + return + end subroutine cuddraf ! 
!********************************************** -! SUBROUTINE CUFLX +! subroutine cuflx !********************************************** - SUBROUTINE CUFLX & - (KLON, KLEV, KLEVP1, PQEN, PQSEN, & - PTENH, PQENH, PAPH, PGEOH, KCBOT, & - KCTOP, KDTOP, KTYPE, LDDRAF, LDCUM, & - PMFU, PMFD, PMFUS, PMFDS, PMFUQ, & - PMFDQ, PMFUL, PLUDE, PDMFUP, PDMFDP, & - PRFL, PRAIN, PTEN, PSFL, PDPMEL, & - KTOPM2, ZTMST, sig1) -! M.TIEDTKE E.C.M.W.F. 7/86 MODIF. 12/89 -!***PURPOSE + subroutine cuflx & + (klon, klev, klevp1, pqen, pqsen, & + ptenh, pqenh, paph, pgeoh, kcbot, & + kctop, kdtop, ktype, lddraf, ldcum, & + pmfu, pmfd, pmfus, pmfds, pmfuq, & + pmfdq, pmful, plude, pdmfup, pdmfdp, & + prfl, prain, pten, psfl, pdpmel, & + ktopm2, ztmst, sig1) +! m.tiedtke e.c.m.w.f. 7/86 modif. 12/89 +!***purpose ! ------- -! THIS ROUTINE DOES THE FINAL CALCULATION OF CONVECTIVE -! FLUXES IN THE CLOUD LAYER AND IN THE SUBCLOUD LAYER -!***INTERFACE +! this routine does the final calculation of convective +! fluxes in the cloud layer and in the subcloud layer +!***interface ! --------- -! THIS ROUTINE IS CALLED FROM *CUMASTR*. -!***EXTERNALS +! this routine is called from *cumastr*. +!***externals ! --------- -! NONE +! none ! 
---------------------------------------------------------------- !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER KTOPM2, ITOP, JL, JK, IKB - REAL ZTMST, ZCONS1, ZCONS2, ZCUCOV, ZTMELP2 - REAL ZZP, ZFAC, ZSNMLT, ZRFL, CEVAPCU, ZRNEW - REAL ZRMIN, ZRFLN, ZDRFL, ZDPEVAP - REAL PQEN(KLON,KLEV), PQSEN(KLON,KLEV), & - PTENH(KLON,KLEV), PQENH(KLON,KLEV), & - PAPH(KLON,KLEVP1), PGEOH(KLON,KLEV) - REAL PMFU(KLON,KLEV), PMFD(KLON,KLEV), & - PMFUS(KLON,KLEV), PMFDS(KLON,KLEV), & - PMFUQ(KLON,KLEV), PMFDQ(KLON,KLEV), & - PDMFUP(KLON,KLEV), PDMFDP(KLON,KLEV), & - PMFUL(KLON,KLEV), PLUDE(KLON,KLEV), & - PRFL(KLON), PRAIN(KLON) - REAL PTEN(KLON,KLEV), PDPMEL(KLON,KLEV), & - PSFL(KLON), ZPSUBCL(KLON) + integer klon, klev, klevp1 + integer ktopm2, itop, jl, jk, ikb + real ztmst, zcons1, zcons2, zcucov, ztmelp2 + real zzp, zfac, zsnmlt, zrfl, cevapcu, zrnew + real zrmin, zrfln, zdrfl, zdpevap + real pqen(klon,klev), pqsen(klon,klev), & + ptenh(klon,klev), pqenh(klon,klev), & + paph(klon,klevp1), pgeoh(klon,klev) + real pmfu(klon,klev), pmfd(klon,klev), & + pmfus(klon,klev), pmfds(klon,klev), & + pmfuq(klon,klev), pmfdq(klon,klev), & + pdmfup(klon,klev), pdmfdp(klon,klev), & + pmful(klon,klev), plude(klon,klev), & + prfl(klon), prain(klon) + real pten(klon,klev), pdpmel(klon,klev), & + psfl(klon), zpsubcl(klon) #if defined(mpas) -!MPAS specific (Laura D. Fowler): - REAL sig1(KLON,KLEV) +!mpas specific (Laura D. Fowler/2016-08-18): + real sig1(klon,klev) #else - REAL sig1(KLEV) + real sig1(klev) #endif - INTEGER KCBOT(KLON), KCTOP(KLON), & - KDTOP(KLON), KTYPE(KLON) - LOGICAL LDDRAF(KLON), LDCUM(KLON) -!* SPECIFY CONSTANTS - ZCONS1=CPD/(ALF*G*ZTMST) - ZCONS2=1./(G*ZTMST) - ZCUCOV=0.05 - ZTMELP2=TMELT+2. 
-!* 1.0 DETERMINE FINAL CONVECTIVE FLUXES + integer kcbot(klon), kctop(klon), & + kdtop(klon), ktype(klon) + logical lddraf(klon), ldcum(klon) +!* specify constants + zcons1=cpd/(alf*g*ztmst) + zcons2=1./(g*ztmst) + zcucov=0.05 + ztmelp2=tmelt+2. +!* 1.0 determine final convective fluxes !--------------------------------------------- - 100 CONTINUE - ITOP=KLEV - DO 110 JL=1,KLON - PRFL(JL)=0. - PSFL(JL)=0. - PRAIN(JL)=0. -! SWITCH OFF SHALLOW CONVECTION - IF(.NOT.LMFSCV.AND.KTYPE(JL).EQ.2)THEN - LDCUM(JL)=.FALSE. - LDDRAF(JL)=.FALSE. - ENDIF - ITOP=MIN(ITOP,KCTOP(JL)) - IF(.NOT.LDCUM(JL).OR.KDTOP(JL).LT.KCTOP(JL)) LDDRAF(JL)=.FALSE. - IF(.NOT.LDCUM(JL)) KTYPE(JL)=0 - 110 CONTINUE - KTOPM2=ITOP-2 - DO 120 JK=KTOPM2,KLEV - DO 115 JL=1,KLON - IF(LDCUM(JL).AND.JK.GE.KCTOP(JL)-1) THEN - PMFUS(JL,JK)=PMFUS(JL,JK)-PMFU(JL,JK)* & - (CPD*PTENH(JL,JK)+PGEOH(JL,JK)) - PMFUQ(JL,JK)=PMFUQ(JL,JK)-PMFU(JL,JK)*PQENH(JL,JK) - IF(LDDRAF(JL).AND.JK.GE.KDTOP(JL)) THEN - PMFDS(JL,JK)=PMFDS(JL,JK)-PMFD(JL,JK)* & - (CPD*PTENH(JL,JK)+PGEOH(JL,JK)) - PMFDQ(JL,JK)=PMFDQ(JL,JK)-PMFD(JL,JK)*PQENH(JL,JK) - ELSE - PMFD(JL,JK)=0. - PMFDS(JL,JK)=0. - PMFDQ(JL,JK)=0. - PDMFDP(JL,JK-1)=0. - END IF - ELSE - PMFU(JL,JK)=0. - PMFD(JL,JK)=0. - PMFUS(JL,JK)=0. - PMFDS(JL,JK)=0. - PMFUQ(JL,JK)=0. - PMFDQ(JL,JK)=0. - PMFUL(JL,JK)=0. - PDMFUP(JL,JK-1)=0. - PDMFDP(JL,JK-1)=0. - PLUDE(JL,JK-1)=0. - END IF - 115 CONTINUE - 120 CONTINUE - DO 130 JK=KTOPM2,KLEV - DO 125 JL=1,KLON - IF(LDCUM(JL).AND.JK.GT.KCBOT(JL)) THEN - IKB=KCBOT(JL) - ZZP=((PAPH(JL,KLEVP1)-PAPH(JL,JK))/ & - (PAPH(JL,KLEVP1)-PAPH(JL,IKB))) - IF(KTYPE(JL).EQ.3) THEN - ZZP=ZZP**2 - ENDIF - PMFU(JL,JK)=PMFU(JL,IKB)*ZZP - PMFUS(JL,JK)=PMFUS(JL,IKB)*ZZP - PMFUQ(JL,JK)=PMFUQ(JL,IKB)*ZZP - PMFUL(JL,JK)=PMFUL(JL,IKB)*ZZP - END IF -!* 2. CALCULATE RAIN/SNOW FALL RATES -!* CALCULATE MELTING OF SNOW -!* CALCULATE EVAPORATION OF PRECIP + itop=klev + do jl=1,klon + prfl(jl)=0. + psfl(jl)=0. + prain(jl)=0. +! 
switch off shallow convection + if(.not.lmfscv.and.ktype(jl).eq.2)then + ldcum(jl)=.false. + lddraf(jl)=.false. + endif + itop=min(itop,kctop(jl)) + if(.not.ldcum(jl).or.kdtop(jl).lt.kctop(jl)) lddraf(jl)=.false. + if(.not.ldcum(jl)) ktype(jl)=0 + end do + + ktopm2=itop-2 + do jk=ktopm2,klev + do jl=1,klon + if(ldcum(jl).and.jk.ge.kctop(jl)-1) then + pmfus(jl,jk)=pmfus(jl,jk)-pmfu(jl,jk)* & + (cpd*ptenh(jl,jk)+pgeoh(jl,jk)) + pmfuq(jl,jk)=pmfuq(jl,jk)-pmfu(jl,jk)*pqenh(jl,jk) + if(lddraf(jl).and.jk.ge.kdtop(jl)) then + pmfds(jl,jk)=pmfds(jl,jk)-pmfd(jl,jk)* & + (cpd*ptenh(jl,jk)+pgeoh(jl,jk)) + pmfdq(jl,jk)=pmfdq(jl,jk)-pmfd(jl,jk)*pqenh(jl,jk) + else + pmfd(jl,jk)=0. + pmfds(jl,jk)=0. + pmfdq(jl,jk)=0. + pdmfdp(jl,jk-1)=0. + end if + else + pmfu(jl,jk)=0. + pmfd(jl,jk)=0. + pmfus(jl,jk)=0. + pmfds(jl,jk)=0. + pmfuq(jl,jk)=0. + pmfdq(jl,jk)=0. + pmful(jl,jk)=0. + pdmfup(jl,jk-1)=0. + pdmfdp(jl,jk-1)=0. + plude(jl,jk-1)=0. + end if + end do + end do + + do jk=ktopm2,klev + do jl=1,klon + if(ldcum(jl).and.jk.gt.kcbot(jl)) then + ikb=kcbot(jl) + zzp=((paph(jl,klevp1)-paph(jl,jk))/ & + (paph(jl,klevp1)-paph(jl,ikb))) + if(ktype(jl).eq.3) then + zzp=zzp**2 + endif + pmfu(jl,jk)=pmfu(jl,ikb)*zzp + pmfus(jl,jk)=pmfus(jl,ikb)*zzp + pmfuq(jl,jk)=pmfuq(jl,ikb)*zzp + pmful(jl,jk)=pmful(jl,ikb)*zzp + end if +!* 2. calculate rain/snow fall rates +!* calculate melting of snow +!* calculate evaporation of precip !---------------------------------------------- - IF(LDCUM(JL)) THEN - PRAIN(JL)=PRAIN(JL)+PDMFUP(JL,JK) - IF(PTEN(JL,JK).GT.TMELT) THEN - PRFL(JL)=PRFL(JL)+PDMFUP(JL,JK)+PDMFDP(JL,JK) - IF(PSFL(JL).GT.0..AND.PTEN(JL,JK).GT.ZTMELP2) THEN - ZFAC=ZCONS1*(PAPH(JL,JK+1)-PAPH(JL,JK)) - ZSNMLT=MIN(PSFL(JL),ZFAC*(PTEN(JL,JK)-ZTMELP2)) - PDPMEL(JL,JK)=ZSNMLT - PSFL(JL)=PSFL(JL)-ZSNMLT - PRFL(JL)=PRFL(JL)+ZSNMLT - END IF - ELSE - PSFL(JL)=PSFL(JL)+PDMFUP(JL,JK)+PDMFDP(JL,JK) - END IF - END IF - 125 CONTINUE - 130 CONTINUE - DO 230 JL=1,KLON - PRFL(JL)=MAX(PRFL(JL),0.) 
- PSFL(JL)=MAX(PSFL(JL),0.) - ZPSUBCL(JL)=PRFL(JL)+PSFL(JL) - 230 CONTINUE - DO 240 JK=KTOPM2,KLEV - DO 235 JL=1,KLON - IF(LDCUM(JL).AND.JK.GE.KCBOT(JL).AND. & - ZPSUBCL(JL).GT.1.E-20) THEN - ZRFL=ZPSUBCL(JL) + if(ldcum(jl)) then + prain(jl)=prain(jl)+pdmfup(jl,jk) + if(pten(jl,jk).gt.tmelt) then + prfl(jl)=prfl(jl)+pdmfup(jl,jk)+pdmfdp(jl,jk) + if(psfl(jl).gt.0..and.pten(jl,jk).gt.ztmelp2) then + zfac=zcons1*(paph(jl,jk+1)-paph(jl,jk)) + zsnmlt=min(psfl(jl),zfac*(pten(jl,jk)-ztmelp2)) + pdpmel(jl,jk)=zsnmlt + psfl(jl)=psfl(jl)-zsnmlt + prfl(jl)=prfl(jl)+zsnmlt + end if + else + psfl(jl)=psfl(jl)+pdmfup(jl,jk)+pdmfdp(jl,jk) + end if + end if + end do + end do + + do jl=1,klon + prfl(jl)=max(prfl(jl),0.) + psfl(jl)=max(psfl(jl),0.) + zpsubcl(jl)=prfl(jl)+psfl(jl) + end do + + do jk=ktopm2,klev + do jl=1,klon + if(ldcum(jl).and.jk.ge.kcbot(jl).and. & + zpsubcl(jl).gt.1.e-20) then + zrfl=zpsubcl(jl) #if defined(mpas) -!MPAS specific (Laura D. Fowler): - CEVAPCU=CEVAPCU1*SQRT(CEVAPCU2*SQRT(sig1(JL,JK))) +!mpas specific (Laura D. Fowler/2016-08-18): + cevapcu=cevapcu1*sqrt(cevapcu2*sqrt(sig1(jl,jk))) #else - CEVAPCU=CEVAPCU1*SQRT(CEVAPCU2*SQRT(sig1(JK))) + cevapcu=cevapcu1*sqrt(cevapcu2*sqrt(sig1(jk))) #endif - ZRNEW=(MAX(0.,SQRT(ZRFL/ZCUCOV)- & - CEVAPCU*(PAPH(JL,JK+1)-PAPH(JL,JK))* & - MAX(0.,PQSEN(JL,JK)-PQEN(JL,JK))))**2*ZCUCOV - ZRMIN=ZRFL-ZCUCOV*MAX(0.,0.8*PQSEN(JL,JK)-PQEN(JL,JK)) & - *ZCONS2*(PAPH(JL,JK+1)-PAPH(JL,JK)) - ZRNEW=MAX(ZRNEW,ZRMIN) - ZRFLN=MAX(ZRNEW,0.) 
- ZDRFL=MIN(0.,ZRFLN-ZRFL) - PDMFUP(JL,JK)=PDMFUP(JL,JK)+ZDRFL - ZPSUBCL(JL)=ZRFLN - END IF - 235 CONTINUE - 240 CONTINUE - DO 250 JL=1,KLON - ZDPEVAP=ZPSUBCL(JL)-(PRFL(JL)+PSFL(JL)) - PRFL(JL)=PRFL(JL)+ZDPEVAP*PRFL(JL)* & - (1./MAX(1.E-20,PRFL(JL)+PSFL(JL))) - PSFL(JL)=PSFL(JL)+ZDPEVAP*PSFL(JL)* & - (1./MAX(1.E-20,PRFL(JL)+PSFL(JL))) - 250 CONTINUE - RETURN - END SUBROUTINE CUFLX + zrnew=(max(0.,sqrt(zrfl/zcucov)- & + cevapcu*(paph(jl,jk+1)-paph(jl,jk))* & + max(0.,pqsen(jl,jk)-pqen(jl,jk))))**2*zcucov + zrmin=zrfl-zcucov*max(0.,0.8*pqsen(jl,jk)-pqen(jl,jk)) & + *zcons2*(paph(jl,jk+1)-paph(jl,jk)) + zrnew=max(zrnew,zrmin) + zrfln=max(zrnew,0.) + zdrfl=min(0.,zrfln-zrfl) + pdmfup(jl,jk)=pdmfup(jl,jk)+zdrfl + zpsubcl(jl)=zrfln + end if + end do + end do + + do jl=1,klon + zdpevap=zpsubcl(jl)-(prfl(jl)+psfl(jl)) + prfl(jl)=prfl(jl)+zdpevap*prfl(jl)* & + (1./max(1.e-20,prfl(jl)+psfl(jl))) + psfl(jl)=psfl(jl)+zdpevap*psfl(jl)* & + (1./max(1.e-20,prfl(jl)+psfl(jl))) + end do + + return + end subroutine cuflx ! !********************************************** -! SUBROUTINE CUDTDQ +! subroutine cudtdq !********************************************** - SUBROUTINE CUDTDQ & - (KLON, KLEV, KLEVP1, KTOPM2, PAPH, & - LDCUM, PTEN, PTTE, PQTE, PMFUS, & - PMFDS, PMFUQ, PMFDQ, PMFUL, PDMFUP, & - PDMFDP, ZTMST, PDPMEL, PRAIN, PRFL, & - PSFL, PSRAIN, PSEVAP, PSHEAT, PSMELT, & - PRSFC, PSSFC, PAPRC, PAPRSM, PAPRS, & - PQEN, PQSEN, PLUDE, PCTE) -!**** *CUDTDQ* - UPDATES T AND Q TENDENCIES, PRECIPITATION RATES -! DOES GLOBAL DIAGNOSTICS -! M.TIEDTKE E.C.M.W.F. 7/86 MODIF. 12/89 -!***INTERFACE. + subroutine cudtdq & + (klon, klev, klevp1, ktopm2, paph, & + ldcum, pten, ptte, pqte, pmfus, & + pmfds, pmfuq, pmfdq, pmful, pdmfup, & + pdmfdp, ztmst, pdpmel, prain, prfl, & + psfl, psrain, psevap, psheat, psmelt, & + prsfc, pssfc, paprc, paprsm, paprs, & + pqen, pqsen, plude, pcte) +!**** *cudtdq* - updates t and q tendencies, precipitation rates +! does global diagnostics +! 
m.tiedtke e.c.m.w.f. 7/86 modif. 12/89 +!***interface. ! ---------- -! *CUDTDQ* IS CALLED FROM *CUMASTR* +! *cudtdq* is called from *cumastr* ! ---------------------------------------------------------------- !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER KTOPM2,JL, JK - REAL ZTMST, PSRAIN, PSEVAP, PSHEAT, PSMELT, ZDIAGT, ZDIAGW - REAL ZALV, RHK, RHCOE, PLDFD, ZDTDT, ZDQDT - REAL PTTE(KLON,KLEV), PQTE(KLON,KLEV), & - PTEN(KLON,KLEV), PLUDE(KLON,KLEV), & - PGEO(KLON,KLEV), PAPH(KLON,KLEVP1), & - PAPRC(KLON), PAPRS(KLON), & - PAPRSM(KLON), PCTE(KLON,KLEV), & - PRSFC(KLON), PSSFC(KLON) - REAL PMFUS(KLON,KLEV), PMFDS(KLON,KLEV), & - PMFUQ(KLON,KLEV), PMFDQ(KLON,KLEV), & - PMFUL(KLON,KLEV), PQSEN(KLON,KLEV), & - PDMFUP(KLON,KLEV), PDMFDP(KLON,KLEV),& - PRFL(KLON), PRAIN(KLON), & - PQEN(KLON,KLEV) - REAL PDPMEL(KLON,KLEV), PSFL(KLON) - REAL ZSHEAT(KLON), ZMELT(KLON) - LOGICAL LDCUM(KLON) + integer klon, klev, klevp1 + integer ktopm2,jl, jk + real ztmst, psrain, psevap, psheat, psmelt, zdiagt, zdiagw + real zalv, rhk, rhcoe, pldfd, zdtdt, zdqdt + real ptte(klon,klev), pqte(klon,klev), & + pten(klon,klev), plude(klon,klev), & + pgeo(klon,klev), paph(klon,klevp1), & + paprc(klon), paprs(klon), & + paprsm(klon), pcte(klon,klev), & + prsfc(klon), pssfc(klon) + real pmfus(klon,klev), pmfds(klon,klev), & + pmfuq(klon,klev), pmfdq(klon,klev), & + pmful(klon,klev), pqsen(klon,klev), & + pdmfup(klon,klev), pdmfdp(klon,klev),& + prfl(klon), prain(klon), & + pqen(klon,klev) + real pdpmel(klon,klev), psfl(klon) + real zsheat(klon), zmelt(klon) + logical ldcum(klon) !-------------------------------- -!* 1.0 SPECIFY PARAMETERS +!* 1.0 specify parameters !-------------------------------- - 100 CONTINUE - ZDIAGT=ZTMST - ZDIAGW=ZDIAGT/RHOH2O + zdiagt=ztmst + zdiagw=zdiagt/rhoh2o !-------------------------------------------------- 
-!* 2.0 INCREMENTATION OF T AND Q TENDENCIES +!* 2.0 incrementation of t and q tendencies !-------------------------------------------------- - 200 CONTINUE - DO 210 JL=1,KLON - ZMELT(JL)=0. - ZSHEAT(JL)=0. - 210 CONTINUE - DO 250 JK=KTOPM2,KLEV - IF(JK.LT.KLEV) THEN - DO 220 JL=1,KLON - IF(LDCUM(JL)) THEN - IF(PTEN(JL,JK).GT.TMELT) THEN - ZALV=ALV - ELSE - ZALV=ALS - ENDIF - RHK=MIN(1.0,PQEN(JL,JK)/PQSEN(JL,JK)) - RHCOE=MAX(0.0,(RHK-RHC)/(RHM-RHC)) - pldfd=MAX(0.0,RHCOE*fdbk*PLUDE(JL,JK)) - ZDTDT=(G/(PAPH(JL,JK+1)-PAPH(JL,JK)))*RCPD* & - (PMFUS(JL,JK+1)-PMFUS(JL,JK)+ & - PMFDS(JL,JK+1)-PMFDS(JL,JK)-ALF*PDPMEL(JL,JK) & - -ZALV*(PMFUL(JL,JK+1)-PMFUL(JL,JK)-pldfd- & - (PDMFUP(JL,JK)+PDMFDP(JL,JK)))) - PTTE(JL,JK)=PTTE(JL,JK)+ZDTDT - ZDQDT=(G/(PAPH(JL,JK+1)-PAPH(JL,JK)))*& - (PMFUQ(JL,JK+1)-PMFUQ(JL,JK)+ & - PMFDQ(JL,JK+1)-PMFDQ(JL,JK)+ & - PMFUL(JL,JK+1)-PMFUL(JL,JK)-pldfd- & - (PDMFUP(JL,JK)+PDMFDP(JL,JK))) - PQTE(JL,JK)=PQTE(JL,JK)+ZDQDT - PCTE(JL,JK)=(G/(PAPH(JL,JK+1)-PAPH(JL,JK)))*pldfd - ZSHEAT(JL)=ZSHEAT(JL)+ZALV*(PDMFUP(JL,JK)+PDMFDP(JL,JK)) - ZMELT(JL)=ZMELT(JL)+PDPMEL(JL,JK) - END IF - 220 CONTINUE - ELSE - DO 230 JL=1,KLON - IF(LDCUM(JL)) THEN - IF(PTEN(JL,JK).GT.TMELT) THEN - ZALV=ALV - ELSE - ZALV=ALS - ENDIF - RHK=MIN(1.0,PQEN(JL,JK)/PQSEN(JL,JK)) - RHCOE=MAX(0.0,(RHK-RHC)/(RHM-RHC)) - pldfd=MAX(0.0,RHCOE*fdbk*PLUDE(JL,JK)) - ZDTDT=-(G/(PAPH(JL,JK+1)-PAPH(JL,JK)))*RCPD* & - (PMFUS(JL,JK)+PMFDS(JL,JK)+ALF*PDPMEL(JL,JK)-ZALV* & - (PMFUL(JL,JK)+PDMFUP(JL,JK)+PDMFDP(JL,JK)+pldfd)) - PTTE(JL,JK)=PTTE(JL,JK)+ZDTDT - ZDQDT=-(G/(PAPH(JL,JK+1)-PAPH(JL,JK)))* & - (PMFUQ(JL,JK)+PMFDQ(JL,JK)+pldfd+ & - (PMFUL(JL,JK)+PDMFUP(JL,JK)+PDMFDP(JL,JK))) - PQTE(JL,JK)=PQTE(JL,JK)+ZDQDT - PCTE(JL,JK)=(G/(PAPH(JL,JK+1)-PAPH(JL,JK)))*pldfd - ZSHEAT(JL)=ZSHEAT(JL)+ZALV*(PDMFUP(JL,JK)+PDMFDP(JL,JK)) - ZMELT(JL)=ZMELT(JL)+PDPMEL(JL,JK) - END IF - 230 CONTINUE - END IF - 250 CONTINUE + do jl=1,klon + zmelt(jl)=0. + zsheat(jl)=0. 
+ end do + + do jk=ktopm2,klev + if(jk.lt.klev) then + do jl=1,klon + if(ldcum(jl)) then + if(pten(jl,jk).gt.tmelt) then + zalv=alv + else + zalv=als + endif + rhk=min(1.0,pqen(jl,jk)/pqsen(jl,jk)) + rhcoe=max(0.0,(rhk-rhc)/(rhm-rhc)) + pldfd=max(0.0,rhcoe*fdbk*plude(jl,jk)) + zdtdt=(g/(paph(jl,jk+1)-paph(jl,jk)))*rcpd* & + (pmfus(jl,jk+1)-pmfus(jl,jk)+ & + pmfds(jl,jk+1)-pmfds(jl,jk)-alf*pdpmel(jl,jk) & + -zalv*(pmful(jl,jk+1)-pmful(jl,jk)-pldfd- & + (pdmfup(jl,jk)+pdmfdp(jl,jk)))) + ptte(jl,jk)=ptte(jl,jk)+zdtdt + zdqdt=(g/(paph(jl,jk+1)-paph(jl,jk)))*& + (pmfuq(jl,jk+1)-pmfuq(jl,jk)+ & + pmfdq(jl,jk+1)-pmfdq(jl,jk)+ & + pmful(jl,jk+1)-pmful(jl,jk)-pldfd- & + (pdmfup(jl,jk)+pdmfdp(jl,jk))) + pqte(jl,jk)=pqte(jl,jk)+zdqdt + pcte(jl,jk)=(g/(paph(jl,jk+1)-paph(jl,jk)))*pldfd + zsheat(jl)=zsheat(jl)+zalv*(pdmfup(jl,jk)+pdmfdp(jl,jk)) + zmelt(jl)=zmelt(jl)+pdpmel(jl,jk) + end if + end do + else + do jl=1,klon + if(ldcum(jl)) then + if(pten(jl,jk).gt.tmelt) then + zalv=alv + else + zalv=als + endif + rhk=min(1.0,pqen(jl,jk)/pqsen(jl,jk)) + rhcoe=max(0.0,(rhk-rhc)/(rhm-rhc)) + pldfd=max(0.0,rhcoe*fdbk*plude(jl,jk)) + zdtdt=-(g/(paph(jl,jk+1)-paph(jl,jk)))*rcpd* & + (pmfus(jl,jk)+pmfds(jl,jk)+alf*pdpmel(jl,jk)-zalv* & + (pmful(jl,jk)+pdmfup(jl,jk)+pdmfdp(jl,jk)+pldfd)) + ptte(jl,jk)=ptte(jl,jk)+zdtdt + zdqdt=-(g/(paph(jl,jk+1)-paph(jl,jk)))* & + (pmfuq(jl,jk)+pmfdq(jl,jk)+pldfd+ & + (pmful(jl,jk)+pdmfup(jl,jk)+pdmfdp(jl,jk))) + pqte(jl,jk)=pqte(jl,jk)+zdqdt + pcte(jl,jk)=(g/(paph(jl,jk+1)-paph(jl,jk)))*pldfd + zsheat(jl)=zsheat(jl)+zalv*(pdmfup(jl,jk)+pdmfdp(jl,jk)) + zmelt(jl)=zmelt(jl)+pdpmel(jl,jk) + end if + end do + end if + end do !--------------------------------------------------------- -! 3. UPDATE SURFACE FIELDS AND DO GLOBAL BUDGETS +! 3. 
update surface fields and do global budgets !--------------------------------------------------------- - 300 CONTINUE - DO 310 JL=1,KLON - PRSFC(JL)=PRFL(JL) - PSSFC(JL)=PSFL(JL) - PAPRC(JL)=PAPRC(JL)+ZDIAGW*(PRFL(JL)+PSFL(JL)) - PAPRS(JL)=PAPRSM(JL)+ZDIAGW*PSFL(JL) - PSHEAT=PSHEAT+ZSHEAT(JL) - PSRAIN=PSRAIN+PRAIN(JL) - PSEVAP=PSEVAP-(PRFL(JL)+PSFL(JL)) - PSMELT=PSMELT+ZMELT(JL) - 310 CONTINUE - PSEVAP=PSEVAP+PSRAIN - RETURN - END SUBROUTINE CUDTDQ + do jl=1,klon + prsfc(jl)=prfl(jl) + pssfc(jl)=psfl(jl) + paprc(jl)=paprc(jl)+zdiagw*(prfl(jl)+psfl(jl)) + paprs(jl)=paprsm(jl)+zdiagw*psfl(jl) + psheat=psheat+zsheat(jl) + psrain=psrain+prain(jl) + psevap=psevap-(prfl(jl)+psfl(jl)) + psmelt=psmelt+zmelt(jl) + end do + psevap=psevap+psrain + return + end subroutine cudtdq ! !********************************************** -! SUBROUTINE CUDUDV +! subroutine cududv !********************************************** - SUBROUTINE CUDUDV & - (KLON, KLEV, KLEVP1, KTOPM2, KTYPE, & - KCBOT, PAPH, LDCUM, PUEN, PVEN, & - PVOM, PVOL, PUU, PUD, PVU, & - PVD, PMFU, PMFD, PSDISS) -!**** *CUDUDV* - UPDATES U AND V TENDENCIES, -! DOES GLOBAL DIAGNOSTIC OF DISSIPATION -! M.TIEDTKE E.C.M.W.F. 7/86 MODIF. 12/89 -!***INTERFACE. + subroutine cududv & + (klon, klev, klevp1, ktopm2, ktype, & + kcbot, paph, ldcum, puen, pven, & + pvom, pvol, puu, pud, pvu, & + pvd, pmfu, pmfd, psdiss) +!**** *cududv* - updates u and v tendencies, +! does global diagnostic of dissipation +! m.tiedtke e.c.m.w.f. 7/86 modif. 12/89 +!***interface. ! ---------- -! *CUDUDV* IS CALLED FROM *CUMASTR* +! *cududv* is called from *cumastr* ! 
---------------------------------------------------------------- !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER KTOPM2, JK, IK, JL, IKB - REAL PSDISS,ZZP, ZDUDT ,ZDVDT, ZSUM - REAL PUEN(KLON,KLEV), PVEN(KLON,KLEV), & - PVOL(KLON,KLEV), PVOM(KLON,KLEV), & - PAPH(KLON,KLEVP1) - REAL PUU(KLON,KLEV), PUD(KLON,KLEV), & - PVU(KLON,KLEV), PVD(KLON,KLEV), & - PMFU(KLON,KLEV), PMFD(KLON,KLEV) - REAL ZMFUU(KLON,KLEV), ZMFDU(KLON,KLEV), & - ZMFUV(KLON,KLEV), ZMFDV(KLON,KLEV), & - ZDISS(KLON) - INTEGER KTYPE(KLON), KCBOT(KLON) - LOGICAL LDCUM(KLON) + integer klon, klev, klevp1 + integer ktopm2, jk, ik, jl, ikb + real psdiss,zzp, zdudt ,zdvdt, zsum + real puen(klon,klev), pven(klon,klev), & + pvol(klon,klev), pvom(klon,klev), & + paph(klon,klevp1) + real puu(klon,klev), pud(klon,klev), & + pvu(klon,klev), pvd(klon,klev), & + pmfu(klon,klev), pmfd(klon,klev) + real zmfuu(klon,klev), zmfdu(klon,klev), & + zmfuv(klon,klev), zmfdv(klon,klev), & + zdiss(klon) + integer ktype(klon), kcbot(klon) + logical ldcum(klon) !------------------------------------------------------------ -!* 1.0 CALCULATE FLUXES AND UPDATE U AND V TENDENCIES +!* 1.0 calculate fluxes and update u and v tendencies ! 
----------------------------------------------------------- - 100 CONTINUE - DO 120 JK=KTOPM2,KLEV - IK=JK-1 - DO 110 JL=1,KLON - IF(LDCUM(JL)) THEN - ZMFUU(JL,JK)=PMFU(JL,JK)*(PUU(JL,JK)-PUEN(JL,IK)) - ZMFUV(JL,JK)=PMFU(JL,JK)*(PVU(JL,JK)-PVEN(JL,IK)) - ZMFDU(JL,JK)=PMFD(JL,JK)*(PUD(JL,JK)-PUEN(JL,IK)) - ZMFDV(JL,JK)=PMFD(JL,JK)*(PVD(JL,JK)-PVEN(JL,IK)) - END IF - 110 CONTINUE - 120 CONTINUE - DO 140 JK=KTOPM2,KLEV - DO 130 JL=1,KLON - IF(LDCUM(JL).AND.JK.GT.KCBOT(JL)) THEN - IKB=KCBOT(JL) - ZZP=((PAPH(JL,KLEVP1)-PAPH(JL,JK))/ & - (PAPH(JL,KLEVP1)-PAPH(JL,IKB))) - IF(KTYPE(JL).EQ.3) THEN - ZZP=ZZP**2 - ENDIF - ZMFUU(JL,JK)=ZMFUU(JL,IKB)*ZZP - ZMFUV(JL,JK)=ZMFUV(JL,IKB)*ZZP - ZMFDU(JL,JK)=ZMFDU(JL,IKB)*ZZP - ZMFDV(JL,JK)=ZMFDV(JL,IKB)*ZZP - END IF - 130 CONTINUE - 140 CONTINUE - DO 150 JL=1,KLON - ZDISS(JL)=0. - 150 CONTINUE - DO 190 JK=KTOPM2,KLEV - IF(JK.LT.KLEV) THEN - DO 160 JL=1,KLON - IF(LDCUM(JL)) THEN - ZDUDT=(G/(PAPH(JL,JK+1)-PAPH(JL,JK)))* & - (ZMFUU(JL,JK+1)-ZMFUU(JL,JK)+ & - ZMFDU(JL,JK+1)-ZMFDU(JL,JK)) - ZDVDT=(G/(PAPH(JL,JK+1)-PAPH(JL,JK)))* & - (ZMFUV(JL,JK+1)-ZMFUV(JL,JK)+ & - ZMFDV(JL,JK+1)-ZMFDV(JL,JK)) - ZDISS(JL)=ZDISS(JL)+ & - PUEN(JL,JK)*(ZMFUU(JL,JK+1)-ZMFUU(JL,JK)+ & - ZMFDU(JL,JK+1)-ZMFDU(JL,JK))+ & - PVEN(JL,JK)*(ZMFUV(JL,JK+1)-ZMFUV(JL,JK)+ & - ZMFDV(JL,JK+1)-ZMFDV(JL,JK)) - PVOM(JL,JK)=PVOM(JL,JK)+ZDUDT - PVOL(JL,JK)=PVOL(JL,JK)+ZDVDT - END IF - 160 CONTINUE - ELSE - DO 170 JL=1,KLON - IF(LDCUM(JL)) THEN - ZDUDT=-(G/(PAPH(JL,JK+1)-PAPH(JL,JK)))* & - (ZMFUU(JL,JK)+ZMFDU(JL,JK)) - ZDVDT=-(G/(PAPH(JL,JK+1)-PAPH(JL,JK)))* & - (ZMFUV(JL,JK)+ZMFDV(JL,JK)) - ZDISS(JL)=ZDISS(JL)- & - (PUEN(JL,JK)*(ZMFUU(JL,JK)+ZMFDU(JL,JK))+ & - PVEN(JL,JK)*(ZMFUV(JL,JK)+ZMFDV(JL,JK))) - PVOM(JL,JK)=PVOM(JL,JK)+ZDUDT - PVOL(JL,JK)=PVOL(JL,JK)+ZDVDT - END IF - 170 CONTINUE - END IF - 190 CONTINUE - ZSUM=SSUM(KLON,ZDISS(1),1) - PSDISS=PSDISS+ZSUM - RETURN - END SUBROUTINE CUDUDV + do jk=ktopm2,klev + ik=jk-1 + do jl=1,klon + if(ldcum(jl)) then + 
zmfuu(jl,jk)=pmfu(jl,jk)*(puu(jl,jk)-puen(jl,ik)) + zmfuv(jl,jk)=pmfu(jl,jk)*(pvu(jl,jk)-pven(jl,ik)) + zmfdu(jl,jk)=pmfd(jl,jk)*(pud(jl,jk)-puen(jl,ik)) + zmfdv(jl,jk)=pmfd(jl,jk)*(pvd(jl,jk)-pven(jl,ik)) + end if + end do + end do + + do jk=ktopm2,klev + do jl=1,klon + if(ldcum(jl).and.jk.gt.kcbot(jl)) then + ikb=kcbot(jl) + zzp=((paph(jl,klevp1)-paph(jl,jk))/ & + (paph(jl,klevp1)-paph(jl,ikb))) + if(ktype(jl).eq.3) then + zzp=zzp**2 + endif + zmfuu(jl,jk)=zmfuu(jl,ikb)*zzp + zmfuv(jl,jk)=zmfuv(jl,ikb)*zzp + zmfdu(jl,jk)=zmfdu(jl,ikb)*zzp + zmfdv(jl,jk)=zmfdv(jl,ikb)*zzp + end if + end do + end do + + do jl=1,klon + zdiss(jl)=0. + end do + + do jk=ktopm2,klev + if(jk.lt.klev) then + do jl=1,klon + if(ldcum(jl)) then + zdudt=(g/(paph(jl,jk+1)-paph(jl,jk)))* & + (zmfuu(jl,jk+1)-zmfuu(jl,jk)+ & + zmfdu(jl,jk+1)-zmfdu(jl,jk)) + zdvdt=(g/(paph(jl,jk+1)-paph(jl,jk)))* & + (zmfuv(jl,jk+1)-zmfuv(jl,jk)+ & + zmfdv(jl,jk+1)-zmfdv(jl,jk)) + zdiss(jl)=zdiss(jl)+ & + puen(jl,jk)*(zmfuu(jl,jk+1)-zmfuu(jl,jk)+ & + zmfdu(jl,jk+1)-zmfdu(jl,jk))+ & + pven(jl,jk)*(zmfuv(jl,jk+1)-zmfuv(jl,jk)+ & + zmfdv(jl,jk+1)-zmfdv(jl,jk)) + pvom(jl,jk)=pvom(jl,jk)+zdudt + pvol(jl,jk)=pvol(jl,jk)+zdvdt + end if + end do + else + do jl=1,klon + if(ldcum(jl)) then + zdudt=-(g/(paph(jl,jk+1)-paph(jl,jk)))* & + (zmfuu(jl,jk)+zmfdu(jl,jk)) + zdvdt=-(g/(paph(jl,jk+1)-paph(jl,jk)))* & + (zmfuv(jl,jk)+zmfdv(jl,jk)) + zdiss(jl)=zdiss(jl)- & + (puen(jl,jk)*(zmfuu(jl,jk)+zmfdu(jl,jk))+ & + pven(jl,jk)*(zmfuv(jl,jk)+zmfdv(jl,jk))) + pvom(jl,jk)=pvom(jl,jk)+zdudt + pvol(jl,jk)=pvol(jl,jk)+zdvdt + end if + end do + end if + end do + zsum=ssum(klon,zdiss(1),1) + psdiss=psdiss+zsum + return + end subroutine cududv ! !################################################################# ! -! LEVEL 4 SUBROUTINES +! level 4 subroutines ! !################################################################# !************************************************************** -! SUBROUTINE CUBASMC +! 
subroutine cubasmc !************************************************************** - SUBROUTINE CUBASMC & - (KLON, KLEV, KLEVM1, KK, PTEN, & - PQEN, PQSEN, PUEN, PVEN, PVERV, & - PGEO, PGEOH, LDCUM, KTYPE, KLAB, & - PMFU, PMFUB, PENTR, KCBOT, PTU, & - PQU, PLU, PUU, PVU, PMFUS, & - PMFUQ, PMFUL, PDMFUP, PMFUU, PMFUV) -! M.TIEDTKE E.C.M.W.F. 12/89 -!***PURPOSE. + subroutine cubasmc & + (klon, klev, klevm1, kk, pten, & + pqen, pqsen, puen, pven, pverv, & + pgeo, pgeoh, ldcum, ktype, klab, & + pmfu, pmfub, pentr, kcbot, ptu, & + pqu, plu, puu, pvu, pmfus, & + pmfuq, pmful, pdmfup, pmfuu, pmfuv) +! m.tiedtke e.c.m.w.f. 12/89 +!***purpose. ! -------- -! THIS ROUTINE CALCULATES CLOUD BASE VALUES -! FOR MIDLEVEL CONVECTION -!***INTERFACE +! this routine calculates cloud base values +! for midlevel convection +!***interface ! --------- -! THIS ROUTINE IS CALLED FROM *CUASC*. -! INPUT ARE ENVIRONMENTAL VALUES T,Q ETC -! IT RETURNS CLOUDBASE VALUES FOR MIDLEVEL CONVECTION -!***METHOD. +! this routine is called from *cuasc*. +! input are environmental values t,q etc +! it returns cloudbase values for midlevel convection +!***method. ! ------- -! S. TIEDTKE (1989) -!***EXTERNALS +! s. tiedtke (1989) +!***externals ! --------- -! NONE +! none ! 
---------------------------------------------------------------- !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER KLEVM1,KK, JL - REAL zzzmb - REAL PTEN(KLON,KLEV), PQEN(KLON,KLEV), & - PUEN(KLON,KLEV), PVEN(KLON,KLEV), & - PQSEN(KLON,KLEV), PVERV(KLON,KLEV), & - PGEO(KLON,KLEV), PGEOH(KLON,KLEV) - REAL PTU(KLON,KLEV), PQU(KLON,KLEV), & - PUU(KLON,KLEV), PVU(KLON,KLEV), & - PLU(KLON,KLEV), PMFU(KLON,KLEV), & - PMFUB(KLON), PENTR(KLON), & - PMFUS(KLON,KLEV), PMFUQ(KLON,KLEV), & - PMFUL(KLON,KLEV), PDMFUP(KLON,KLEV), & - PMFUU(KLON), PMFUV(KLON) - INTEGER KTYPE(KLON), KCBOT(KLON), & - KLAB(KLON,KLEV) - LOGICAL LDCUM(KLON) + integer klon, klev, klevp1 + integer klevm1,kk, jl + real zzzmb + real pten(klon,klev), pqen(klon,klev), & + puen(klon,klev), pven(klon,klev), & + pqsen(klon,klev), pverv(klon,klev), & + pgeo(klon,klev), pgeoh(klon,klev) + real ptu(klon,klev), pqu(klon,klev), & + puu(klon,klev), pvu(klon,klev), & + plu(klon,klev), pmfu(klon,klev), & + pmfub(klon), pentr(klon), & + pmfus(klon,klev), pmfuq(klon,klev), & + pmful(klon,klev), pdmfup(klon,klev), & + pmfuu(klon), pmfuv(klon) + integer ktype(klon), kcbot(klon), & + klab(klon,klev) + logical ldcum(klon) !-------------------------------------------------------- -!* 1. CALCULATE ENTRAINMENT AND DETRAINMENT RATES +!* 1. calculate entrainment and detrainment rates ! ------------------------------------------------------- - 100 CONTINUE - DO 150 JL=1,KLON - IF( .NOT. LDCUM(JL).AND.KLAB(JL,KK+1).EQ.0.0.AND. & - PQEN(JL,KK).GT.0.80*PQSEN(JL,KK)) THEN - PTU(JL,KK+1)=(CPD*PTEN(JL,KK)+PGEO(JL,KK)-PGEOH(JL,KK+1)) & - *RCPD - PQU(JL,KK+1)=PQEN(JL,KK) - PLU(JL,KK+1)=0. 
- ZZZMB=MAX(CMFCMIN,-PVERV(JL,KK)/G) - ZZZMB=MIN(ZZZMB,CMFCMAX) - PMFUB(JL)=ZZZMB - PMFU(JL,KK+1)=PMFUB(JL) - PMFUS(JL,KK+1)=PMFUB(JL)*(CPD*PTU(JL,KK+1)+PGEOH(JL,KK+1)) - PMFUQ(JL,KK+1)=PMFUB(JL)*PQU(JL,KK+1) - PMFUL(JL,KK+1)=0. - PDMFUP(JL,KK+1)=0. - KCBOT(JL)=KK - KLAB(JL,KK+1)=1 - KTYPE(JL)=3 - PENTR(JL)=ENTRMID - IF(LMFDUDV) THEN - PUU(JL,KK+1)=PUEN(JL,KK) - PVU(JL,KK+1)=PVEN(JL,KK) - PMFUU(JL)=PMFUB(JL)*PUU(JL,KK+1) - PMFUV(JL)=PMFUB(JL)*PVU(JL,KK+1) - END IF - END IF - 150 CONTINUE - RETURN - END SUBROUTINE CUBASMC + do jl=1,klon + if( .not. ldcum(jl).and.klab(jl,kk+1).eq.0.0.and. & + pqen(jl,kk).gt.0.80*pqsen(jl,kk)) then + ptu(jl,kk+1)=(cpd*pten(jl,kk)+pgeo(jl,kk)-pgeoh(jl,kk+1)) & + *rcpd + pqu(jl,kk+1)=pqen(jl,kk) + plu(jl,kk+1)=0. + zzzmb=max(cmfcmin,-pverv(jl,kk)/g) + zzzmb=min(zzzmb,cmfcmax) + pmfub(jl)=zzzmb + pmfu(jl,kk+1)=pmfub(jl) + pmfus(jl,kk+1)=pmfub(jl)*(cpd*ptu(jl,kk+1)+pgeoh(jl,kk+1)) + pmfuq(jl,kk+1)=pmfub(jl)*pqu(jl,kk+1) + pmful(jl,kk+1)=0. + pdmfup(jl,kk+1)=0. + kcbot(jl)=kk + klab(jl,kk+1)=1 + ktype(jl)=3 + pentr(jl)=entrmid + if(lmfdudv) then + puu(jl,kk+1)=puen(jl,kk) + pvu(jl,kk+1)=pven(jl,kk) + pmfuu(jl)=pmfub(jl)*puu(jl,kk+1) + pmfuv(jl)=pmfub(jl)*pvu(jl,kk+1) + end if + end if + end do + return + end subroutine cubasmc ! !************************************************************** -! SUBROUTINE CUADJTQ +! subroutine cuadjtq !************************************************************** - SUBROUTINE CUADJTQ(KLON,KLEV,KK,PP,PT,PQ,LDFLAG,KCALL) -! M.TIEDTKE E.C.M.W.F. 12/89 -! D.SALMOND CRAY(UK)) 12/8/91 -!***PURPOSE. + subroutine cuadjtq(klon,klev,kk,pp,pt,pq,ldflag,kcall) +! m.tiedtke e.c.m.w.f. 12/89 +! d.salmond cray(uk)) 12/8/91 +!***purpose. ! -------- -! TO PRODUCE T,Q AND L VALUES FOR CLOUD ASCENT -!***INTERFACE +! to produce t,q and l values for cloud ascent +!***interface ! --------- -! THIS ROUTINE IS CALLED FROM SUBROUTINES: -! *CUBASE* (T AND Q AT CONDENSTION LEVEL) -! *CUASC* (T AND Q AT CLOUD LEVELS) -! 
*CUINI* (ENVIRONMENTAL T AND QS VALUES AT HALF LEVELS) -! INPUT ARE UNADJUSTED T AND Q VALUES, -! IT RETURNS ADJUSTED VALUES OF T AND Q -! NOTE: INPUT PARAMETER KCALL DEFINES CALCULATION AS -! KCALL=0 ENV. T AND QS IN*CUINI* -! KCALL=1 CONDENSATION IN UPDRAFTS (E.G. CUBASE, CUASC) -! KCALL=2 EVAPORATION IN DOWNDRAFTS (E.G. CUDLFS,CUDDRAF -!***EXTERNALS +! this routine is called from subroutines: +! *cubase* (t and q at condenstion level) +! *cuasc* (t and q at cloud levels) +! *cuini* (environmental t and qs values at half levels) +! input are unadjusted t and q values, +! it returns adjusted values of t and q +! note: input parameter kcall defines calculation as +! kcall=0 env. t and qs in*cuini* +! kcall=1 condensation in updrafts (e.g. cubase, cuasc) +! kcall=2 evaporation in downdrafts (e.g. cudlfs,cuddraf +!***externals ! --------- -! 3 LOOKUP TABLES ( TLUCUA, TLUCUB, TLUCUC ) -! FOR CONDENSATION CALCULATIONS. -! THE TABLES ARE INITIALISED IN *SETPHYS*. +! 3 lookup tables ( tlucua, tlucub, tlucuc ) +! for condensation calculations. +! the tables are initialised in *setphys*. ! ---------------------------------------------------------------- !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- - INTEGER KLON, KLEV - INTEGER KK, KCALL, ISUM, JL - REAL ZQSAT, ZCOR, ZCOND1, TT - REAL PT(KLON,KLEV), PQ(KLON,KLEV), & - ZCOND(KLON), ZQP(KLON), & - PP(KLON) - LOGICAL LDFLAG(KLON) + integer klon, klev + integer kk, kcall, isum, jl + real zqsat, zcor, zcond1, tt + real pt(klon,klev), pq(klon,klev), & + zcond(klon), zqp(klon), & + pp(klon) + logical ldflag(klon) !------------------------------------------------------------------ -! 2. CALCULATE CONDENSATION AND ADJUST T AND Q ACCORDINGLY +! 2. 
calculate condensation and adjust t and q accordingly !------------------------------------------------------------------ - 200 CONTINUE - IF (KCALL.EQ.1 ) THEN - ISUM=0 - DO 210 JL=1,KLON - ZCOND(JL)=0. - IF(LDFLAG(JL)) THEN - ZQP(JL)=1./PP(JL) - TT=PT(JL,KK) - ZQSAT=TLUCUA(TT)*ZQP(JL) - ZQSAT=MIN(0.5,ZQSAT) - ZCOR=1./(1.-VTMPC1*ZQSAT) - ZQSAT=ZQSAT*ZCOR - ZCOND(JL)=(PQ(JL,KK)-ZQSAT)/(1.+ZQSAT*ZCOR*TLUCUB(TT)) - ZCOND(JL)=MAX(ZCOND(JL),0.) - PT(JL,KK)=PT(JL,KK)+TLUCUC(TT)*ZCOND(JL) - PQ(JL,KK)=PQ(JL,KK)-ZCOND(JL) - IF(ZCOND(JL).NE.0.0) ISUM=ISUM+1 - END IF - 210 CONTINUE - IF(ISUM.EQ.0) GO TO 230 - DO 220 JL=1,KLON - IF(LDFLAG(JL).AND.ZCOND(JL).NE.0.) THEN - TT=PT(JL,KK) - ZQSAT=TLUCUA(TT)*ZQP(JL) - ZQSAT=MIN(0.5,ZQSAT) - ZCOR=1./(1.-VTMPC1*ZQSAT) - ZQSAT=ZQSAT*ZCOR - ZCOND1=(PQ(JL,KK)-ZQSAT)/(1.+ZQSAT*ZCOR*TLUCUB(TT)) - PT(JL,KK)=PT(JL,KK)+TLUCUC(TT)*ZCOND1 - PQ(JL,KK)=PQ(JL,KK)-ZCOND1 - END IF - 220 CONTINUE - 230 CONTINUE - END IF - IF(KCALL.EQ.2) THEN - ISUM=0 - DO 310 JL=1,KLON - ZCOND(JL)=0. - IF(LDFLAG(JL)) THEN - TT=PT(JL,KK) - ZQP(JL)=1./PP(JL) - ZQSAT=TLUCUA(TT)*ZQP(JL) - ZQSAT=MIN(0.5,ZQSAT) - ZCOR=1./(1.-VTMPC1*ZQSAT) - ZQSAT=ZQSAT*ZCOR - ZCOND(JL)=(PQ(JL,KK)-ZQSAT)/(1.+ZQSAT*ZCOR*TLUCUB(TT)) - ZCOND(JL)=MIN(ZCOND(JL),0.) - PT(JL,KK)=PT(JL,KK)+TLUCUC(TT)*ZCOND(JL) - PQ(JL,KK)=PQ(JL,KK)-ZCOND(JL) - IF(ZCOND(JL).NE.0.0) ISUM=ISUM+1 - END IF - 310 CONTINUE - IF(ISUM.EQ.0) GO TO 330 - DO 320 JL=1,KLON - IF(LDFLAG(JL).AND.ZCOND(JL).NE.0.) 
THEN - TT=PT(JL,KK) - ZQSAT=TLUCUA(TT)*ZQP(JL) - ZQSAT=MIN(0.5,ZQSAT) - ZCOR=1./(1.-VTMPC1*ZQSAT) - ZQSAT=ZQSAT*ZCOR - ZCOND1=(PQ(JL,KK)-ZQSAT)/(1.+ZQSAT*ZCOR*TLUCUB(TT)) - PT(JL,KK)=PT(JL,KK)+TLUCUC(TT)*ZCOND1 - PQ(JL,KK)=PQ(JL,KK)-ZCOND1 - END IF - 320 CONTINUE - 330 CONTINUE - END IF - IF(KCALL.EQ.0) THEN - ISUM=0 - DO 410 JL=1,KLON - TT=PT(JL,KK) - ZQP(JL)=1./PP(JL) - ZQSAT=TLUCUA(TT)*ZQP(JL) - ZQSAT=MIN(0.5,ZQSAT) - ZCOR=1./(1.-VTMPC1*ZQSAT) - ZQSAT=ZQSAT*ZCOR - ZCOND(JL)=(PQ(JL,KK)-ZQSAT)/(1.+ZQSAT*ZCOR*TLUCUB(TT)) - PT(JL,KK)=PT(JL,KK)+TLUCUC(TT)*ZCOND(JL) - PQ(JL,KK)=PQ(JL,KK)-ZCOND(JL) - IF(ZCOND(JL).NE.0.0) ISUM=ISUM+1 - 410 CONTINUE - IF(ISUM.EQ.0) GO TO 430 - DO 420 JL=1,KLON - TT=PT(JL,KK) - ZQSAT=TLUCUA(TT)*ZQP(JL) - ZQSAT=MIN(0.5,ZQSAT) - ZCOR=1./(1.-VTMPC1*ZQSAT) - ZQSAT=ZQSAT*ZCOR - ZCOND1=(PQ(JL,KK)-ZQSAT)/(1.+ZQSAT*ZCOR*TLUCUB(TT)) - PT(JL,KK)=PT(JL,KK)+TLUCUC(TT)*ZCOND1 - PQ(JL,KK)=PQ(JL,KK)-ZCOND1 - 420 CONTINUE - 430 CONTINUE - END IF - IF(KCALL.EQ.4) THEN - DO 510 JL=1,KLON - TT=PT(JL,KK) - ZQP(JL)=1./PP(JL) - ZQSAT=TLUCUA(TT)*ZQP(JL) - ZQSAT=MIN(0.5,ZQSAT) - ZCOR=1./(1.-VTMPC1*ZQSAT) - ZQSAT=ZQSAT*ZCOR - ZCOND(JL)=(PQ(JL,KK)-ZQSAT)/(1.+ZQSAT*ZCOR*TLUCUB(TT)) - PT(JL,KK)=PT(JL,KK)+TLUCUC(TT)*ZCOND(JL) - PQ(JL,KK)=PQ(JL,KK)-ZCOND(JL) - 510 CONTINUE - DO 520 JL=1,KLON - TT=PT(JL,KK) - ZQSAT=TLUCUA(TT)*ZQP(JL) - ZQSAT=MIN(0.5,ZQSAT) - ZCOR=1./(1.-VTMPC1*ZQSAT) - ZQSAT=ZQSAT*ZCOR - ZCOND1=(PQ(JL,KK)-ZQSAT)/(1.+ZQSAT*ZCOR*TLUCUB(TT)) - PT(JL,KK)=PT(JL,KK)+TLUCUC(TT)*ZCOND1 - PQ(JL,KK)=PQ(JL,KK)-ZCOND1 - 520 CONTINUE - END IF - RETURN - END SUBROUTINE CUADJTQ + if (kcall.eq.1 ) then + isum=0 + do jl=1,klon + zcond(jl)=0. + if(ldflag(jl)) then + zqp(jl)=1./pp(jl) + tt=pt(jl,kk) + zqsat=tlucua(tt)*zqp(jl) + zqsat=min(0.5,zqsat) + zcor=1./(1.-vtmpc1*zqsat) + zqsat=zqsat*zcor + zcond(jl)=(pq(jl,kk)-zqsat)/(1.+zqsat*zcor*tlucub(tt)) + zcond(jl)=max(zcond(jl),0.) 
+ pt(jl,kk)=pt(jl,kk)+tlucuc(tt)*zcond(jl) + pq(jl,kk)=pq(jl,kk)-zcond(jl) + if(zcond(jl).ne.0.0) isum=isum+1 + end if + end do + + if(isum.eq.0) return + do jl=1,klon + if(ldflag(jl).and.zcond(jl).ne.0.) then + tt=pt(jl,kk) + zqsat=tlucua(tt)*zqp(jl) + zqsat=min(0.5,zqsat) + zcor=1./(1.-vtmpc1*zqsat) + zqsat=zqsat*zcor + zcond1=(pq(jl,kk)-zqsat)/(1.+zqsat*zcor*tlucub(tt)) + pt(jl,kk)=pt(jl,kk)+tlucuc(tt)*zcond1 + pq(jl,kk)=pq(jl,kk)-zcond1 + end if + end do + end if + if(kcall.eq.2) then + isum=0 + do jl=1,klon + zcond(jl)=0. + if(ldflag(jl)) then + tt=pt(jl,kk) + zqp(jl)=1./pp(jl) + zqsat=tlucua(tt)*zqp(jl) + zqsat=min(0.5,zqsat) + zcor=1./(1.-vtmpc1*zqsat) + zqsat=zqsat*zcor + zcond(jl)=(pq(jl,kk)-zqsat)/(1.+zqsat*zcor*tlucub(tt)) + zcond(jl)=min(zcond(jl),0.) + pt(jl,kk)=pt(jl,kk)+tlucuc(tt)*zcond(jl) + pq(jl,kk)=pq(jl,kk)-zcond(jl) + if(zcond(jl).ne.0.0) isum=isum+1 + end if + end do + + if(isum.eq.0) return + do jl=1,klon + if(ldflag(jl).and.zcond(jl).ne.0.) then + tt=pt(jl,kk) + zqsat=tlucua(tt)*zqp(jl) + zqsat=min(0.5,zqsat) + zcor=1./(1.-vtmpc1*zqsat) + zqsat=zqsat*zcor + zcond1=(pq(jl,kk)-zqsat)/(1.+zqsat*zcor*tlucub(tt)) + pt(jl,kk)=pt(jl,kk)+tlucuc(tt)*zcond1 + pq(jl,kk)=pq(jl,kk)-zcond1 + end if + end do + end if + if(kcall.eq.0) then + isum=0 + do jl=1,klon + tt=pt(jl,kk) + zqp(jl)=1./pp(jl) + zqsat=tlucua(tt)*zqp(jl) + zqsat=min(0.5,zqsat) + zcor=1./(1.-vtmpc1*zqsat) + zqsat=zqsat*zcor + zcond(jl)=(pq(jl,kk)-zqsat)/(1.+zqsat*zcor*tlucub(tt)) + pt(jl,kk)=pt(jl,kk)+tlucuc(tt)*zcond(jl) + pq(jl,kk)=pq(jl,kk)-zcond(jl) + if(zcond(jl).ne.0.0) isum=isum+1 + end do + + if(isum.eq.0) return + do jl=1,klon + tt=pt(jl,kk) + zqsat=tlucua(tt)*zqp(jl) + zqsat=min(0.5,zqsat) + zcor=1./(1.-vtmpc1*zqsat) + zqsat=zqsat*zcor + zcond1=(pq(jl,kk)-zqsat)/(1.+zqsat*zcor*tlucub(tt)) + pt(jl,kk)=pt(jl,kk)+tlucuc(tt)*zcond1 + pq(jl,kk)=pq(jl,kk)-zcond1 + end do + end if + if(kcall.eq.4) then + do jl=1,klon + tt=pt(jl,kk) + zqp(jl)=1./pp(jl) + zqsat=tlucua(tt)*zqp(jl) + 
zqsat=min(0.5,zqsat) + zcor=1./(1.-vtmpc1*zqsat) + zqsat=zqsat*zcor + zcond(jl)=(pq(jl,kk)-zqsat)/(1.+zqsat*zcor*tlucub(tt)) + pt(jl,kk)=pt(jl,kk)+tlucuc(tt)*zcond(jl) + pq(jl,kk)=pq(jl,kk)-zcond(jl) + end do + + do jl=1,klon + tt=pt(jl,kk) + zqsat=tlucua(tt)*zqp(jl) + zqsat=min(0.5,zqsat) + zcor=1./(1.-vtmpc1*zqsat) + zqsat=zqsat*zcor + zcond1=(pq(jl,kk)-zqsat)/(1.+zqsat*zcor*tlucub(tt)) + pt(jl,kk)=pt(jl,kk)+tlucuc(tt)*zcond1 + pq(jl,kk)=pq(jl,kk)-zcond1 + end do + end if + return + end subroutine cuadjtq ! !********************************************************** -! SUBROUTINE CUENTR_NEW +! subroutine cuentr_new !********************************************************** - SUBROUTINE CUENTR_NEW & - (KLON, KLEV, KLEVP1, KK, PTENH, & - PAPH, PAP, PGEOH, KLWMIN, LDCUM, & - KTYPE, KCBOT, KCTOP0, ZPBASE, PMFU, & - PENTR, ZDMFEN, ZDMFDE, ZODETR, KHMIN) -! M.TIEDTKE E.C.M.W.F. 12/89 -! Y.WANG IPRC 11/01 -!***PURPOSE. + subroutine cuentr_new & + (klon, klev, klevp1, kk, ptenh, & + paph, pap, pgeoh, klwmin, ldcum, & + ktype, kcbot, kctop0, zpbase, pmfu, & + pentr, zdmfen, zdmfde, zodetr, khmin) +! m.tiedtke e.c.m.w.f. 12/89 +! y.wang iprc 11/01 +!***purpose. ! -------- -! THIS ROUTINE CALCULATES ENTRAINMENT/DETRAINMENT RATES -! FOR UPDRAFTS IN CUMULUS PARAMETERIZATION -!***INTERFACE +! this routine calculates entrainment/detrainment rates +! for updrafts in cumulus parameterization +!***interface ! --------- -! THIS ROUTINE IS CALLED FROM *CUASC*. -! INPUT ARE ENVIRONMENTAL VALUES T,Q ETC -! AND UPDRAFT VALUES T,Q ETC -! IT RETURNS ENTRAINMENT/DETRAINMENT RATES -!***METHOD. +! this routine is called from *cuasc*. +! input are environmental values t,q etc +! and updraft values t,q etc +! it returns entrainment/detrainment rates +!***method. ! -------- -! S. TIEDTKE (1989), NORDENG(1996) -!***EXTERNALS +! s. tiedtke (1989), nordeng(1996) +!***externals ! --------- -! NONE +! none ! 
---------------------------------------------------------------- !------------------------------------------------------------------- - IMPLICIT NONE + implicit none !------------------------------------------------------------------- - INTEGER KLON, KLEV, KLEVP1 - INTEGER KK, JL, IKLWMIN,IKB, IKT, IKH - REAL ZRRHO, ZDPRHO, ZPMID, ZENTR, ZZMZK, ZTMZK, ARG, ZORGDE - REAL PTENH(KLON,KLEV), & - PAP(KLON,KLEV), PAPH(KLON,KLEVP1), & - PMFU(KLON,KLEV), PGEOH(KLON,KLEV), & - PENTR(KLON), ZPBASE(KLON), & - ZDMFEN(KLON), ZDMFDE(KLON), & - ZODETR(KLON,KLEV) - INTEGER KLWMIN(KLON), KTYPE(KLON), & - KCBOT(KLON), KCTOP0(KLON), & - KHMIN(KLON) - LOGICAL LDCUM(KLON),LLO1,LLO2 - - real tt(klon),ttb(klon) - real zqsat(klon), zqsatb(klon) - real fscale(klon) + integer klon, klev, klevp1 + integer kk, jl, iklwmin,ikb, ikt, ikh + real zrrho, zdprho, zpmid, zentr, zzmzk, ztmzk, arg, zorgde + real ptenh(klon,klev), & + pap(klon,klev), paph(klon,klevp1), & + pmfu(klon,klev), pgeoh(klon,klev), & + pentr(klon), zpbase(klon), & + zdmfen(klon), zdmfde(klon), & + zodetr(klon,klev) + integer klwmin(klon), ktype(klon), & + kcbot(klon), kctop0(klon), & + khmin(klon) + logical ldcum(klon),llo1,llo2 !--------------------------------------------------------- -!* 1. CALCULATE ENTRAINMENT AND DETRAINMENT RATES +!* 1. calculate entrainment and detrainment rates !--------------------------------------------------------- -!* 1.1 SPECIFY ENTRAINMENT RATES FOR SHALLOW CLOUDS +!* 1.1 specify entrainment rates for shallow clouds !---------------------------------------------------------- -!* 1.2 SPECIFY ENTRAINMENT RATES FOR DEEP CLOUDS +!* 1.2 specify entrainment rates for deep clouds !------------------------------------------------------- - DO jl = 1, klon + do jl = 1, klon zpbase(jl) = paph(jl,kcbot(jl)) zrrho = (rd*ptenh(jl,kk+1))/paph(jl,kk+1) zdprho = (paph(jl,kk+1)-paph(jl,kk))*zrg -! 
old or new choice zpmid = 0.5*(zpbase(jl)+paph(jl,kctop0(jl))) zentr = pentr(jl)*pmfu(jl,kk+1)*zdprho*zrrho - llo1 = kk.LT.kcbot(jl).AND.ldcum(jl) -! old or new choice + llo1 = kk.lt.kcbot(jl).and.ldcum(jl) if(llo1) then - if(nturben.eq.1) zdmfde(jl) = zentr - if(nturben.eq.2) zdmfde(jl) = zentr*1.2 + zdmfde(jl) = zentr else - zdmfde(jl) = 0.0 + zdmfde(jl) = 0.0 endif -! old or new choice - if(nturben .eq. 1) then - fscale(jl) = 1.0 - elseif (nturben .eq. 2) then -! defining the facale - tt(jl) = ptenh(jl,kk+1) - zqsat(jl) = TLUCUA(tt(jl))/paph(jl,kk+1) - zqsat(jl) = zqsat(jl)/(1.-VTMPC1*zqsat(jl)) - ttb(jl) = ptenh(jl,kcbot(jl)) - zqsatb(jl) = TLUCUA(ttb(jl))/zpbase(jl) - zqsatb(jl) = zqsatb(jl)/(1.-VTMPC1*zqsatb(jl)) - fscale(jl) = 4.0*(zqsat(jl)/zqsatb(jl))**2 - end if -! end of defining the fscale - llo2 = llo1.AND.ktype(jl).EQ.2.AND.((zpbase(jl)-paph(jl,kk)) & - .LT.ZDNOPRC.OR.paph(jl,kk).GT.zpmid) + llo2 = llo1.and.ktype(jl).eq.2.and.((zpbase(jl)-paph(jl,kk)) & + .lt.zdnoprc.or.paph(jl,kk).gt.zpmid) if(llo2) then - zdmfen(jl) = zentr*fscale(jl) + zdmfen(jl) = zentr else zdmfen(jl) = 0.0 endif - iklwmin = MAX(klwmin(jl),kctop0(jl)+2) - llo2 = llo1.AND.ktype(jl).EQ.3.AND.(kk.GE.iklwmin.OR.pap(jl,kk) & - .GT.zpmid) - IF (llo2) zdmfen(jl) = zentr*fscale(jl) - llo2 = llo1.AND.ktype(jl).EQ.1 -! Turbulent entrainment - IF (llo2) zdmfen(jl) = zentr*fscale(jl) -! Organized detrainment, detrainment starts at khmin + iklwmin = max(klwmin(jl),kctop0(jl)+2) + llo2 = llo1.and.ktype(jl).eq.3.and.(kk.ge.iklwmin.or.pap(jl,kk) & + .gt.zpmid) + if (llo2) zdmfen(jl) = zentr + llo2 = llo1.and.ktype(jl).eq.1 +! turbulent entrainment + if (llo2) zdmfen(jl) = zentr +! organized detrainment, detrainment starts at khmin ikb = kcbot(jl) zodetr(jl,kk) = 0. 
- IF (llo2.AND.kk.LE.khmin(jl).AND.kk.GE.kctop0(jl)) THEN + if (llo2.and.kk.le.khmin(jl).and.kk.ge.kctop0(jl)) then ikt = kctop0(jl) ikh = khmin(jl) - IF (ikh.GT.ikt) THEN + if (ikh.gt.ikt) then zzmzk = -(pgeoh(jl,ikh)-pgeoh(jl,kk))*zrg ztmzk = -(pgeoh(jl,ikh)-pgeoh(jl,ikt))*zrg arg = 3.1415*(zzmzk/ztmzk)*0.5 - zorgde = TAN(arg)*3.1415*0.5/ztmzk + zorgde = tan(arg)*3.1415*0.5/ztmzk zdprho = (paph(jl,kk+1)-paph(jl,kk))*(zrg*zrrho) - zodetr(jl,kk) = MIN(zorgde,1.E-3)*pmfu(jl,kk+1)*zdprho - END IF - END IF - ENDDO + zodetr(jl,kk) = min(zorgde,1.e-3)*pmfu(jl,kk+1)*zdprho + end if + end if + enddo +! + return + end subroutine cuentr_new ! - RETURN - END SUBROUTINE CUENTR_NEW !********************************************************** -! FUNCTION SSUM, TLUCUA, TLUCUB, TLUCUC +! function ssum, tlucua, tlucub, tlucuc !********************************************************** - REAL FUNCTION SSUM ( N, X, IX ) -! -! COMPUTES SSUM = SUM OF [X(I)] -! FOR N ELEMENTS OF X WITH SKIP INCREMENT IX FOR VECTOR X + real function ssum ( n, x, ix ) ! - IMPLICIT NONE - REAL X(*) - REAL ZSUM - INTEGER N, IX, JX, JL +! computes ssum = sum of [x(i)] +! for n elements of x with skip increment ix for vector x ! - JX = 1 - ZSUM = 0.0 - DO JL = 1, N - ZSUM = ZSUM + X(JX) - JX = JX + IX + implicit none + real x(*) + real zsum + integer n, ix, jx, jl +! + jx = 1 + zsum = 0.0 + do jl = 1, n + zsum = zsum + x(jx) + jx = jx + ix enddo ! - SSUM=ZSUM + ssum=zsum ! - RETURN - END FUNCTION SSUM + return + end function ssum - REAL FUNCTION TLUCUA(TT) -! -! Set up lookup tables for cloud ascent calculations. + real function tlucua(tt) ! - IMPLICIT NONE - REAL ZCVM3,ZCVM4,TT +! set up lookup tables for cloud ascent calculations. ! - IF(TT-TMELT.GT.0.) THEN - ZCVM3=C3LES - ZCVM4=C4LES - ELSE - ZCVM3=C3IES - ZCVM4=C4IES - END IF - TLUCUA=C2ES*EXP(ZCVM3*(TT-TMELT)*(1./(TT-ZCVM4))) -! - RETURN - END FUNCTION TLUCUA -! - REAL FUNCTION TLUCUB(TT) + implicit none + real zcvm3,zcvm4,tt !,tlucua +! 
+ if(tt-tmelt.gt.0.) then + zcvm3=c3les + zcvm4=c4les + else + zcvm3=c3ies + zcvm4=c4ies + end if + tlucua=c2es*exp(zcvm3*(tt-tmelt)*(1./(tt-zcvm4))) ! -! Set up lookup tables for cloud ascent calculations. + return + end function tlucua ! - IMPLICIT NONE - REAL Z5ALVCP,Z5ALSCP,ZCVM4,ZCVM5,TT + real function tlucub(tt) ! - Z5ALVCP=C5LES*ALV/CPD - Z5ALSCP=C5IES*ALS/CPD - IF(TT-TMELT.GT.0.) THEN - ZCVM4=C4LES - ZCVM5=Z5ALVCP - ELSE - ZCVM4=C4IES - ZCVM5=Z5ALSCP - END IF - TLUCUB=ZCVM5*(1./(TT-ZCVM4))**2 +! set up lookup tables for cloud ascent calculations. ! - RETURN - END FUNCTION TLUCUB + implicit none + real z5alvcp,z5alscp,zcvm4,zcvm5,tt !,tlucub +! + z5alvcp=c5les*alv/cpd + z5alscp=c5ies*als/cpd + if(tt-tmelt.gt.0.) then + zcvm4=c4les + zcvm5=z5alvcp + else + zcvm4=c4ies + zcvm5=z5alscp + end if + tlucub=zcvm5*(1./(tt-zcvm4))**2 ! - REAL FUNCTION TLUCUC(TT) + return + end function tlucub ! -! Set up lookup tables for cloud ascent calculations. + real function tlucuc(tt) ! - IMPLICIT NONE - REAL ZALVDCP,ZALSDCP,TT,ZLDCP +! set up lookup tables for cloud ascent calculations. ! - ZALVDCP=ALV/CPD - ZALSDCP=ALS/CPD - IF(TT-TMELT.GT.0.) THEN - ZLDCP=ZALVDCP - ELSE - ZLDCP=ZALSDCP - END IF - TLUCUC=ZLDCP + implicit none + real zalvdcp,zalsdcp,tt,zldcp !,tlucuc +! + zalvdcp=alv/cpd + zalsdcp=als/cpd + if(tt-tmelt.gt.0.) then + zldcp=zalvdcp + else + zldcp=zalsdcp + end if + tlucuc=zldcp ! - RETURN - END FUNCTION TLUCUC + return + end function tlucuc ! 
-END MODULE module_cu_tiedtke +end module module_cu_tiedtke + diff --git a/src/core_atmosphere/physics/physics_wrf/module_mp_radar.F b/src/core_atmosphere/physics/physics_wrf/module_mp_radar.F index b4e6ee13dd..d68951187f 100644 --- a/src/core_atmosphere/physics/physics_wrf/module_mp_radar.F +++ b/src/core_atmosphere/physics/physics_wrf/module_mp_radar.F @@ -23,6 +23,7 @@ MODULE module_mp_radar #if defined(mpas) + USE mpas_atmphys_functions USE mpas_atmphys_utilities #else USE module_wrf_error @@ -35,9 +36,13 @@ MODULE module_mp_radar PRIVATE :: m_complex_maxwellgarnett PRIVATE :: get_m_mix_nested PRIVATE :: get_m_mix +#if defined(mpas) + PUBLIC :: WGAMMA + PUBLIC :: GAMMLN +#else PRIVATE :: WGAMMA PRIVATE :: GAMMLN - +#endif INTEGER, PARAMETER, PUBLIC:: nrbins = 50 DOUBLE PRECISION, DIMENSION(nrbins+1), PUBLIC:: xxDx @@ -645,41 +650,6 @@ COMPLEX*16 FUNCTION m_complex_maxwellgarnett(vol1, vol2, vol3, & END FUNCTION m_complex_maxwellgarnett -!+---+-----------------------------------------------------------------+ - REAL FUNCTION GAMMLN(XX) -! --- RETURNS THE VALUE LN(GAMMA(XX)) FOR XX > 0. - IMPLICIT NONE - REAL, INTENT(IN):: XX - DOUBLE PRECISION, PARAMETER:: STP = 2.5066282746310005D0 - DOUBLE PRECISION, DIMENSION(6), PARAMETER:: & - COF = (/76.18009172947146D0, -86.50532032941677D0, & - 24.01409824083091D0, -1.231739572450155D0, & - .1208650973866179D-2, -.5395239384953D-5/) - DOUBLE PRECISION:: SER,TMP,X,Y - INTEGER:: J - - X=XX - Y=X - TMP=X+5.5D0 - TMP=(X+0.5D0)*LOG(TMP)-TMP - SER=1.000000000190015D0 - DO 11 J=1,6 - Y=Y+1.D0 - SER=SER+COF(J)/Y -11 CONTINUE - GAMMLN=TMP+LOG(STP*SER/X) - END FUNCTION GAMMLN -! (C) Copr. 
1986-92 Numerical Recipes Software 2.02 -!+---+-----------------------------------------------------------------+ - REAL FUNCTION WGAMMA(y) - - IMPLICIT NONE - REAL, INTENT(IN):: y - - WGAMMA = EXP(GAMMLN(y)) - - END FUNCTION WGAMMA - !+---+-----------------------------------------------------------------+ END MODULE module_mp_radar !+---+-----------------------------------------------------------------+ diff --git a/src/core_atmosphere/physics/physics_wrf/module_mp_thompson.F b/src/core_atmosphere/physics/physics_wrf/module_mp_thompson.F new file mode 100644 index 0000000000..f027f56d14 --- /dev/null +++ b/src/core_atmosphere/physics/physics_wrf/module_mp_thompson.F @@ -0,0 +1,5120 @@ +!================================================================================================================= +!module_mp_thompson was originally copied from./phys/module_mp_thompson.F from WRF version 3.8. Modifications made +!to the original sourcecode are mostly confined to subroutine thompson_init. +!Laura D. Fowler (laura@ucar.edu) / 2016-06-04. + +!modifications to sourcecode for MPAS: +! * changed all the lines #if ( WRF_CHEM == 1 ) with #if defined(mpas). +! * commented out all the lines CALL wrf_debug(..., mp_debug). +! Laura D. Fowler (laura@ucar.edu) / 2016-06-04. +! * in subroutine mp_thompson, added corrections made by Greg for WRF version 3.8.1. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-29. +! * in subroutine mp_gt_driver, moved the initialization of variables Nt_c and mu_c +! before initialization of local mixing ratios and number concentrations. +! Laura D. Fowler (lara@ucar.edu) / 2916-12-30. + + +!+---+-----------------------------------------------------------------+ +!.. This subroutine computes the moisture tendencies of water vapor, +!.. cloud droplets, rain, cloud ice (pristine), snow, and graupel. +!.. Prior to WRFv2.2 this code was based on Reisner et al (1998), but +!.. few of those pieces remain. A complete description is now found in +!.. 
Thompson, G., P. R. Field, R. M. Rasmussen, and W. D. Hall, 2008: +!.. Explicit Forecasts of winter precipitation using an improved bulk +!.. microphysics scheme. Part II: Implementation of a new snow +!.. parameterization. Mon. Wea. Rev., 136, 5095-5115. +!.. Prior to WRFv3.1, this code was single-moment rain prediction as +!.. described in the reference above, but in v3.1 and higher, the +!.. scheme is two-moment rain (predicted rain number concentration). +!.. +!.. Beginning with WRFv3.6, this is also the "aerosol-aware" scheme as +!.. described in Thompson, G. and T. Eidhammer, 2014: A study of +!.. aerosol impacts on clouds and precipitation development in a large +!.. winter cyclone. J. Atmos. Sci., 71, 3636-3658. Setting WRF +!.. namelist option mp_physics=8 utilizes the older one-moment cloud +!.. water with constant droplet concentration set as Nt_c (found below) +!.. while mp_physics=28 uses double-moment cloud droplet number +!.. concentration, which is not permitted to exceed Nt_c_max below. +!.. +!.. Most importantly, users may wish to modify the prescribed number of +!.. cloud droplets (Nt_c; see guidelines mentioned below). Otherwise, +!.. users may alter the rain and graupel size distribution parameters +!.. to use exponential (Marshal-Palmer) or generalized gamma shape. +!.. The snow field assumes a combination of two gamma functions (from +!.. Field et al. 2005) and would require significant modifications +!.. throughout the entire code to alter its shape as well as accretion +!.. rates. Users may also alter the constants used for density of rain, +!.. graupel, ice, and snow, but the latter is not constant when using +!.. Paul Field's snow distribution and moments methods. Other values +!.. users can modify include the constants for mass and/or velocity +!.. power law relations and assumed capacitances used in deposition/ +!.. sublimation/evaporation/melting. +!.. Remaining values should probably be left alone. +!.. 
+!..Author: Greg Thompson, NCAR-RAL, gthompsn@ucar.edu, 303-497-2805 +!..Last modified: 01 Aug 2016 Aerosol additions to v3.5.1 code 9/2013 +!.. Cloud fraction additions 11/2014 part of pre-v3.7 +!+---+-----------------------------------------------------------------+ +!wrft:model_layer:physics +!+---+-----------------------------------------------------------------+ +! + MODULE module_mp_thompson + + use mpas_kind_types + use mpas_atmphys_functions + use mpas_atmphys_utilities + use module_mp_radar + + implicit none + logical, parameter, private:: iiwarm = .false. + logical, public:: is_aerosol_aware = .false. + + LOGICAL, PARAMETER, PRIVATE:: dustyIce = .true. + LOGICAL, PARAMETER, PRIVATE:: homogIce = .true. + + INTEGER, PARAMETER, PRIVATE:: IFDRY = 0 + REAL, PARAMETER, PRIVATE:: T_0 = 273.15 + REAL, PARAMETER, PRIVATE:: PI = 3.1415926536 + +!..Densities of rain, snow, graupel, and cloud ice. + REAL, PARAMETER, PRIVATE:: rho_w = 1000.0 + REAL, PARAMETER, PRIVATE:: rho_s = 100.0 + REAL, PARAMETER, PRIVATE:: rho_g = 500.0 + REAL, PARAMETER, PRIVATE:: rho_i = 890.0 + +!..Prescribed number of cloud droplets. Set according to known data or +!.. roughly 100 per cc (100.E6 m^-3) for Maritime cases and +!.. 300 per cc (300.E6 m^-3) for Continental. Gamma shape parameter, +!.. mu_c, calculated based on Nt_c is important in autoconversion +!.. scheme. In 2-moment cloud water, Nt_c represents a maximum of +!.. droplet concentration and nu_c is also variable depending on local +!.. droplet number concentration. +! REAL, PARAMETER, PRIVATE:: Nt_c = 100.E6 + REAL, PARAMETER, PRIVATE:: Nt_c_max = 1999.E6 + REAL, PRIVATE:: Nt_c + +!..Declaration of constants for assumed CCN/IN aerosols when none in +!.. the input data. Look inside the init routine for modifications +!.. due to surface land-sea points or vegetation characteristics. 
+ REAL, PARAMETER, PRIVATE:: naIN0 = 1.5E6 + REAL, PARAMETER, PRIVATE:: naIN1 = 0.5E6 + REAL, PARAMETER, PRIVATE:: naCCN0 = 300.0E6 + REAL, PARAMETER, PRIVATE:: naCCN1 = 50.0E6 + +!..Generalized gamma distributions for rain, graupel and cloud ice. +!.. N(D) = N_0 * D**mu * exp(-lamda*D); mu=0 is exponential. + REAL, PARAMETER, PRIVATE:: mu_r = 0.0 + REAL, PARAMETER, PRIVATE:: mu_g = 0.0 + REAL, PARAMETER, PRIVATE:: mu_i = 0.0 + REAL, PRIVATE:: mu_c + +!..Sum of two gamma distrib for snow (Field et al. 2005). +!.. N(D) = M2**4/M3**3 * [Kap0*exp(-M2*Lam0*D/M3) +!.. + Kap1*(M2/M3)**mu_s * D**mu_s * exp(-M2*Lam1*D/M3)] +!.. M2 and M3 are the (bm_s)th and (bm_s+1)th moments respectively +!.. calculated as function of ice water content and temperature. + REAL, PARAMETER, PRIVATE:: mu_s = 0.6357 + REAL, PARAMETER, PRIVATE:: Kap0 = 490.6 + REAL, PARAMETER, PRIVATE:: Kap1 = 17.46 + REAL, PARAMETER, PRIVATE:: Lam0 = 20.78 + REAL, PARAMETER, PRIVATE:: Lam1 = 3.29 + +!..Y-intercept parameter for graupel is not constant and depends on +!.. mixing ratio. Also, when mu_g is non-zero, these become equiv +!.. y-intercept for an exponential distrib and proper values are +!.. computed based on same mixing ratio and total number concentration. + REAL, PARAMETER, PRIVATE:: gonv_min = 1.E4 + REAL, PARAMETER, PRIVATE:: gonv_max = 3.E6 + +!..Mass power law relations: mass = am*D**bm +!.. Snow from Field et al. (2005), others assume spherical form. + REAL, PARAMETER, PRIVATE:: am_r = PI*rho_w/6.0 + REAL, PARAMETER, PRIVATE:: bm_r = 3.0 + REAL, PARAMETER, PRIVATE:: am_s = 0.069 + REAL, PARAMETER, PRIVATE:: bm_s = 2.0 + REAL, PARAMETER, PRIVATE:: am_g = PI*rho_g/6.0 + REAL, PARAMETER, PRIVATE:: bm_g = 3.0 + REAL, PARAMETER, PRIVATE:: am_i = PI*rho_i/6.0 + REAL, PARAMETER, PRIVATE:: bm_i = 3.0 + +!..Fallspeed power laws relations: v = (av*D**bv)*exp(-fv*D) +!.. Rain from Ferrier (1994), ice, snow, and graupel from +!.. Thompson et al (2008). Coefficient fv is zero for graupel/ice. 
+ REAL, PARAMETER, PRIVATE:: av_r = 4854.0 + REAL, PARAMETER, PRIVATE:: bv_r = 1.0 + REAL, PARAMETER, PRIVATE:: fv_r = 195.0 + REAL, PARAMETER, PRIVATE:: av_s = 40.0 + REAL, PARAMETER, PRIVATE:: bv_s = 0.55 + REAL, PARAMETER, PRIVATE:: fv_s = 100.0 + REAL, PARAMETER, PRIVATE:: av_g = 442.0 + REAL, PARAMETER, PRIVATE:: bv_g = 0.89 + REAL, PARAMETER, PRIVATE:: av_i = 1847.5 + REAL, PARAMETER, PRIVATE:: bv_i = 1.0 + REAL, PARAMETER, PRIVATE:: av_c = 0.316946E8 + REAL, PARAMETER, PRIVATE:: bv_c = 2.0 + +!..Capacitance of sphere and plates/aggregates: D**3, D**2 + REAL, PARAMETER, PRIVATE:: C_cube = 0.5 + REAL, PARAMETER, PRIVATE:: C_sqrd = 0.15 + +!..Collection efficiencies. Rain/snow/graupel collection of cloud +!.. droplets use variables (Ef_rw, Ef_sw, Ef_gw respectively) and +!.. get computed elsewhere because they are dependent on stokes +!.. number. + REAL, PARAMETER, PRIVATE:: Ef_si = 0.05 + REAL, PARAMETER, PRIVATE:: Ef_rs = 0.95 + REAL, PARAMETER, PRIVATE:: Ef_rg = 0.75 + REAL, PARAMETER, PRIVATE:: Ef_ri = 0.95 + +!..Minimum microphys values +!.. R1 value, 1.E-12, cannot be set lower because of numerical +!.. problems with Paul Field's moments and should not be set larger +!.. because of truncation problems in snow/ice growth. + REAL, PARAMETER, PRIVATE:: R1 = 1.E-12 + REAL, PARAMETER, PRIVATE:: R2 = 1.E-6 + REAL, PARAMETER, PRIVATE:: eps = 1.E-15 + +!..Constants in Cooper curve relation for cloud ice number. + REAL, PARAMETER, PRIVATE:: TNO = 5.0 + REAL, PARAMETER, PRIVATE:: ATO = 0.304 + +!..Rho_not used in fallspeed relations (rho_not/rho)**.5 adjustment. 
+ REAL, PARAMETER, PRIVATE:: rho_not = 101325.0/(287.05*298.0) + +!..Schmidt number + REAL, PARAMETER, PRIVATE:: Sc = 0.632 + REAL, PRIVATE:: Sc3 + +!..Homogeneous freezing temperature + REAL, PARAMETER, PRIVATE:: HGFR = 235.16 + +!..Water vapor and air gas constants at constant pressure + REAL, PARAMETER, PRIVATE:: Rv = 461.5 + REAL, PARAMETER, PRIVATE:: oRv = 1./Rv + REAL, PARAMETER, PRIVATE:: R = 287.04 + REAL, PARAMETER, PRIVATE:: Cp = 1004.0 + REAL, PARAMETER, PRIVATE:: R_uni = 8.314 ! J (mol K)-1 + + DOUBLE PRECISION, PARAMETER, PRIVATE:: k_b = 1.38065E-23 ! Boltzmann constant [J/K] + DOUBLE PRECISION, PARAMETER, PRIVATE:: M_w = 18.01528E-3 ! molecular mass of water [kg/mol] + DOUBLE PRECISION, PARAMETER, PRIVATE:: M_a = 28.96E-3 ! molecular mass of air [kg/mol] + DOUBLE PRECISION, PARAMETER, PRIVATE:: N_avo = 6.022E23 ! Avogadro number [1/mol] + DOUBLE PRECISION, PARAMETER, PRIVATE:: ma_w = M_w / N_avo ! mass of water molecule [kg] + REAL, PARAMETER, PRIVATE:: ar_volume = 4./3.*PI*(2.5e-6)**3 ! assume radius of 0.025 micrometer, 2.5e-6 cm + +!..Enthalpy of sublimation, vaporization, and fusion at 0C. + REAL, PARAMETER, PRIVATE:: lsub = 2.834E6 + REAL, PARAMETER, PRIVATE:: lvap0 = 2.5E6 + REAL, PARAMETER, PRIVATE:: lfus = lsub - lvap0 + REAL, PARAMETER, PRIVATE:: olfus = 1./lfus + +!..Ice initiates with this mass (kg), corresponding diameter calc. +!..Min diameters and mass of cloud, rain, snow, and graupel (m, kg). 
+ REAL, PARAMETER, PRIVATE:: xm0i = 1.E-12 + REAL, PARAMETER, PRIVATE:: D0c = 1.E-6 + REAL, PARAMETER, PRIVATE:: D0r = 50.E-6 + REAL, PARAMETER, PRIVATE:: D0s = 200.E-6 + REAL, PARAMETER, PRIVATE:: D0g = 250.E-6 + REAL, PRIVATE:: D0i, xm0s, xm0g + +!..Lookup table dimensions + INTEGER, PARAMETER, PRIVATE:: nbins = 100 + INTEGER, PARAMETER, PRIVATE:: nbc = nbins + INTEGER, PARAMETER, PRIVATE:: nbi = nbins + INTEGER, PARAMETER, PRIVATE:: nbr = nbins + INTEGER, PARAMETER, PRIVATE:: nbs = nbins + INTEGER, PARAMETER, PRIVATE:: nbg = nbins + INTEGER, PARAMETER, PRIVATE:: ntb_c = 37 + INTEGER, PARAMETER, PRIVATE:: ntb_i = 64 + INTEGER, PARAMETER, PRIVATE:: ntb_r = 37 + INTEGER, PARAMETER, PRIVATE:: ntb_s = 28 + INTEGER, PARAMETER, PRIVATE:: ntb_g = 28 + INTEGER, PARAMETER, PRIVATE:: ntb_g1 = 28 + INTEGER, PARAMETER, PRIVATE:: ntb_r1 = 37 + INTEGER, PARAMETER, PRIVATE:: ntb_i1 = 55 + INTEGER, PARAMETER, PRIVATE:: ntb_t = 9 + INTEGER, PRIVATE:: nic1, nic2, nii2, nii3, nir2, nir3, nis2, nig2, nig3 + INTEGER, PARAMETER, PRIVATE:: ntb_arc = 7 + INTEGER, PARAMETER, PRIVATE:: ntb_arw = 9 + INTEGER, PARAMETER, PRIVATE:: ntb_art = 7 + INTEGER, PARAMETER, PRIVATE:: ntb_arr = 5 + INTEGER, PARAMETER, PRIVATE:: ntb_ark = 4 + INTEGER, PARAMETER, PRIVATE:: ntb_IN = 55 + INTEGER, PRIVATE:: niIN2 + + DOUBLE PRECISION, DIMENSION(nbins+1):: xDx + DOUBLE PRECISION, DIMENSION(nbc):: Dc, dtc + DOUBLE PRECISION, DIMENSION(nbi):: Di, dti + DOUBLE PRECISION, DIMENSION(nbr):: Dr, dtr + DOUBLE PRECISION, DIMENSION(nbs):: Ds, dts + DOUBLE PRECISION, DIMENSION(nbg):: Dg, dtg + DOUBLE PRECISION, DIMENSION(nbc):: t_Nc + +!..Lookup tables for cloud water content (kg/m**3). 
+ REAL, DIMENSION(ntb_c), PARAMETER, PRIVATE:: & + r_c = (/1.e-6,2.e-6,3.e-6,4.e-6,5.e-6,6.e-6,7.e-6,8.e-6,9.e-6, & + 1.e-5,2.e-5,3.e-5,4.e-5,5.e-5,6.e-5,7.e-5,8.e-5,9.e-5, & + 1.e-4,2.e-4,3.e-4,4.e-4,5.e-4,6.e-4,7.e-4,8.e-4,9.e-4, & + 1.e-3,2.e-3,3.e-3,4.e-3,5.e-3,6.e-3,7.e-3,8.e-3,9.e-3, & + 1.e-2/) + +!..Lookup tables for cloud ice content (kg/m**3). + REAL, DIMENSION(ntb_i), PARAMETER, PRIVATE:: & + r_i = (/1.e-10,2.e-10,3.e-10,4.e-10, & + 5.e-10,6.e-10,7.e-10,8.e-10,9.e-10, & + 1.e-9,2.e-9,3.e-9,4.e-9,5.e-9,6.e-9,7.e-9,8.e-9,9.e-9, & + 1.e-8,2.e-8,3.e-8,4.e-8,5.e-8,6.e-8,7.e-8,8.e-8,9.e-8, & + 1.e-7,2.e-7,3.e-7,4.e-7,5.e-7,6.e-7,7.e-7,8.e-7,9.e-7, & + 1.e-6,2.e-6,3.e-6,4.e-6,5.e-6,6.e-6,7.e-6,8.e-6,9.e-6, & + 1.e-5,2.e-5,3.e-5,4.e-5,5.e-5,6.e-5,7.e-5,8.e-5,9.e-5, & + 1.e-4,2.e-4,3.e-4,4.e-4,5.e-4,6.e-4,7.e-4,8.e-4,9.e-4, & + 1.e-3/) + +!..Lookup tables for rain content (kg/m**3). + REAL, DIMENSION(ntb_r), PARAMETER, PRIVATE:: & + r_r = (/1.e-6,2.e-6,3.e-6,4.e-6,5.e-6,6.e-6,7.e-6,8.e-6,9.e-6, & + 1.e-5,2.e-5,3.e-5,4.e-5,5.e-5,6.e-5,7.e-5,8.e-5,9.e-5, & + 1.e-4,2.e-4,3.e-4,4.e-4,5.e-4,6.e-4,7.e-4,8.e-4,9.e-4, & + 1.e-3,2.e-3,3.e-3,4.e-3,5.e-3,6.e-3,7.e-3,8.e-3,9.e-3, & + 1.e-2/) + +!..Lookup tables for graupel content (kg/m**3). + REAL, DIMENSION(ntb_g), PARAMETER, PRIVATE:: & + r_g = (/1.e-5,2.e-5,3.e-5,4.e-5,5.e-5,6.e-5,7.e-5,8.e-5,9.e-5, & + 1.e-4,2.e-4,3.e-4,4.e-4,5.e-4,6.e-4,7.e-4,8.e-4,9.e-4, & + 1.e-3,2.e-3,3.e-3,4.e-3,5.e-3,6.e-3,7.e-3,8.e-3,9.e-3, & + 1.e-2/) + +!..Lookup tables for snow content (kg/m**3). + REAL, DIMENSION(ntb_s), PARAMETER, PRIVATE:: & + r_s = (/1.e-5,2.e-5,3.e-5,4.e-5,5.e-5,6.e-5,7.e-5,8.e-5,9.e-5, & + 1.e-4,2.e-4,3.e-4,4.e-4,5.e-4,6.e-4,7.e-4,8.e-4,9.e-4, & + 1.e-3,2.e-3,3.e-3,4.e-3,5.e-3,6.e-3,7.e-3,8.e-3,9.e-3, & + 1.e-2/) + +!..Lookup tables for rain y-intercept parameter (/m**4). 
+ REAL, DIMENSION(ntb_r1), PARAMETER, PRIVATE:: & + N0r_exp = (/1.e6,2.e6,3.e6,4.e6,5.e6,6.e6,7.e6,8.e6,9.e6, & + 1.e7,2.e7,3.e7,4.e7,5.e7,6.e7,7.e7,8.e7,9.e7, & + 1.e8,2.e8,3.e8,4.e8,5.e8,6.e8,7.e8,8.e8,9.e8, & + 1.e9,2.e9,3.e9,4.e9,5.e9,6.e9,7.e9,8.e9,9.e9, & + 1.e10/) + +!..Lookup tables for graupel y-intercept parameter (/m**4). + REAL, DIMENSION(ntb_g1), PARAMETER, PRIVATE:: & + N0g_exp = (/1.e4,2.e4,3.e4,4.e4,5.e4,6.e4,7.e4,8.e4,9.e4, & + 1.e5,2.e5,3.e5,4.e5,5.e5,6.e5,7.e5,8.e5,9.e5, & + 1.e6,2.e6,3.e6,4.e6,5.e6,6.e6,7.e6,8.e6,9.e6, & + 1.e7/) + +!..Lookup tables for ice number concentration (/m**3). + REAL, DIMENSION(ntb_i1), PARAMETER, PRIVATE:: & + Nt_i = (/1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0, & + 1.e1,2.e1,3.e1,4.e1,5.e1,6.e1,7.e1,8.e1,9.e1, & + 1.e2,2.e2,3.e2,4.e2,5.e2,6.e2,7.e2,8.e2,9.e2, & + 1.e3,2.e3,3.e3,4.e3,5.e3,6.e3,7.e3,8.e3,9.e3, & + 1.e4,2.e4,3.e4,4.e4,5.e4,6.e4,7.e4,8.e4,9.e4, & + 1.e5,2.e5,3.e5,4.e5,5.e5,6.e5,7.e5,8.e5,9.e5, & + 1.e6/) + +!..Aerosol table parameter: Number of available aerosols, vertical +!.. velocity, temperature, aerosol mean radius, and hygroscopicity. + REAL, DIMENSION(ntb_arc), PARAMETER, PRIVATE:: & + ta_Na = (/10.0, 31.6, 100.0, 316.0, 1000.0, 3160.0, 10000.0/) + REAL, DIMENSION(ntb_arw), PARAMETER, PRIVATE:: & + ta_Ww = (/0.01, 0.0316, 0.1, 0.316, 1.0, 3.16, 10.0, 31.6, 100.0/) + REAL, DIMENSION(ntb_art), PARAMETER, PRIVATE:: & + ta_Tk = (/243.15, 253.15, 263.15, 273.15, 283.15, 293.15, 303.15/) + REAL, DIMENSION(ntb_arr), PARAMETER, PRIVATE:: & + ta_Ra = (/0.01, 0.02, 0.04, 0.08, 0.16/) + REAL, DIMENSION(ntb_ark), PARAMETER, PRIVATE:: & + ta_Ka = (/0.2, 0.4, 0.6, 0.8/) + +!..Lookup tables for IN concentration (/m**3) from 0.001 to 1000/Liter. 
+ REAL, DIMENSION(ntb_IN), PARAMETER, PRIVATE:: & + Nt_IN = (/1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0, & + 1.e1,2.e1,3.e1,4.e1,5.e1,6.e1,7.e1,8.e1,9.e1, & + 1.e2,2.e2,3.e2,4.e2,5.e2,6.e2,7.e2,8.e2,9.e2, & + 1.e3,2.e3,3.e3,4.e3,5.e3,6.e3,7.e3,8.e3,9.e3, & + 1.e4,2.e4,3.e4,4.e4,5.e4,6.e4,7.e4,8.e4,9.e4, & + 1.e5,2.e5,3.e5,4.e5,5.e5,6.e5,7.e5,8.e5,9.e5, & + 1.e6/) + +!..For snow moments conversions (from Field et al. 2005) + REAL, DIMENSION(10), PARAMETER, PRIVATE:: & + sa = (/ 5.065339, -0.062659, -3.032362, 0.029469, -0.000285, & + 0.31255, 0.000204, 0.003199, 0.0, -0.015952/) + REAL, DIMENSION(10), PARAMETER, PRIVATE:: & + sb = (/ 0.476221, -0.015896, 0.165977, 0.007468, -0.000141, & + 0.060366, 0.000079, 0.000594, 0.0, -0.003577/) + +!..Temperatures (5 C interval 0 to -40) used in lookup tables. + REAL, DIMENSION(ntb_t), PARAMETER, PRIVATE:: & + Tc = (/-0.01, -5., -10., -15., -20., -25., -30., -35., -40./) + +!..Lookup tables for various accretion/collection terms. +!.. ntb_x refers to the number of elements for rain, snow, graupel, +!.. and temperature array indices. Variables beginning with t-p/c/m/n +!.. represent lookup tables. Save compile-time memory by making +!.. allocatable (2009Jun12, J. Michalakes). +! INTEGER, PARAMETER, PRIVATE:: R8SIZE = 8 +! 
INTEGER, PARAMETER, PRIVATE:: R4SIZE = 4 + INTEGER, PARAMETER, PRIVATE:: R8SIZE = R8KIND + INTEGER, PARAMETER, PRIVATE:: R4SIZE = R4KIND + REAL (KIND=R8SIZE), ALLOCATABLE, DIMENSION(:,:,:,:):: & + tcg_racg, tmr_racg, tcr_gacr, tmg_gacr, & + tnr_racg, tnr_gacr + REAL (KIND=R8SIZE), ALLOCATABLE, DIMENSION(:,:,:,:):: & + tcs_racs1, tmr_racs1, tcs_racs2, tmr_racs2, & + tcr_sacr1, tms_sacr1, tcr_sacr2, tms_sacr2, & + tnr_racs1, tnr_racs2, tnr_sacr1, tnr_sacr2 + REAL (KIND=R8SIZE), ALLOCATABLE, DIMENSION(:,:,:,:):: & + tpi_qcfz, tni_qcfz + REAL (KIND=R8SIZE), ALLOCATABLE, DIMENSION(:,:,:,:):: & + tpi_qrfz, tpg_qrfz, tni_qrfz, tnr_qrfz + REAL (KIND=R8SIZE), ALLOCATABLE, DIMENSION(:,:):: & + tps_iaus, tni_iaus, tpi_ide + REAL (KIND=R8SIZE), ALLOCATABLE, DIMENSION(:,:):: t_Efrw + REAL (KIND=R8SIZE), ALLOCATABLE, DIMENSION(:,:):: t_Efsw + REAL (KIND=R8SIZE), ALLOCATABLE, DIMENSION(:,:,:):: tnr_rev + REAL (KIND=R8SIZE), ALLOCATABLE, DIMENSION(:,:,:):: & + tpc_wev, tnc_wev + REAL (KIND=R4SIZE), ALLOCATABLE, DIMENSION(:,:,:,:,:):: tnccn_act + +!..Variables holding a bunch of exponents and gamma values (cloud water, +!.. cloud ice, rain, snow, then graupel). + REAL, DIMENSION(5,15), PRIVATE:: cce, ccg + REAL, DIMENSION(15), PRIVATE:: ocg1, ocg2 + REAL, DIMENSION(7), PRIVATE:: cie, cig + REAL, PRIVATE:: oig1, oig2, obmi + REAL, DIMENSION(13), PRIVATE:: cre, crg + REAL, PRIVATE:: ore1, org1, org2, org3, obmr + REAL, DIMENSION(18), PRIVATE:: cse, csg + REAL, PRIVATE:: oams, obms, ocms + REAL, DIMENSION(12), PRIVATE:: cge, cgg + REAL, PRIVATE:: oge1, ogg1, ogg2, ogg3, oamg, obmg, ocmg + +!..Declaration of precomputed constants in various rate eqns. 
+ REAL:: t1_qr_qc, t1_qr_qi, t2_qr_qi, t1_qg_qc, t1_qs_qc, t1_qs_qi + REAL:: t1_qr_ev, t2_qr_ev + REAL:: t1_qs_sd, t2_qs_sd, t1_qg_sd, t2_qg_sd + REAL:: t1_qs_me, t2_qs_me, t1_qg_me, t2_qg_me + +!+---+ +!+---+-----------------------------------------------------------------+ +!..END DECLARATIONS +!+---+-----------------------------------------------------------------+ +!+---+ +!ctrlL + + CONTAINS + +!================================================================================================================= + subroutine thompson_init(l_mp_tables) + implicit none +!================================================================================================================= + +!input arguments: + logical,intent(in):: l_mp_tables + + integer,parameter:: open_OK = 0 + integer:: i,j,k,l,m,n + integer:: istat + logical:: micro_init + +!..Allocate space for lookup tables (J. Michalakes 2009Jun08). + micro_init = .FALSE. + +!================================================================================================================= + +!..Allocate space for lookup tables (J. Michalakes 2009Jun08). + + if (.NOT. ALLOCATED(tcg_racg) ) then + ALLOCATE(tcg_racg(ntb_g1,ntb_g,ntb_r1,ntb_r)) + micro_init = .TRUE. + endif + + if (.NOT. ALLOCATED(tmr_racg)) ALLOCATE(tmr_racg(ntb_g1,ntb_g,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tcr_gacr)) ALLOCATE(tcr_gacr(ntb_g1,ntb_g,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tmg_gacr)) ALLOCATE(tmg_gacr(ntb_g1,ntb_g,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tnr_racg)) ALLOCATE(tnr_racg(ntb_g1,ntb_g,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tnr_gacr)) ALLOCATE(tnr_gacr(ntb_g1,ntb_g,ntb_r1,ntb_r)) + + if (.NOT. ALLOCATED(tcs_racs1)) ALLOCATE(tcs_racs1(ntb_s,ntb_t,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tmr_racs1)) ALLOCATE(tmr_racs1(ntb_s,ntb_t,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tcs_racs2)) ALLOCATE(tcs_racs2(ntb_s,ntb_t,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tmr_racs2)) ALLOCATE(tmr_racs2(ntb_s,ntb_t,ntb_r1,ntb_r)) + if (.NOT. 
ALLOCATED(tcr_sacr1)) ALLOCATE(tcr_sacr1(ntb_s,ntb_t,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tms_sacr1)) ALLOCATE(tms_sacr1(ntb_s,ntb_t,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tcr_sacr2)) ALLOCATE(tcr_sacr2(ntb_s,ntb_t,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tms_sacr2)) ALLOCATE(tms_sacr2(ntb_s,ntb_t,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tnr_racs1)) ALLOCATE(tnr_racs1(ntb_s,ntb_t,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tnr_racs2)) ALLOCATE(tnr_racs2(ntb_s,ntb_t,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tnr_sacr1)) ALLOCATE(tnr_sacr1(ntb_s,ntb_t,ntb_r1,ntb_r)) + if (.NOT. ALLOCATED(tnr_sacr2)) ALLOCATE(tnr_sacr2(ntb_s,ntb_t,ntb_r1,ntb_r)) + + if (.NOT. ALLOCATED(tpi_qcfz)) ALLOCATE(tpi_qcfz(ntb_c,nbc,45,ntb_IN)) + if (.NOT. ALLOCATED(tni_qcfz)) ALLOCATE(tni_qcfz(ntb_c,nbc,45,ntb_IN)) + + if (.NOT. ALLOCATED(tpi_qrfz)) ALLOCATE(tpi_qrfz(ntb_r,ntb_r1,45,ntb_IN)) + if (.NOT. ALLOCATED(tpg_qrfz)) ALLOCATE(tpg_qrfz(ntb_r,ntb_r1,45,ntb_IN)) + if (.NOT. ALLOCATED(tni_qrfz)) ALLOCATE(tni_qrfz(ntb_r,ntb_r1,45,ntb_IN)) + if (.NOT. ALLOCATED(tnr_qrfz)) ALLOCATE(tnr_qrfz(ntb_r,ntb_r1,45,ntb_IN)) + + if (.NOT. ALLOCATED(tps_iaus)) ALLOCATE(tps_iaus(ntb_i,ntb_i1)) + if (.NOT. ALLOCATED(tni_iaus)) ALLOCATE(tni_iaus(ntb_i,ntb_i1)) + if (.NOT. ALLOCATED(tpi_ide)) ALLOCATE(tpi_ide(ntb_i,ntb_i1)) + + if (.NOT. ALLOCATED(t_Efrw)) ALLOCATE(t_Efrw(nbr,nbc)) + if (.NOT. ALLOCATED(t_Efsw)) ALLOCATE(t_Efsw(nbs,nbc)) + + if (.NOT. ALLOCATED(tnr_rev)) ALLOCATE(tnr_rev(nbr, ntb_r1, ntb_r)) + if (.NOT. ALLOCATED(tpc_wev)) ALLOCATE(tpc_wev(nbc,ntb_c,nbc)) + if (.NOT. ALLOCATED(tnc_wev)) ALLOCATE(tnc_wev(nbc,ntb_c,nbc)) + + if (.NOT. ALLOCATED(tnccn_act)) & + ALLOCATE(tnccn_act(ntb_arc,ntb_arw,ntb_art,ntb_arr,ntb_ark)) + + if (micro_init) then + +!..From Martin et al. (1994), assign gamma shape parameter mu for cloud +!.. drops according to general dispersion characteristics (disp=~0.25 +!.. for Maritime and 0.45 for Continental). +!.. disp=SQRT((mu+2)/(mu+1) - 1) so mu varies from 15 for Maritime +!.. 
to 2 for really dirty air. This not used in 2-moment cloud water +!.. scheme and nu_c used instead and varies from 2 to 15 (integer-only). +! mu_c = MIN(15., (1000.E6/Nt_c + 2.)) + +!..Schmidt number to one-third used numerous times. + Sc3 = Sc**(1./3.) + +!..Compute min ice diam from mass, min snow/graupel mass from diam. + D0i = (xm0i/am_i)**(1./bm_i) + xm0s = am_s * D0s**bm_s + xm0g = am_g * D0g**bm_g + +!..These constants various exponents and gamma() assoc with cloud, +!.. rain, snow, and graupel. + do n = 1, 15 + cce(1,n) = n + 1. + cce(2,n) = bm_r + n + 1. + cce(3,n) = bm_r + n + 4. + cce(4,n) = n + bv_c + 1. + cce(5,n) = bm_r + n + bv_c + 1. + ccg(1,n) = WGAMMA(cce(1,n)) + ccg(2,n) = WGAMMA(cce(2,n)) + ccg(3,n) = WGAMMA(cce(3,n)) + ccg(4,n) = WGAMMA(cce(4,n)) + ccg(5,n) = WGAMMA(cce(5,n)) + ocg1(n) = 1./ccg(1,n) + ocg2(n) = 1./ccg(2,n) + enddo + + cie(1) = mu_i + 1. + cie(2) = bm_i + mu_i + 1. + cie(3) = bm_i + mu_i + bv_i + 1. + cie(4) = mu_i + bv_i + 1. + cie(5) = mu_i + 2. + cie(6) = bm_i*0.5 + mu_i + bv_i + 1. + cie(7) = bm_i*0.5 + mu_i + 1. + cig(1) = WGAMMA(cie(1)) + cig(2) = WGAMMA(cie(2)) + cig(3) = WGAMMA(cie(3)) + cig(4) = WGAMMA(cie(4)) + cig(5) = WGAMMA(cie(5)) + cig(6) = WGAMMA(cie(6)) + cig(7) = WGAMMA(cie(7)) + oig1 = 1./cig(1) + oig2 = 1./cig(2) + obmi = 1./bm_i + + cre(1) = bm_r + 1. + cre(2) = mu_r + 1. + cre(3) = bm_r + mu_r + 1. + cre(4) = bm_r*2. + mu_r + 1. + cre(5) = mu_r + bv_r + 1. + cre(6) = bm_r + mu_r + bv_r + 1. + cre(7) = bm_r*0.5 + mu_r + bv_r + 1. + cre(8) = bm_r + mu_r + bv_r + 3. + cre(9) = mu_r + bv_r + 3. + cre(10) = mu_r + 2. + cre(11) = 0.5*(bv_r + 5. + 2.*mu_r) + cre(12) = bm_r*0.5 + mu_r + 1. + cre(13) = bm_r*2. + mu_r + bv_r + 1. + do n = 1, 13 + crg(n) = WGAMMA(cre(n)) + enddo + obmr = 1./bm_r + ore1 = 1./cre(1) + org1 = 1./crg(1) + org2 = 1./crg(2) + org3 = 1./crg(3) + + cse(1) = bm_s + 1. + cse(2) = bm_s + 2. + cse(3) = bm_s*2. + cse(4) = bm_s + bv_s + 1. + cse(5) = bm_s*2. + bv_s + 1. + cse(6) = bm_s*2. + 1. 
+ cse(7) = bm_s + mu_s + 1. + cse(8) = bm_s + mu_s + 2. + cse(9) = bm_s + mu_s + 3. + cse(10) = bm_s + mu_s + bv_s + 1. + cse(11) = bm_s*2. + mu_s + bv_s + 1. + cse(12) = bm_s*2. + mu_s + 1. + cse(13) = bv_s + 2. + cse(14) = bm_s + bv_s + cse(15) = mu_s + 1. + cse(16) = 1.0 + (1.0 + bv_s)/2. + cse(17) = cse(16) + mu_s + 1. + cse(18) = bv_s + mu_s + 3. + do n = 1, 18 + csg(n) = WGAMMA(cse(n)) + enddo + oams = 1./am_s + obms = 1./bm_s + ocms = oams**obms + + cge(1) = bm_g + 1. + cge(2) = mu_g + 1. + cge(3) = bm_g + mu_g + 1. + cge(4) = bm_g*2. + mu_g + 1. + cge(5) = bm_g*2. + mu_g + bv_g + 1. + cge(6) = bm_g + mu_g + bv_g + 1. + cge(7) = bm_g + mu_g + bv_g + 2. + cge(8) = bm_g + mu_g + bv_g + 3. + cge(9) = mu_g + bv_g + 3. + cge(10) = mu_g + 2. + cge(11) = 0.5*(bv_g + 5. + 2.*mu_g) + cge(12) = 0.5*(bv_g + 5.) + mu_g + do n = 1, 12 + cgg(n) = WGAMMA(cge(n)) + enddo + oamg = 1./am_g + obmg = 1./bm_g + ocmg = oamg**obmg + oge1 = 1./cge(1) + ogg1 = 1./cgg(1) + ogg2 = 1./cgg(2) + ogg3 = 1./cgg(3) + +!+---+-----------------------------------------------------------------+ +!..Simplify various rate eqns the best we can now. +!+---+-----------------------------------------------------------------+ + +!..Rain collecting cloud water and cloud ice + t1_qr_qc = PI*.25*av_r * crg(9) + t1_qr_qi = PI*.25*av_r * crg(9) + t2_qr_qi = PI*.25*am_r*av_r * crg(8) + +!..Graupel collecting cloud water + t1_qg_qc = PI*.25*av_g * cgg(9) + +!..Snow collecting cloud water + t1_qs_qc = PI*.25*av_s + +!..Snow collecting cloud ice + t1_qs_qi = PI*.25*av_s + +!..Evaporation of rain; ignore depositional growth of rain. 
+ t1_qr_ev = 0.78 * crg(10) + t2_qr_ev = 0.308*Sc3*SQRT(av_r) * crg(11) + +!..Sublimation/depositional growth of snow + t1_qs_sd = 0.86 + t2_qs_sd = 0.28*Sc3*SQRT(av_s) + +!..Melting of snow + t1_qs_me = PI*4.*C_sqrd*olfus * 0.86 + t2_qs_me = PI*4.*C_sqrd*olfus * 0.28*Sc3*SQRT(av_s) + +!..Sublimation/depositional growth of graupel + t1_qg_sd = 0.86 * cgg(10) + t2_qg_sd = 0.28*Sc3*SQRT(av_g) * cgg(11) + +!..Melting of graupel + t1_qg_me = PI*4.*C_cube*olfus * 0.86 * cgg(10) + t2_qg_me = PI*4.*C_cube*olfus * 0.28*Sc3*SQRT(av_g) * cgg(11) + +!..Constants for helping find lookup table indexes. + nic2 = NINT(ALOG10(r_c(1))) + nii2 = NINT(ALOG10(r_i(1))) + nii3 = NINT(ALOG10(Nt_i(1))) + nir2 = NINT(ALOG10(r_r(1))) + nir3 = NINT(ALOG10(N0r_exp(1))) + nis2 = NINT(ALOG10(r_s(1))) + nig2 = NINT(ALOG10(r_g(1))) + nig3 = NINT(ALOG10(N0g_exp(1))) + niIN2 = NINT(ALOG10(Nt_IN(1))) + +!..Create bins of cloud water (from min diameter up to 100 microns). + Dc(1) = D0c*1.0d0 + dtc(1) = D0c*1.0d0 + do n = 2, nbc + Dc(n) = Dc(n-1) + 1.0D-6 + dtc(n) = (Dc(n) - Dc(n-1)) + enddo + +!..Create bins of cloud ice (from min diameter up to 5x min snow size). + xDx(1) = D0i*1.0d0 + xDx(nbi+1) = 5.0d0*D0s + do n = 2, nbi + xDx(n) = DEXP(DFLOAT(n-1)/DFLOAT(nbi) & + *DLOG(xDx(nbi+1)/xDx(1)) +DLOG(xDx(1))) + enddo + do n = 1, nbi + Di(n) = DSQRT(xDx(n)*xDx(n+1)) + dti(n) = xDx(n+1) - xDx(n) + enddo + +!..Create bins of rain (from min diameter up to 5 mm). + xDx(1) = D0r*1.0d0 + xDx(nbr+1) = 0.005d0 + do n = 2, nbr + xDx(n) = DEXP(DFLOAT(n-1)/DFLOAT(nbr) & + *DLOG(xDx(nbr+1)/xDx(1)) +DLOG(xDx(1))) + enddo + do n = 1, nbr + Dr(n) = DSQRT(xDx(n)*xDx(n+1)) + dtr(n) = xDx(n+1) - xDx(n) + enddo + +!..Create bins of snow (from min diameter up to 2 cm). 
+ xDx(1) = D0s*1.0d0 + xDx(nbs+1) = 0.02d0 + do n = 2, nbs + xDx(n) = DEXP(DFLOAT(n-1)/DFLOAT(nbs) & + *DLOG(xDx(nbs+1)/xDx(1)) +DLOG(xDx(1))) + enddo + do n = 1, nbs + Ds(n) = DSQRT(xDx(n)*xDx(n+1)) + dts(n) = xDx(n+1) - xDx(n) + enddo + +!..Create bins of graupel (from min diameter up to 5 cm). + xDx(1) = D0g*1.0d0 + xDx(nbg+1) = 0.05d0 + do n = 2, nbg + xDx(n) = DEXP(DFLOAT(n-1)/DFLOAT(nbg) & + *DLOG(xDx(nbg+1)/xDx(1)) +DLOG(xDx(1))) + enddo + do n = 1, nbg + Dg(n) = DSQRT(xDx(n)*xDx(n+1)) + dtg(n) = xDx(n+1) - xDx(n) + enddo + +!..Create bins of cloud droplet number concentration (1 to 3000 per cc). + xDx(1) = 1.0d0 + xDx(nbc+1) = 3000.0d0 + do n = 2, nbc + xDx(n) = DEXP(DFLOAT(n-1)/DFLOAT(nbc) & + *DLOG(xDx(nbc+1)/xDx(1)) +DLOG(xDx(1))) + enddo + do n = 1, nbc + t_Nc(n) = DSQRT(xDx(n)*xDx(n+1)) * 1.D6 + enddo + nic1 = DLOG(t_Nc(nbc)/t_Nc(1)) + +!+---+-----------------------------------------------------------------+ +!..Create lookup tables for most costly calculations. +!+---+-----------------------------------------------------------------+ + + do m = 1, ntb_r + do k = 1, ntb_r1 + do j = 1, ntb_g + do i = 1, ntb_g1 + tcg_racg(i,j,k,m) = 0.0d0 + tmr_racg(i,j,k,m) = 0.0d0 + tcr_gacr(i,j,k,m) = 0.0d0 + tmg_gacr(i,j,k,m) = 0.0d0 + tnr_racg(i,j,k,m) = 0.0d0 + tnr_gacr(i,j,k,m) = 0.0d0 + enddo + enddo + enddo + enddo + + do m = 1, ntb_r + do k = 1, ntb_r1 + do j = 1, ntb_t + do i = 1, ntb_s + tcs_racs1(i,j,k,m) = 0.0d0 + tmr_racs1(i,j,k,m) = 0.0d0 + tcs_racs2(i,j,k,m) = 0.0d0 + tmr_racs2(i,j,k,m) = 0.0d0 + tcr_sacr1(i,j,k,m) = 0.0d0 + tms_sacr1(i,j,k,m) = 0.0d0 + tcr_sacr2(i,j,k,m) = 0.0d0 + tms_sacr2(i,j,k,m) = 0.0d0 + tnr_racs1(i,j,k,m) = 0.0d0 + tnr_racs2(i,j,k,m) = 0.0d0 + tnr_sacr1(i,j,k,m) = 0.0d0 + tnr_sacr2(i,j,k,m) = 0.0d0 + enddo + enddo + enddo + enddo + + do m = 1, ntb_IN + do k = 1, 45 + do j = 1, ntb_r1 + do i = 1, ntb_r + tpi_qrfz(i,j,k,m) = 0.0d0 + tni_qrfz(i,j,k,m) = 0.0d0 + tpg_qrfz(i,j,k,m) = 0.0d0 + tnr_qrfz(i,j,k,m) = 0.0d0 + enddo + enddo + 
do j = 1, nbc + do i = 1, ntb_c + tpi_qcfz(i,j,k,m) = 0.0d0 + tni_qcfz(i,j,k,m) = 0.0d0 + enddo + enddo + enddo + enddo + + do j = 1, ntb_i1 + do i = 1, ntb_i + tps_iaus(i,j) = 0.0d0 + tni_iaus(i,j) = 0.0d0 + tpi_ide(i,j) = 0.0d0 + enddo + enddo + + do j = 1, nbc + do i = 1, nbr + t_Efrw(i,j) = 0.0 + enddo + do i = 1, nbs + t_Efsw(i,j) = 0.0 + enddo + enddo + + do k = 1, ntb_r + do j = 1, ntb_r1 + do i = 1, nbr + tnr_rev(i,j,k) = 0.0d0 + enddo + enddo + enddo + + do k = 1, nbc + do j = 1, ntb_c + do i = 1, nbc + tpc_wev(i,j,k) = 0.0d0 + tnc_wev(i,j,k) = 0.0d0 + enddo + enddo + enddo + + do m = 1, ntb_ark + do l = 1, ntb_arr + do k = 1, ntb_art + do j = 1, ntb_arw + do i = 1, ntb_arc + tnccn_act(i,j,k,l,m) = 1.0 + enddo + enddo + enddo + enddo + enddo + +!..Check that the look-up tables are available. + if(.not. l_mp_tables) return + +!..Collision efficiency between rain/snow and cloud water. +! call physics_message('--- creating qc collision eff tables') + call table_Efrw + call table_Efsw + +!..Drop evaporation. +! call physics_message('--- creating rain evap table') + call table_dropEvap + +!..Rain collecting graupel & graupel collecting rain. + open(unit=11,file='MP_THOMPSON_QRacrQG_DATA.DBL',form='UNFORMATTED',status='OLD',action='READ', & + iostat = istat) + if(istat /= open_OK) & + call physics_error_fatal('subroutine thompson_init: ' // & + 'failure opening MP_THOMPSON_QRacrQG.DBL') + read(11) tcg_racg + read(11) tmr_racg + read(11) tcr_gacr + read(11) tmg_gacr + read(11) tnr_racg + read(11) tnr_gacr + close(unit=11) +! write(0,*) '--- end read MP_THOMPSON_QRacrQG.DBL' +! write(0,*) 'max tcg_racg =',maxval(tcg_racg) +! write(0,*) 'min tcg_racg =',minval(tcg_racg) +! write(0,*) 'max tmr_racg =',maxval(tmr_racg) +! write(0,*) 'min tmr_racg =',minval(tmr_racg) +! write(0,*) 'max tcr_gacr =',maxval(tcr_gacr) +! write(0,*) 'min tcr_gacr =',minval(tcr_gacr) +! write(0,*) 'max tmg_gacr =',maxval(tmg_gacr) +! write(0,*) 'min tmg_gacr =',minval(tmg_gacr) +! 
write(0,*) 'max tnr_racg =',maxval(tnr_racg) +! write(0,*) 'min tnr_racg =',minval(tnr_racg) +! write(0,*) 'max tnr_gacr =',maxval(tnr_gacr) +! write(0,*) 'min tnr_gacr =',minval(tnr_gacr) + +!..Rain collecting snow & snow collecting rain. + open(unit=11,file='MP_THOMPSON_QRacrQS_DATA.DBL',form='UNFORMATTED',status='OLD',action='READ', & + iostat=istat) + if(istat /= open_OK) & + call physics_error_fatal('subroutine thompson_init: ' // & + 'failure opening MP_THOMPSON_QRacrQS.DBL') + read(11) tcs_racs1 + read(11) tmr_racs1 + read(11) tcs_racs2 + read(11) tmr_racs2 + read(11) tcr_sacr1 + read(11) tms_sacr1 + read(11) tcr_sacr2 + read(11) tms_sacr2 + read(11) tnr_racs1 + read(11) tnr_racs2 + read(11) tnr_sacr1 + read(11) tnr_sacr2 + close(unit=11) +! write(0,*) '--- end read MP_THOMPSON_QRacrQS.DBL' +! write(0,*) 'max tcs_racs1 =',maxval(tcs_racs1) +! write(0,*) 'min tcs_racs1 =',minval(tcs_racs1) +! write(0,*) 'max tmr_racs1 =',maxval(tmr_racs1) +! write(0,*) 'min tmr_racs1 =',minval(tmr_racs1) +! write(0,*) 'max tcs_racs2 =',maxval(tcs_racs2) +! write(0,*) 'min tcs_racs2 =',minval(tcs_racs2) +! write(0,*) 'max tmr_racs2 =',maxval(tmr_racs2) +! write(0,*) 'min tmr_racs2 =',minval(tmr_racs2) +! write(0,*) 'max tcr_sacr1 =',maxval(tcr_sacr1) +! write(0,*) 'min tcr_sacr1 =',minval(tcr_sacr1) +! write(0,*) 'max tms_sacr1 =',maxval(tms_sacr1) +! write(0,*) 'min tms_sacr1 =',minval(tms_sacr1) +! write(0,*) 'max tcr_sacr2 =',maxval(tcr_sacr2) +! write(0,*) 'min tcr_sacr2 =',minval(tcr_sacr2) +! write(0,*) 'max tms_sacr2 =',maxval(tms_sacr2) +! write(0,*) 'min tms_sacr2 =',minval(tms_sacr2) +! write(0,*) 'max tnr_racs1 =',maxval(tnr_racs1) +! write(0,*) 'min tnr_racs1 =',minval(tnr_racs1) +! write(0,*) 'max tnr_racs2 =',maxval(tnr_racs2) +! write(0,*) 'min tnr_racs2 =',minval(tnr_racs2) +! write(0,*) 'max tnr_sacr1 =',maxval(tnr_sacr1) +! write(0,*) 'min tnr_sacr1 =',minval(tnr_sacr1) +! write(0,*) 'max tnr_sacr2 =',maxval(tnr_sacr2) +! 
write(0,*) 'min tnr_sacr2 =',minval(tnr_sacr2) + +!..Cloud water and rain freezing (Bigg, 1953). + open(unit=11,file='MP_THOMPSON_freezeH2O_DATA.DBL',form='UNFORMATTED',status='OLD',action='READ', & + iostat=istat) + if(istat /= open_OK) & + call physics_error_fatal('subroutine thompson_init: ' // & + 'failure opening MP_THOMPSON_freezeH2O.DBL') + read(11) tpi_qrfz + read(11) tni_qrfz + read(11) tpg_qrfz + read(11) tnr_qrfz + read(11) tpi_qcfz + read(11) tni_qcfz + close(unit=11) +! write(0,*) '--- end read MP_THOMPSON_freezeH2O.DBL:' +! write(0,*) 'max tpi_qrfz =',maxval(tpi_qrfz) +! write(0,*) 'min tpi_qrfz =',minval(tpi_qrfz) +! write(0,*) 'max tni_qrfz =',maxval(tni_qrfz) +! write(0,*) 'min tni_qrfz =',minval(tni_qrfz) +! write(0,*) 'max tpg_qrfz =',maxval(tpg_qrfz) +! write(0,*) 'min tpg_qrfz =',minval(tpg_qrfz) +! write(0,*) 'max tnr_qrfz =',maxval(tnr_qrfz) +! write(0,*) 'min tnr_qrfz =',minval(tnr_qrfz) +! write(0,*) 'max tpi_qcfz =',maxval(tpi_qcfz) +! write(0,*) 'min tpi_qcfz =',minval(tpi_qcfz) +! write(0,*) 'max tni_qcfz =',maxval(tni_qcfz) +! write(0,*) 'min tni_qcfz =',minval(tni_qcfz) + +!..Conversion of some ice mass into snow category. + open(unit=11,file='MP_THOMPSON_QIautQS_DATA.DBL',form='UNFORMATTED',status='OLD',action='READ', & + iostat=istat) + if(istat /= open_OK) & + call physics_error_fatal('subroutine thompson_init: ' // & + 'failure opening MP_THOMPSON_QIautQS.DBL') + read(11) tpi_ide + read(11) tps_iaus + read(11) tni_iaus + close(unit=11) +! write(0,*) '--- end read MP_THOMPSON_QIautQS.DBL ' +! write(0,*) 'max tps_iaus =',maxval(tps_iaus) +! write(0,*) 'min tps_iaus =',minval(tps_iaus) +! write(0,*) 'max tni_iaus =',maxval(tni_iaus) +! write(0,*) 'min tni_iaus =',minval(tni_iaus) + +!..Initialize various constants for computing radar reflectivity. 
+ xam_r = am_r + xbm_r = bm_r + xmu_r = mu_r + xam_s = am_s + xbm_s = bm_s + xmu_s = mu_s + xam_g = am_g + xbm_g = bm_g + xmu_g = mu_g + call radar_init + + endif + + END SUBROUTINE thompson_init +! +!+---+-----------------------------------------------------------------+ +!+---+-----------------------------------------------------------------+ +!ctrlL +!+---+-----------------------------------------------------------------+ +!..This is a wrapper routine designed to transfer values from 3D to 1D. +!+---+-----------------------------------------------------------------+ + SUBROUTINE mp_gt_driver(qv, qc, qr, qi, qs, qg, ni, nr, nc, & + nwfa, nifa, nwfa2d, & + th, pii, p, w, dz, dt_in, itimestep, & + RAINNC, RAINNCV, & + SNOWNC, SNOWNCV, & + GRAUPELNC, GRAUPELNCV, SR, & + refl_10cm, diagflag, do_radar_ref, & + re_cloud, re_ice, re_snow, & + has_reqc, has_reqi, has_reqs, & +#if defined(mpas) + ntc,muc,rainprod,evapprod, & +#endif + ids,ide, jds,jde, kds,kde, & ! domain dims + ims,ime, jms,jme, kms,kme, & ! memory dims + its,ite, jts,jte, kts,kte) ! 
tile dims + + implicit none + +!..Subroutine arguments + INTEGER, INTENT(IN):: ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte + REAL, DIMENSION(ims:ime, kms:kme, jms:jme), INTENT(INOUT):: & + qv, qc, qr, qi, qs, qg, ni, nr, th + REAL, DIMENSION(ims:ime, kms:kme, jms:jme), OPTIONAL, INTENT(INOUT):: & + nc, nwfa, nifa + REAL, DIMENSION(ims:ime, jms:jme), OPTIONAL, INTENT(IN):: nwfa2d + REAL, DIMENSION(ims:ime, kms:kme, jms:jme), INTENT(INOUT):: & + re_cloud, re_ice, re_snow + INTEGER, INTENT(IN):: has_reqc, has_reqi, has_reqs + REAL, DIMENSION(ims:ime, kms:kme, jms:jme), INTENT(IN):: & + pii, p, w, dz + REAL, DIMENSION(ims:ime, jms:jme), INTENT(INOUT):: & + RAINNC, RAINNCV, SR + REAL, DIMENSION(ims:ime, jms:jme), OPTIONAL, INTENT(INOUT):: & + SNOWNC, SNOWNCV, GRAUPELNC, GRAUPELNCV +#if defined(mpas) + REAL, DIMENSION(ims:ime, jms:jme), INTENT(IN):: & + ntc,muc + REAL, DIMENSION(ims:ime, kms:kme, jms:jme), INTENT(INOUT):: & + rainprod,evapprod + REAL, DIMENSION(ims:ime, kms:kme, jms:jme), INTENT(INOUT), OPTIONAL:: & + refl_10cm +#else + REAL, DIMENSION(ims:ime, kms:kme, jms:jme), INTENT(INOUT):: & + refl_10cm +#endif + REAL, INTENT(IN):: dt_in + INTEGER, INTENT(IN):: itimestep + +!..Local variables + REAL, DIMENSION(kts:kte):: & + qv1d, qc1d, qi1d, qr1d, qs1d, qg1d, ni1d, & + nr1d, nc1d, nwfa1d, nifa1d, & + t1d, p1d, w1d, dz1d, rho, dBZ + REAL, DIMENSION(kts:kte):: re_qc1d, re_qi1d, re_qs1d +#if defined(mpas) + REAL, DIMENSION(kts:kte):: & + rainprod1d, evapprod1d +#endif + REAL, DIMENSION(its:ite, jts:jte):: pcp_ra, pcp_sn, pcp_gr, pcp_ic + REAL:: dt, pptrain, pptsnow, pptgraul, pptice + REAL:: qc_max, qr_max, qs_max, qi_max, qg_max, ni_max, nr_max + REAL:: nwfa1 + INTEGER:: i, j, k + INTEGER:: imax_qc,imax_qr,imax_qi,imax_qs,imax_qg,imax_ni,imax_nr + INTEGER:: jmax_qc,jmax_qr,jmax_qi,jmax_qs,jmax_qg,jmax_ni,jmax_nr + INTEGER:: kmax_qc,kmax_qr,kmax_qi,kmax_qs,kmax_qg,kmax_ni,kmax_nr + INTEGER:: i_start, j_start, i_end, j_end + 
LOGICAL, OPTIONAL, INTENT(IN) :: diagflag + INTEGER, OPTIONAL, INTENT(IN) :: do_radar_ref + CHARACTER*256:: mp_debug + +!+---+ + + i_start = its + j_start = jts + i_end = MIN(ite, ide-1) + j_end = MIN(jte, jde-1) + +!..For idealized testing by developer. +! if ( (ide-ids+1).gt.4 .and. (jde-jds+1).lt.4 .and. & +! ids.eq.its.and.ide.eq.ite.and.jds.eq.jts.and.jde.eq.jte) then +! i_start = its + 2 +! i_end = ite +! j_start = jts +! j_end = jte +! endif + + dt = dt_in + + qc_max = 0. + qr_max = 0. + qs_max = 0. + qi_max = 0. + qg_max = 0 + ni_max = 0. + nr_max = 0. + imax_qc = 0 + imax_qr = 0 + imax_qi = 0 + imax_qs = 0 + imax_qg = 0 + imax_ni = 0 + imax_nr = 0 + jmax_qc = 0 + jmax_qr = 0 + jmax_qi = 0 + jmax_qs = 0 + jmax_qg = 0 + jmax_ni = 0 + jmax_nr = 0 + kmax_qc = 0 + kmax_qr = 0 + kmax_qi = 0 + kmax_qs = 0 + kmax_qg = 0 + kmax_ni = 0 + kmax_nr = 0 + do i = 1, 256 + mp_debug(i:i) = char(0) + enddo + +! if (.NOT. is_aerosol_aware .AND. PRESENT(nc) .AND. PRESENT(nwfa) & +! .AND. PRESENT(nifa) .AND. PRESENT(nwfa2d)) then +! write(mp_debug,*) 'WARNING, nc-nwfa-nifa-nwfa2d present but is_aerosol_aware is FALSE' +! CALL wrf_debug(0, mp_debug) +! endif + + j_loop: do j = j_start, j_end + i_loop: do i = i_start, i_end + + pptrain = 0. + pptsnow = 0. + pptgraul = 0. + pptice = 0. + RAINNCV(i,j) = 0. + IF ( PRESENT (snowncv) ) THEN + SNOWNCV(i,j) = 0. + ENDIF + IF ( PRESENT (graupelncv) ) THEN + GRAUPELNCV(i,j) = 0. + ENDIF + SR(i,j) = 0. 
+ +#if defined(mpas) + Nt_c = ntc(i,j) + mu_c = muc(i,j) +#endif + do k = kts, kte + t1d(k) = th(i,k,j)*pii(i,k,j) + p1d(k) = p(i,k,j) + w1d(k) = w(i,k,j) + dz1d(k) = dz(i,k,j) + qv1d(k) = qv(i,k,j) + qc1d(k) = qc(i,k,j) + qi1d(k) = qi(i,k,j) + qr1d(k) = qr(i,k,j) + qs1d(k) = qs(i,k,j) + qg1d(k) = qg(i,k,j) + ni1d(k) = ni(i,k,j) + nr1d(k) = nr(i,k,j) + enddo + if (is_aerosol_aware) then + do k = kts, kte + nc1d(k) = nc(i,k,j) + nwfa1d(k) = nwfa(i,k,j) + nifa1d(k) = nifa(i,k,j) + enddo + nwfa1 = nwfa2d(i,j) + else + do k = kts, kte + rho(k) = 0.622*p1d(k)/(R*t1d(k)*(qv1d(k)+0.622)) + nc1d(k) = Nt_c/rho(k) + nwfa1d(k) = 11.1E6/rho(k) + nifa1d(k) = naIN1*0.01/rho(k) + enddo + nwfa1 = 11.1E6 + endif + + call mp_thompson(qv1d, qc1d, qi1d, qr1d, qs1d, qg1d, ni1d, & + nr1d, nc1d, nwfa1d, nifa1d, t1d, p1d, w1d, dz1d, & + pptrain, pptsnow, pptgraul, pptice, & +#if defined(mpas) + rainprod1d, evapprod1d, & +#endif + kts, kte, dt, i, j) + + pcp_ra(i,j) = pptrain + pcp_sn(i,j) = pptsnow + pcp_gr(i,j) = pptgraul + pcp_ic(i,j) = pptice + RAINNCV(i,j) = pptrain + pptsnow + pptgraul + pptice + RAINNC(i,j) = RAINNC(i,j) + pptrain + pptsnow + pptgraul + pptice + IF ( PRESENT(snowncv) .AND. PRESENT(snownc) ) THEN + SNOWNCV(i,j) = pptsnow + pptice + SNOWNC(i,j) = SNOWNC(i,j) + pptsnow + pptice + ENDIF + IF ( PRESENT(graupelncv) .AND. PRESENT(graupelnc) ) THEN + GRAUPELNCV(i,j) = pptgraul + GRAUPELNC(i,j) = GRAUPELNC(i,j) + pptgraul + ENDIF + SR(i,j) = (pptsnow + pptgraul + pptice)/(RAINNCV(i,j)+1.e-12) + + + +!..Reset lowest model level to initial state aerosols (fake sfc source). +!.. Changed 13 May 2013 to fake emissions in which nwfa2d is aerosol +!.. number tendency (number per kg per second). 
+ if (is_aerosol_aware) then +!-GT nwfa1d(kts) = nwfa1 + nwfa1d(kts) = nwfa1d(kts) + nwfa2d(i,j)*dt_in + + do k = kts, kte + nc(i,k,j) = nc1d(k) + nwfa(i,k,j) = nwfa1d(k) + nifa(i,k,j) = nifa1d(k) + enddo + endif + + do k = kts, kte + qv(i,k,j) = qv1d(k) + qc(i,k,j) = qc1d(k) + qi(i,k,j) = qi1d(k) + qr(i,k,j) = qr1d(k) + qs(i,k,j) = qs1d(k) + qg(i,k,j) = qg1d(k) + ni(i,k,j) = ni1d(k) + nr(i,k,j) = nr1d(k) + th(i,k,j) = t1d(k)/pii(i,k,j) +#if defined(mpas) + rainprod(i,k,j) = rainprod1d(k) + evapprod(i,k,j) = evapprod1d(k) +#endif + if (qc1d(k) .gt. qc_max) then + imax_qc = i + jmax_qc = j + kmax_qc = k + qc_max = qc1d(k) + elseif (qc1d(k) .lt. 0.0) then + write(mp_debug,*) 'WARNING, negative qc ', qc1d(k), & + ' at i,j,k=', i,j,k +! CALL wrf_debug(150, mp_debug) + endif + if (qr1d(k) .gt. qr_max) then + imax_qr = i + jmax_qr = j + kmax_qr = k + qr_max = qr1d(k) + elseif (qr1d(k) .lt. 0.0) then + write(mp_debug,*) 'WARNING, negative qr ', qr1d(k), & + ' at i,j,k=', i,j,k +! CALL wrf_debug(150, mp_debug) + endif + if (nr1d(k) .gt. nr_max) then + imax_nr = i + jmax_nr = j + kmax_nr = k + nr_max = nr1d(k) + elseif (nr1d(k) .lt. 0.0) then + write(mp_debug,*) 'WARNING, negative nr ', nr1d(k), & + ' at i,j,k=', i,j,k +! CALL wrf_debug(150, mp_debug) + endif + if (qs1d(k) .gt. qs_max) then + imax_qs = i + jmax_qs = j + kmax_qs = k + qs_max = qs1d(k) + elseif (qs1d(k) .lt. 0.0) then + write(mp_debug,*) 'WARNING, negative qs ', qs1d(k), & + ' at i,j,k=', i,j,k +! CALL wrf_debug(150, mp_debug) + endif + if (qi1d(k) .gt. qi_max) then + imax_qi = i + jmax_qi = j + kmax_qi = k + qi_max = qi1d(k) + elseif (qi1d(k) .lt. 0.0) then + write(mp_debug,*) 'WARNING, negative qi ', qi1d(k), & + ' at i,j,k=', i,j,k +! CALL wrf_debug(150, mp_debug) + endif + if (qg1d(k) .gt. qg_max) then + imax_qg = i + jmax_qg = j + kmax_qg = k + qg_max = qg1d(k) + elseif (qg1d(k) .lt. 0.0) then + write(mp_debug,*) 'WARNING, negative qg ', qg1d(k), & + ' at i,j,k=', i,j,k +! 
CALL wrf_debug(150, mp_debug) + endif + if (ni1d(k) .gt. ni_max) then + imax_ni = i + jmax_ni = j + kmax_ni = k + ni_max = ni1d(k) + elseif (ni1d(k) .lt. 0.0) then + write(mp_debug,*) 'WARNING, negative ni ', ni1d(k), & + ' at i,j,k=', i,j,k +! CALL wrf_debug(150, mp_debug) + endif + if (qv1d(k) .lt. 0.0) then + write(mp_debug,*) 'WARNING, negative qv ', qv1d(k), & + ' at i,j,k=', i,j,k +! CALL wrf_debug(150, mp_debug) + if (k.lt.kte-2 .and. k.gt.kts+1) then + write(mp_debug,*) ' below and above are: ', qv(i,k-1,j), qv(i,k+1,j) +! CALL wrf_debug(150, mp_debug) + qv(i,k,j) = MAX(1.E-7, 0.5*(qv(i,k-1,j) + qv(i,k+1,j))) + else + qv(i,k,j) = 1.E-7 + endif + endif + enddo + +! IF ( PRESENT (diagflag) ) THEN +! if (diagflag .and. do_radar_ref == 1) then +! call calc_refl10cm (qv1d, qc1d, qr1d, nr1d, qs1d, qg1d, & +! t1d, p1d, dBZ, kts, kte, i, j) +! do k = kts, kte +! refl_10cm(i,k,j) = MAX(-35., dBZ(k)) +! enddo +! endif +! ENDIF + + IF (has_reqc.ne.0 .and. has_reqi.ne.0 .and. has_reqs.ne.0) THEN + do k = kts, kte + re_qc1d(k) = 2.49E-6 + re_qi1d(k) = 4.99E-6 + re_qs1d(k) = 9.99E-6 + enddo + call calc_effectRad (t1d, p1d, qv1d, qc1d, nc1d, qi1d, ni1d, qs1d, & + re_qc1d, re_qi1d, re_qs1d, kts, kte) + do k = kts, kte + re_cloud(i,k,j) = MAX(2.49E-6, MIN(re_qc1d(k), 50.E-6)) + re_ice(i,k,j) = MAX(4.99E-6, MIN(re_qi1d(k), 125.E-6)) + re_snow(i,k,j) = MAX(9.99E-6, MIN(re_qs1d(k), 999.E-6)) + enddo + ENDIF + + enddo i_loop + enddo j_loop + +! 
DEBUG - GT + write(mp_debug,'(a,7(a,e13.6,1x,a,i3,a,i3,a,i3,a,1x))') 'MP-GT:', & + 'qc: ', qc_max, '(', imax_qc, ',', jmax_qc, ',', kmax_qc, ')', & + 'qr: ', qr_max, '(', imax_qr, ',', jmax_qr, ',', kmax_qr, ')', & + 'qi: ', qi_max, '(', imax_qi, ',', jmax_qi, ',', kmax_qi, ')', & + 'qs: ', qs_max, '(', imax_qs, ',', jmax_qs, ',', kmax_qs, ')', & + 'qg: ', qg_max, '(', imax_qg, ',', jmax_qg, ',', kmax_qg, ')', & + 'ni: ', ni_max, '(', imax_ni, ',', jmax_ni, ',', kmax_ni, ')', & + 'nr: ', nr_max, '(', imax_nr, ',', jmax_nr, ',', kmax_nr, ')' +! CALL wrf_debug(150, mp_debug) +! END DEBUG - GT + + do i = 1, 256 + mp_debug(i:i) = char(0) + enddo + + END SUBROUTINE mp_gt_driver + +!+---+-----------------------------------------------------------------+ +!ctrlL +!+---+-----------------------------------------------------------------+ +!+---+-----------------------------------------------------------------+ +!.. This subroutine computes the moisture tendencies of water vapor, +!.. cloud droplets, rain, cloud ice (pristine), snow, and graupel. +!.. Previously this code was based on Reisner et al (1998), but few of +!.. those pieces remain. A complete description is now found in +!.. Thompson et al. (2004, 2008). +!+---+-----------------------------------------------------------------+ +! 
+ subroutine mp_thompson (qv1d, qc1d, qi1d, qr1d, qs1d, qg1d, ni1d, & + nr1d, nc1d, nwfa1d, nifa1d, t1d, p1d, w1d, dzq, & + pptrain, pptsnow, pptgraul, pptice, & +#if defined(mpas) + rainprod, evapprod, & +#endif + kts, kte, dt, ii, jj) + + implicit none + +!..Sub arguments + INTEGER, INTENT(IN):: kts, kte, ii, jj + REAL, DIMENSION(kts:kte), INTENT(INOUT):: & + qv1d, qc1d, qi1d, qr1d, qs1d, qg1d, ni1d, & + nr1d, nc1d, nwfa1d, nifa1d, t1d + REAL, DIMENSION(kts:kte), INTENT(IN):: p1d, w1d, dzq + REAL, INTENT(INOUT):: pptrain, pptsnow, pptgraul, pptice + REAL, INTENT(IN):: dt +#if defined(mpas) + REAL, DIMENSION(kts:kte), INTENT(INOUT):: & + rainprod, evapprod +#endif + +!..Local variables + REAL, DIMENSION(kts:kte):: tten, qvten, qcten, qiten, & + qrten, qsten, qgten, niten, nrten, ncten, nwfaten, nifaten + + DOUBLE PRECISION, DIMENSION(kts:kte):: prw_vcd + + DOUBLE PRECISION, DIMENSION(kts:kte):: pnc_wcd, pnc_wau, pnc_rcw, & + pnc_scw, pnc_gcw + + DOUBLE PRECISION, DIMENSION(kts:kte):: pna_rca, pna_sca, pna_gca, & + pnd_rcd, pnd_scd, pnd_gcd + + DOUBLE PRECISION, DIMENSION(kts:kte):: prr_wau, prr_rcw, prr_rcs, & + prr_rcg, prr_sml, prr_gml, & + prr_rci, prv_rev, & + pnr_wau, pnr_rcs, pnr_rcg, & + pnr_rci, pnr_sml, pnr_gml, & + pnr_rev, pnr_rcr, pnr_rfz + + DOUBLE PRECISION, DIMENSION(kts:kte):: pri_inu, pni_inu, pri_ihm, & + pni_ihm, pri_wfz, pni_wfz, & + pri_rfz, pni_rfz, pri_ide, & + pni_ide, pri_rci, pni_rci, & + pni_sci, pni_iau, pri_iha, pni_iha + + DOUBLE PRECISION, DIMENSION(kts:kte):: prs_iau, prs_sci, prs_rcs, & + prs_scw, prs_sde, prs_ihm, & + prs_ide + + DOUBLE PRECISION, DIMENSION(kts:kte):: prg_scw, prg_rfz, prg_gde, & + prg_gcw, prg_rci, prg_rcs, & + prg_rcg, prg_ihm + + DOUBLE PRECISION, PARAMETER:: zeroD0 = 0.0d0 + + REAL, DIMENSION(kts:kte):: temp, pres, qv + REAL, DIMENSION(kts:kte):: rc, ri, rr, rs, rg, ni, nr, nc, nwfa, nifa + REAL, DIMENSION(kts:kte):: rho, rhof, rhof2 + REAL, DIMENSION(kts:kte):: qvs, qvsi, delQvs + REAL, DIMENSION(kts:kte):: 
satw, sati, ssatw, ssati + REAL, DIMENSION(kts:kte):: diffu, visco, vsc2, & + tcond, lvap, ocp, lvt2 + + DOUBLE PRECISION, DIMENSION(kts:kte):: ilamr, ilamg, N0_r, N0_g + REAL, DIMENSION(kts:kte):: mvd_r, mvd_c + REAL, DIMENSION(kts:kte):: smob, smo2, smo1, smo0, & + smoc, smod, smoe, smof + + REAL, DIMENSION(kts:kte):: sed_r, sed_s, sed_g, sed_i, sed_n,sed_c + + REAL:: rgvm, delta_tp, orho, lfus2 + REAL, DIMENSION(5):: onstep + DOUBLE PRECISION:: N0_exp, N0_min, lam_exp, lamc, lamr, lamg + DOUBLE PRECISION:: lami, ilami, ilamc + REAL:: xDc, Dc_b, Dc_g, xDi, xDr, xDs, xDg, Ds_m, Dg_m + DOUBLE PRECISION:: Dr_star, Dc_star + REAL:: zeta1, zeta, taud, tau + REAL:: stoke_r, stoke_s, stoke_g, stoke_i + REAL:: vti, vtr, vts, vtg, vtc + REAL, DIMENSION(kts:kte+1):: vtik, vtnik, vtrk, vtnrk, vtsk, vtgk, & + vtck, vtnck + REAL, DIMENSION(kts:kte):: vts_boost + REAL:: Mrat, ils1, ils2, t1_vts, t2_vts, t3_vts, t4_vts, C_snow + REAL:: a_, b_, loga_, A1, A2, tf + REAL:: tempc, tc0, r_mvd1, r_mvd2, xkrat + REAL:: xnc, xri, xni, xmi, oxmi, xrc, xrr, xnr + REAL:: xsat, rate_max, sump, ratio + REAL:: clap, fcd, dfcd + REAL:: otemp, rvs, rvs_p, rvs_pp, gamsc, alphsc, t1_evap, t1_subl + REAL:: r_frac, g_frac + REAL:: Ef_rw, Ef_sw, Ef_gw, Ef_rr + REAL:: Ef_ra, Ef_sa, Ef_ga + REAL:: dtsave, odts, odt, odzq, hgt_agl + REAL:: xslw1, ygra1, zans1, eva_factor + INTEGER:: i, k, k2, n, nn, nstep, k_0, kbot, IT, iexfrq + INTEGER, DIMENSION(5):: ksed1 + INTEGER:: nir, nis, nig, nii, nic, niin + INTEGER:: idx_tc, idx_t, idx_s, idx_g1, idx_g, idx_r1, idx_r, & + idx_i1, idx_i, idx_c, idx, idx_d, idx_n, idx_in + + LOGICAL:: melti, no_micro + LOGICAL, DIMENSION(kts:kte):: L_qc, L_qi, L_qr, L_qs, L_qg + LOGICAL:: debug_flag + CHARACTER*256:: mp_debug + INTEGER:: nu_c + +!+---+ + + + debug_flag = .false. +! if (ii.eq.901 .and. jj.eq.379) debug_flag = .true. + if(debug_flag) then + write(mp_debug, *) 'DEBUG INFO, mp_thompson at (i,j) ', ii, ', ', jj +! 
CALL wrf_debug(550, mp_debug) + endif + + no_micro = .true. + dtsave = dt + odt = 1./dt + odts = 1./dtsave + iexfrq = 1 + +!+---+-----------------------------------------------------------------+ +!.. Source/sink terms. First 2 chars: "pr" represents source/sink of +!.. mass while "pn" represents source/sink of number. Next char is one +!.. of "v" for water vapor, "r" for rain, "i" for cloud ice, "w" for +!.. cloud water, "s" for snow, and "g" for graupel. Next chars +!.. represent processes: "de" for sublimation/deposition, "ev" for +!.. evaporation, "fz" for freezing, "ml" for melting, "au" for +!.. autoconversion, "nu" for ice nucleation, "hm" for Hallet/Mossop +!.. secondary ice production, and "c" for collection followed by the +!.. character for the species being collected. ALL of these terms are +!.. positive (except for deposition/sublimation terms which can switch +!.. signs based on super/subsaturation) and are treated as negatives +!.. where necessary in the tendency equations. +!+---+-----------------------------------------------------------------+ + + do k = kts, kte + tten(k) = 0. + qvten(k) = 0. + qcten(k) = 0. + qiten(k) = 0. + qrten(k) = 0. + qsten(k) = 0. + qgten(k) = 0. + niten(k) = 0. + nrten(k) = 0. + ncten(k) = 0. + nwfaten(k) = 0. + nifaten(k) = 0. + + prw_vcd(k) = 0. + + pnc_wcd(k) = 0. + pnc_wau(k) = 0. + pnc_rcw(k) = 0. + pnc_scw(k) = 0. + pnc_gcw(k) = 0. + + prv_rev(k) = 0. + prr_wau(k) = 0. + prr_rcw(k) = 0. + prr_rcs(k) = 0. + prr_rcg(k) = 0. + prr_sml(k) = 0. + prr_gml(k) = 0. + prr_rci(k) = 0. + pnr_wau(k) = 0. + pnr_rcs(k) = 0. + pnr_rcg(k) = 0. + pnr_rci(k) = 0. + pnr_sml(k) = 0. + pnr_gml(k) = 0. + pnr_rev(k) = 0. + pnr_rcr(k) = 0. + pnr_rfz(k) = 0. + + pri_inu(k) = 0. + pni_inu(k) = 0. + pri_ihm(k) = 0. + pni_ihm(k) = 0. + pri_wfz(k) = 0. + pni_wfz(k) = 0. + pri_rfz(k) = 0. + pni_rfz(k) = 0. + pri_ide(k) = 0. + pni_ide(k) = 0. + pri_rci(k) = 0. + pni_rci(k) = 0. + pni_sci(k) = 0. + pni_iau(k) = 0. + pri_iha(k) = 0. 
+ pni_iha(k) = 0. + + prs_iau(k) = 0. + prs_sci(k) = 0. + prs_rcs(k) = 0. + prs_scw(k) = 0. + prs_sde(k) = 0. + prs_ihm(k) = 0. + prs_ide(k) = 0. + + prg_scw(k) = 0. + prg_rfz(k) = 0. + prg_gde(k) = 0. + prg_gcw(k) = 0. + prg_rci(k) = 0. + prg_rcs(k) = 0. + prg_rcg(k) = 0. + prg_ihm(k) = 0. + + pna_rca(k) = 0. + pna_sca(k) = 0. + pna_gca(k) = 0. + + pnd_rcd(k) = 0. + pnd_scd(k) = 0. + pnd_gcd(k) = 0. + enddo +#if defined(mpas) + do k = kts, kte + rainprod(k) = 0. + evapprod(k) = 0. + enddo +#endif + +!..Bug fix (2016Jun15), prevent use of uninitialized value(s) of snow moments. + do k = kts, kte + smo0(k) = 0. + smo1(k) = 0. + smo2(k) = 0. + smob(k) = 0. + smoc(k) = 0. + smod(k) = 0. + smoe(k) = 0. + smof(k) = 0. + enddo + +!+---+-----------------------------------------------------------------+ +!..Put column of data into local arrays. +!+---+-----------------------------------------------------------------+ + do k = kts, kte + temp(k) = t1d(k) + qv(k) = MAX(1.E-10, qv1d(k)) + pres(k) = p1d(k) + rho(k) = 0.622*pres(k)/(R*temp(k)*(qv(k)+0.622)) + nwfa(k) = MAX(11.1E6, MIN(9999.E6, nwfa1d(k)*rho(k))) + nifa(k) = MAX(naIN1*0.01, MIN(9999.E6, nifa1d(k)*rho(k))) + + if (qc1d(k) .gt. R1) then + no_micro = .false. + rc(k) = qc1d(k)*rho(k) + nc(k) = MAX(2., nc1d(k)*rho(k)) + L_qc(k) = .true. + nu_c = MIN(15, NINT(1000.E6/nc(k)) + 2) + lamc = (nc(k)*am_r*ccg(2,nu_c)*ocg1(nu_c)/rc(k))**obmr + xDc = (bm_r + nu_c + 1.) / lamc + if (xDc.lt. D0c) then + lamc = cce(2,nu_c)/D0c + elseif (xDc.gt. D0r*2.) then + lamc = cce(2,nu_c)/(D0r*2.) + endif + nc(k) = MIN( DBLE(Nt_c_max), ccg(1,nu_c)*ocg2(nu_c)*rc(k) & + / am_r*lamc**bm_r) + if (.NOT. is_aerosol_aware) nc(k) = Nt_c + else + qc1d(k) = 0.0 + nc1d(k) = 0.0 + rc(k) = R1 + nc(k) = 2. + L_qc(k) = .false. + endif + + if (qi1d(k) .gt. R1) then + no_micro = .false. + ri(k) = qi1d(k)*rho(k) + ni(k) = MAX(R2, ni1d(k)*rho(k)) + if (ni(k).le. 
R2) then + lami = cie(2)/25.E-6 + ni(k) = MIN(499.D3, cig(1)*oig2*ri(k)/am_i*lami**bm_i) + endif + L_qi(k) = .true. + lami = (am_i*cig(2)*oig1*ni(k)/ri(k))**obmi + ilami = 1./lami + xDi = (bm_i + mu_i + 1.) * ilami + if (xDi.lt. 5.E-6) then + lami = cie(2)/5.E-6 + ni(k) = MIN(499.D3, cig(1)*oig2*ri(k)/am_i*lami**bm_i) + elseif (xDi.gt. 300.E-6) then + lami = cie(2)/300.E-6 + ni(k) = cig(1)*oig2*ri(k)/am_i*lami**bm_i + endif + else + qi1d(k) = 0.0 + ni1d(k) = 0.0 + ri(k) = R1 + ni(k) = R2 + L_qi(k) = .false. + endif + + if (qr1d(k) .gt. R1) then + no_micro = .false. + rr(k) = qr1d(k)*rho(k) + nr(k) = MAX(R2, nr1d(k)*rho(k)) + if (nr(k).le. R2) then + mvd_r(k) = 1.0E-3 + lamr = (3.0 + mu_r + 0.672) / mvd_r(k) + nr(k) = crg(2)*org3*rr(k)*lamr**bm_r / am_r + endif + L_qr(k) = .true. + lamr = (am_r*crg(3)*org2*nr(k)/rr(k))**obmr + mvd_r(k) = (3.0 + mu_r + 0.672) / lamr + if (mvd_r(k) .gt. 2.5E-3) then + mvd_r(k) = 2.5E-3 + lamr = (3.0 + mu_r + 0.672) / mvd_r(k) + nr(k) = crg(2)*org3*rr(k)*lamr**bm_r / am_r + elseif (mvd_r(k) .lt. D0r*0.75) then + mvd_r(k) = D0r*0.75 + lamr = (3.0 + mu_r + 0.672) / mvd_r(k) + nr(k) = crg(2)*org3*rr(k)*lamr**bm_r / am_r + endif + else + qr1d(k) = 0.0 + nr1d(k) = 0.0 + rr(k) = R1 + nr(k) = R2 + L_qr(k) = .false. + endif + if (qs1d(k) .gt. R1) then + no_micro = .false. + rs(k) = qs1d(k)*rho(k) + L_qs(k) = .true. + else + qs1d(k) = 0.0 + rs(k) = R1 + L_qs(k) = .false. + endif + if (qg1d(k) .gt. R1) then + no_micro = .false. + rg(k) = qg1d(k)*rho(k) + L_qg(k) = .true. + else + qg1d(k) = 0.0 + rg(k) = R1 + L_qg(k) = .false. + endif + enddo + +!+---+-----------------------------------------------------------------+ +! if (debug_flag) then +! write(mp_debug,*) 'DEBUG-VERBOSE at (i,j) ', ii, ', ', jj +! CALL wrf_debug(550, mp_debug) +! do k = kts, kte +! write(mp_debug, '(a,i3,f8.2,1x,f7.2,1x, 11(1x,e13.6))') & +! & 'VERBOSE: ', k, pres(k)*0.01, temp(k)-273.15, qv(k), rc(k), rr(k), ri(k), rs(k), rg(k), nc(k), nr(k), ni(k), nwfa(k), nifa(k) +! 
CALL wrf_debug(550, mp_debug) +! enddo +! endif +!+---+-----------------------------------------------------------------+ + +!+---+-----------------------------------------------------------------+ +!..Derive various thermodynamic variables frequently used. +!.. Saturation vapor pressure (mixing ratio) over liquid/ice comes from +!.. Flatau et al. 1992; enthalpy (latent heat) of vaporization from +!.. Bohren & Albrecht 1998; others from Pruppacher & Klett 1978. +!+---+-----------------------------------------------------------------+ + do k = kts, kte + tempc = temp(k) - 273.15 + rhof(k) = SQRT(RHO_NOT/rho(k)) + rhof2(k) = SQRT(rhof(k)) + qvs(k) = rslf(pres(k), temp(k)) + delQvs(k) = MAX(0.0, rslf(pres(k), 273.15)-qv(k)) + if (tempc .le. 0.0) then + qvsi(k) = rsif(pres(k), temp(k)) + else + qvsi(k) = qvs(k) + endif + satw(k) = qv(k)/qvs(k) + sati(k) = qv(k)/qvsi(k) + ssatw(k) = satw(k) - 1. + ssati(k) = sati(k) - 1. + if (abs(ssatw(k)).lt. eps) ssatw(k) = 0.0 + if (abs(ssati(k)).lt. eps) ssati(k) = 0.0 + if (no_micro .and. ssati(k).gt. 0.0) no_micro = .false. + diffu(k) = 2.11E-5*(temp(k)/273.15)**1.94 * (101325./pres(k)) + if (tempc .ge. 0.0) then + visco(k) = (1.718+0.0049*tempc)*1.0E-5 + else + visco(k) = (1.718+0.0049*tempc-1.2E-5*tempc*tempc)*1.0E-5 + endif + ocp(k) = 1./(Cp*(1.+0.887*qv(k))) + vsc2(k) = SQRT(rho(k)/visco(k)) + lvap(k) = lvap0 + (2106.0 - 4218.0)*tempc + tcond(k) = (5.69 + 0.0168*tempc)*1.0E-5 * 418.936 + enddo + +!+---+-----------------------------------------------------------------+ +!..If no existing hydrometeor species and no chance to initiate ice or +!.. condense cloud water, just exit quickly! +!+---+-----------------------------------------------------------------+ + + if (no_micro) return + +!+---+-----------------------------------------------------------------+ +!..Calculate y-intercept, slope, and useful moments for snow. +!+---+-----------------------------------------------------------------+ + if (.not. 
iiwarm) then + do k = kts, kte + if (.not. L_qs(k)) CYCLE + tc0 = MIN(-0.1, temp(k)-273.15) + smob(k) = rs(k)*oams + +!..All other moments based on reference, 2nd moment. If bm_s.ne.2, +!.. then we must compute actual 2nd moment and use as reference. + if (bm_s.gt.(2.0-1.e-3) .and. bm_s.lt.(2.0+1.e-3)) then + smo2(k) = smob(k) + else + loga_ = sa(1) + sa(2)*tc0 + sa(3)*bm_s & + + sa(4)*tc0*bm_s + sa(5)*tc0*tc0 & + + sa(6)*bm_s*bm_s + sa(7)*tc0*tc0*bm_s & + + sa(8)*tc0*bm_s*bm_s + sa(9)*tc0*tc0*tc0 & + + sa(10)*bm_s*bm_s*bm_s + a_ = 10.0**loga_ + b_ = sb(1) + sb(2)*tc0 + sb(3)*bm_s & + + sb(4)*tc0*bm_s + sb(5)*tc0*tc0 & + + sb(6)*bm_s*bm_s + sb(7)*tc0*tc0*bm_s & + + sb(8)*tc0*bm_s*bm_s + sb(9)*tc0*tc0*tc0 & + + sb(10)*bm_s*bm_s*bm_s + smo2(k) = (smob(k)/a_)**(1./b_) + endif + +!..Calculate 0th moment. Represents snow number concentration. + loga_ = sa(1) + sa(2)*tc0 + sa(5)*tc0*tc0 + sa(9)*tc0*tc0*tc0 + a_ = 10.0**loga_ + b_ = sb(1) + sb(2)*tc0 + sb(5)*tc0*tc0 + sb(9)*tc0*tc0*tc0 + smo0(k) = a_ * smo2(k)**b_ + +!..Calculate 1st moment. Useful for depositional growth and melting. + loga_ = sa(1) + sa(2)*tc0 + sa(3) & + + sa(4)*tc0 + sa(5)*tc0*tc0 & + + sa(6) + sa(7)*tc0*tc0 & + + sa(8)*tc0 + sa(9)*tc0*tc0*tc0 & + + sa(10) + a_ = 10.0**loga_ + b_ = sb(1)+ sb(2)*tc0 + sb(3) + sb(4)*tc0 & + + sb(5)*tc0*tc0 + sb(6) & + + sb(7)*tc0*tc0 + sb(8)*tc0 & + + sb(9)*tc0*tc0*tc0 + sb(10) + smo1(k) = a_ * smo2(k)**b_ + +!..Calculate bm_s+1 (th) moment. Useful for diameter calcs. + loga_ = sa(1) + sa(2)*tc0 + sa(3)*cse(1) & + + sa(4)*tc0*cse(1) + sa(5)*tc0*tc0 & + + sa(6)*cse(1)*cse(1) + sa(7)*tc0*tc0*cse(1) & + + sa(8)*tc0*cse(1)*cse(1) + sa(9)*tc0*tc0*tc0 & + + sa(10)*cse(1)*cse(1)*cse(1) + a_ = 10.0**loga_ + b_ = sb(1)+ sb(2)*tc0 + sb(3)*cse(1) + sb(4)*tc0*cse(1) & + + sb(5)*tc0*tc0 + sb(6)*cse(1)*cse(1) & + + sb(7)*tc0*tc0*cse(1) + sb(8)*tc0*cse(1)*cse(1) & + + sb(9)*tc0*tc0*tc0 + sb(10)*cse(1)*cse(1)*cse(1) + smoc(k) = a_ * smo2(k)**b_ + +!..Calculate bv_s+2 (th) moment. 
Useful for riming. + loga_ = sa(1) + sa(2)*tc0 + sa(3)*cse(13) & + + sa(4)*tc0*cse(13) + sa(5)*tc0*tc0 & + + sa(6)*cse(13)*cse(13) + sa(7)*tc0*tc0*cse(13) & + + sa(8)*tc0*cse(13)*cse(13) + sa(9)*tc0*tc0*tc0 & + + sa(10)*cse(13)*cse(13)*cse(13) + a_ = 10.0**loga_ + b_ = sb(1)+ sb(2)*tc0 + sb(3)*cse(13) + sb(4)*tc0*cse(13) & + + sb(5)*tc0*tc0 + sb(6)*cse(13)*cse(13) & + + sb(7)*tc0*tc0*cse(13) + sb(8)*tc0*cse(13)*cse(13) & + + sb(9)*tc0*tc0*tc0 + sb(10)*cse(13)*cse(13)*cse(13) + smoe(k) = a_ * smo2(k)**b_ + +!..Calculate 1+(bv_s+1)/2 (th) moment. Useful for depositional growth. + loga_ = sa(1) + sa(2)*tc0 + sa(3)*cse(16) & + + sa(4)*tc0*cse(16) + sa(5)*tc0*tc0 & + + sa(6)*cse(16)*cse(16) + sa(7)*tc0*tc0*cse(16) & + + sa(8)*tc0*cse(16)*cse(16) + sa(9)*tc0*tc0*tc0 & + + sa(10)*cse(16)*cse(16)*cse(16) + a_ = 10.0**loga_ + b_ = sb(1)+ sb(2)*tc0 + sb(3)*cse(16) + sb(4)*tc0*cse(16) & + + sb(5)*tc0*tc0 + sb(6)*cse(16)*cse(16) & + + sb(7)*tc0*tc0*cse(16) + sb(8)*tc0*cse(16)*cse(16) & + + sb(9)*tc0*tc0*tc0 + sb(10)*cse(16)*cse(16)*cse(16) + smof(k) = a_ * smo2(k)**b_ + + enddo + +!+---+-----------------------------------------------------------------+ +!..Calculate y-intercept, slope values for graupel. +!+---+-----------------------------------------------------------------+ + N0_min = gonv_max + k_0 = kts + do k = kte, kts, -1 + if (temp(k).ge.270.65) k_0 = MAX(k_0, k) + enddo + do k = kte, kts, -1 + if (k.gt.k_0 .and. L_qr(k) .and. 
mvd_r(k).gt.100.E-6) then + xslw1 = 4.01 + alog10(mvd_r(k)) + else + xslw1 = 0.01 + endif + ygra1 = 4.31 + alog10(max(5.E-5, rg(k))) + zans1 = 3.1 + (100./(300.*xslw1*ygra1/(10./xslw1+1.+0.25*ygra1)+30.+10.*ygra1)) + N0_exp = 10.**(zans1) + N0_exp = MAX(DBLE(gonv_min), MIN(N0_exp, DBLE(gonv_max))) + N0_min = MIN(N0_exp, N0_min) + N0_exp = N0_min + lam_exp = (N0_exp*am_g*cgg(1)/rg(k))**oge1 + lamg = lam_exp * (cgg(3)*ogg2*ogg1)**obmg + ilamg(k) = 1./lamg + N0_g(k) = N0_exp/(cgg(2)*lam_exp) * lamg**cge(2) + enddo + + endif + +!+---+-----------------------------------------------------------------+ +!..Calculate y-intercept, slope values for rain. +!+---+-----------------------------------------------------------------+ + do k = kte, kts, -1 + lamr = (am_r*crg(3)*org2*nr(k)/rr(k))**obmr + ilamr(k) = 1./lamr + mvd_r(k) = (3.0 + mu_r + 0.672) / lamr + N0_r(k) = nr(k)*org2*lamr**cre(2) + enddo + +!+---+-----------------------------------------------------------------+ +!..Compute warm-rain process terms (except evap done later). +!+---+-----------------------------------------------------------------+ + + do k = kts, kte + +!..Rain self-collection follows Seifert, 1994 and drop break-up +!.. follows Verlinde and Cotton, 1993. RAIN2M + if (L_qr(k) .and. mvd_r(k).gt. D0r) then +!-GT Ef_rr = 1.0 +!-GT if (mvd_r(k) .gt. 1500.0E-6) then + Ef_rr = 1.0 - EXP(2300.0*(mvd_r(k)-1950.0E-6)) +!-GT endif + pnr_rcr(k) = Ef_rr * 2.0*nr(k)*rr(k) + endif + + mvd_c(k) = D0c + if (L_qc(k)) then + nu_c = MIN(15, NINT(1000.E6/nc(k)) + 2) + xDc = MAX(D0c*1.E6, ((rc(k)/(am_r*nc(k)))**obmr) * 1.E6) + lamc = (nc(k)*am_r* ccg(2,nu_c) * ocg1(nu_c) / rc(k))**obmr + mvd_c(k) = (3.0+nu_c+0.672) / lamc + endif + +!..Autoconversion follows Berry & Reinhardt (1974) with characteristic +!.. diameters correctly computed from gamma distrib of cloud droplets. + if (rc(k).gt. 
0.01e-3) then + Dc_g = ((ccg(3,nu_c)*ocg2(nu_c))**obmr / lamc) * 1.E6 + Dc_b = (xDc*xDc*xDc*Dc_g*Dc_g*Dc_g - xDc*xDc*xDc*xDc*xDc*xDc) & + **(1./6.) + zeta1 = 0.5*((6.25E-6*xDc*Dc_b*Dc_b*Dc_b - 0.4) & + + abs(6.25E-6*xDc*Dc_b*Dc_b*Dc_b - 0.4)) + zeta = 0.027*rc(k)*zeta1 + taud = 0.5*((0.5*Dc_b - 7.5) + abs(0.5*Dc_b - 7.5)) + R1 + tau = 3.72/(rc(k)*taud) + prr_wau(k) = zeta/tau + prr_wau(k) = MIN(DBLE(rc(k)*odts), prr_wau(k)) + pnr_wau(k) = prr_wau(k) / (am_r*nu_c*D0r*D0r*D0r) ! RAIN2M + pnc_wau(k) = MIN(DBLE(nc(k)*odts), prr_wau(k) & + / (am_r*mvd_c(k)*mvd_c(k)*mvd_c(k))) ! Qc2M + endif + +!..Rain collecting cloud water. In CE, assume Dc<1). Either way, only bother to do sedimentation below +!.. 1st level that contains any sedimenting particles (k=ksed1 on down). +!.. New in v3.0+ is computing separate for rain, ice, snow, and +!.. graupel species thus making code faster with credit to J. Schmidt. +!+---+-----------------------------------------------------------------+ + nstep = 0 + onstep(:) = 1.0 + ksed1(:) = 1 + do k = kte+1, kts, -1 + vtrk(k) = 0. + vtnrk(k) = 0. + vtik(k) = 0. + vtnik(k) = 0. + vtsk(k) = 0. + vtgk(k) = 0. + vtck(k) = 0. + vtnck(k) = 0. + enddo + do k = kte, kts, -1 + vtr = 0. + rhof(k) = SQRT(RHO_NOT/rho(k)) + + if (rr(k).gt. R1) then + lamr = (am_r*crg(3)*org2*nr(k)/rr(k))**obmr + vtr = rhof(k)*av_r*crg(6)*org3 * lamr**cre(3) & + *((lamr+fv_r)**(-cre(6))) + vtrk(k) = vtr +! First below is technically correct: +! vtr = rhof(k)*av_r*crg(5)*org2 * lamr**cre(2) & +! *((lamr+fv_r)**(-cre(5))) +! Test: make number fall faster (but still slower than mass) +! Goal: less prominent size sorting + vtr = rhof(k)*av_r*crg(7)/crg(12) * lamr**cre(12) & + *((lamr+fv_r)**(-cre(7))) + vtnrk(k) = vtr + else + vtrk(k) = vtrk(k+1) + vtnrk(k) = vtnrk(k+1) + endif + + if (MAX(vtrk(k),vtnrk(k)) .gt. 1.E-3) then + ksed1(1) = MAX(ksed1(1), k) + delta_tp = dzq(k)/(MAX(vtrk(k),vtnrk(k))) + nstep = MAX(nstep, INT(DT/delta_tp + 1.)) + endif + enddo + if (ksed1(1) .eq. 
kte) ksed1(1) = kte-1 + if (nstep .gt. 0) onstep(1) = 1./REAL(nstep) + +!+---+-----------------------------------------------------------------+ + + hgt_agl = 0. + do k = kts, kte-1 + if (rc(k) .gt. R2) ksed1(5) = k + hgt_agl = hgt_agl + dzq(k) + if (hgt_agl .gt. 500.0) goto 151 + enddo + 151 continue + + do k = ksed1(5), kts, -1 + vtc = 0. + if (rc(k) .gt. R1 .and. w1d(k) .lt. 1.E-1) then + nu_c = MIN(15, NINT(1000.E6/nc(k)) + 2) + lamc = (nc(k)*am_r*ccg(2,nu_c)*ocg1(nu_c)/rc(k))**obmr + ilamc = 1./lamc + vtc = rhof(k)*av_c*ccg(5,nu_c)*ocg2(nu_c) * ilamc**bv_c + vtck(k) = vtc + vtc = rhof(k)*av_c*ccg(4,nu_c)*ocg1(nu_c) * ilamc**bv_c + vtnck(k) = vtc + endif + enddo + +!+---+-----------------------------------------------------------------+ + + if (.not. iiwarm) then + + nstep = 0 + do k = kte, kts, -1 + vti = 0. + + if (ri(k).gt. R1) then + lami = (am_i*cig(2)*oig1*ni(k)/ri(k))**obmi + ilami = 1./lami + vti = rhof(k)*av_i*cig(3)*oig2 * ilami**bv_i + vtik(k) = vti +! First below is technically correct: +! vti = rhof(k)*av_i*cig(4)*oig1 * ilami**bv_i +! Goal: less prominent size sorting + vti = rhof(k)*av_i*cig(6)/cig(7) * ilami**bv_i + vtnik(k) = vti + else + vtik(k) = vtik(k+1) + vtnik(k) = vtnik(k+1) + endif + + if (vtik(k) .gt. 1.E-3) then + ksed1(2) = MAX(ksed1(2), k) + delta_tp = dzq(k)/vtik(k) + nstep = MAX(nstep, INT(DT/delta_tp + 1.)) + endif + enddo + if (ksed1(2) .eq. kte) ksed1(2) = kte-1 + if (nstep .gt. 0) onstep(2) = 1./REAL(nstep) + +!+---+-----------------------------------------------------------------+ + + nstep = 0 + do k = kte, kts, -1 + vts = 0. + + if (rs(k).gt. 
R1) then + xDs = smoc(k) / smob(k) + Mrat = 1./xDs + ils1 = 1./(Mrat*Lam0 + fv_s) + ils2 = 1./(Mrat*Lam1 + fv_s) + t1_vts = Kap0*csg(4)*ils1**cse(4) + t2_vts = Kap1*Mrat**mu_s*csg(10)*ils2**cse(10) + ils1 = 1./(Mrat*Lam0) + ils2 = 1./(Mrat*Lam1) + t3_vts = Kap0*csg(1)*ils1**cse(1) + t4_vts = Kap1*Mrat**mu_s*csg(7)*ils2**cse(7) + vts = rhof(k)*av_s * (t1_vts+t2_vts)/(t3_vts+t4_vts) + if (temp(k).gt. (T_0+0.1)) then + vtsk(k) = MAX(vts*vts_boost(k), & + & vts*((vtrk(k)-vts*vts_boost(k))/(temp(k)-T_0))) + else + vtsk(k) = vts*vts_boost(k) + endif + else + vtsk(k) = vtsk(k+1) + endif + + if (vtsk(k) .gt. 1.E-3) then + ksed1(3) = MAX(ksed1(3), k) + delta_tp = dzq(k)/vtsk(k) + nstep = MAX(nstep, INT(DT/delta_tp + 1.)) + endif + enddo + if (ksed1(3) .eq. kte) ksed1(3) = kte-1 + if (nstep .gt. 0) onstep(3) = 1./REAL(nstep) + +!+---+-----------------------------------------------------------------+ + + nstep = 0 + do k = kte, kts, -1 + vtg = 0. + + if (rg(k).gt. R1) then + vtg = rhof(k)*av_g*cgg(6)*ogg3 * ilamg(k)**bv_g + if (temp(k).gt. T_0) then + vtgk(k) = MAX(vtg, vtrk(k)) + else + vtgk(k) = vtg + endif + else + vtgk(k) = vtgk(k+1) + endif + + if (vtgk(k) .gt. 1.E-3) then + ksed1(4) = MAX(ksed1(4), k) + delta_tp = dzq(k)/vtgk(k) + nstep = MAX(nstep, INT(DT/delta_tp + 1.)) + endif + enddo + if (ksed1(4) .eq. kte) ksed1(4) = kte-1 + if (nstep .gt. 0) onstep(4) = 1./REAL(nstep) + endif + +!+---+-----------------------------------------------------------------+ +!..Sedimentation of mixing ratio is the integral of v(D)*m(D)*N(D)*dD, +!.. whereas neglect m(D) term for number concentration. Therefore, +!.. cloud ice has proper differential sedimentation. +!.. New in v3.0+ is computing separate for rain, ice, snow, and +!.. graupel species thus making code faster with credit to J. Schmidt. +!.. Bug fix, 2013Nov01 to tendencies using rho(k+1) correction thanks to +!.. Eric Skyllingstad. 
+!+---+-----------------------------------------------------------------+ + + nstep = NINT(1./onstep(1)) + do n = 1, nstep + do k = kte, kts, -1 + sed_r(k) = vtrk(k)*rr(k) + sed_n(k) = vtnrk(k)*nr(k) + enddo + k = kte + odzq = 1./dzq(k) + orho = 1./rho(k) + qrten(k) = qrten(k) - sed_r(k)*odzq*onstep(1)*orho + nrten(k) = nrten(k) - sed_n(k)*odzq*onstep(1)*orho + rr(k) = MAX(R1, rr(k) - sed_r(k)*odzq*DT*onstep(1)) + nr(k) = MAX(R2, nr(k) - sed_n(k)*odzq*DT*onstep(1)) + do k = ksed1(1), kts, -1 + odzq = 1./dzq(k) + orho = 1./rho(k) + qrten(k) = qrten(k) + (sed_r(k+1)-sed_r(k)) & + *odzq*onstep(1)*orho + nrten(k) = nrten(k) + (sed_n(k+1)-sed_n(k)) & + *odzq*onstep(1)*orho + rr(k) = MAX(R1, rr(k) + (sed_r(k+1)-sed_r(k)) & + *odzq*DT*onstep(1)) + nr(k) = MAX(R2, nr(k) + (sed_n(k+1)-sed_n(k)) & + *odzq*DT*onstep(1)) + enddo + + if (rr(kts).gt.R1*10.) & + pptrain = pptrain + sed_r(kts)*DT*onstep(1) + enddo + +!+---+-----------------------------------------------------------------+ + + do k = kte, kts, -1 + sed_c(k) = vtck(k)*rc(k) + sed_n(k) = vtnck(k)*nc(k) + enddo + do k = ksed1(5), kts, -1 + odzq = 1./dzq(k) + orho = 1./rho(k) + qcten(k) = qcten(k) + (sed_c(k+1)-sed_c(k)) *odzq*orho + ncten(k) = ncten(k) + (sed_n(k+1)-sed_n(k)) *odzq*orho + rc(k) = MAX(R1, rc(k) + (sed_c(k+1)-sed_c(k)) *odzq*DT) + nc(k) = MAX(10., nc(k) + (sed_n(k+1)-sed_n(k)) *odzq*DT) + enddo + +!+---+-----------------------------------------------------------------+ + + nstep = NINT(1./onstep(2)) + do n = 1, nstep + do k = kte, kts, -1 + sed_i(k) = vtik(k)*ri(k) + sed_n(k) = vtnik(k)*ni(k) + enddo + k = kte + odzq = 1./dzq(k) + orho = 1./rho(k) + qiten(k) = qiten(k) - sed_i(k)*odzq*onstep(2)*orho + niten(k) = niten(k) - sed_n(k)*odzq*onstep(2)*orho + ri(k) = MAX(R1, ri(k) - sed_i(k)*odzq*DT*onstep(2)) + ni(k) = MAX(R2, ni(k) - sed_n(k)*odzq*DT*onstep(2)) + do k = ksed1(2), kts, -1 + odzq = 1./dzq(k) + orho = 1./rho(k) + qiten(k) = qiten(k) + (sed_i(k+1)-sed_i(k)) & + *odzq*onstep(2)*orho + niten(k) = 
niten(k) + (sed_n(k+1)-sed_n(k)) & + *odzq*onstep(2)*orho + ri(k) = MAX(R1, ri(k) + (sed_i(k+1)-sed_i(k)) & + *odzq*DT*onstep(2)) + ni(k) = MAX(R2, ni(k) + (sed_n(k+1)-sed_n(k)) & + *odzq*DT*onstep(2)) + enddo + + if (ri(kts).gt.R1*10.) & + pptice = pptice + sed_i(kts)*DT*onstep(2) + enddo + +!+---+-----------------------------------------------------------------+ + + nstep = NINT(1./onstep(3)) + do n = 1, nstep + do k = kte, kts, -1 + sed_s(k) = vtsk(k)*rs(k) + enddo + k = kte + odzq = 1./dzq(k) + orho = 1./rho(k) + qsten(k) = qsten(k) - sed_s(k)*odzq*onstep(3)*orho + rs(k) = MAX(R1, rs(k) - sed_s(k)*odzq*DT*onstep(3)) + do k = ksed1(3), kts, -1 + odzq = 1./dzq(k) + orho = 1./rho(k) + qsten(k) = qsten(k) + (sed_s(k+1)-sed_s(k)) & + *odzq*onstep(3)*orho + rs(k) = MAX(R1, rs(k) + (sed_s(k+1)-sed_s(k)) & + *odzq*DT*onstep(3)) + enddo + + if (rs(kts).gt.R1*10.) & + pptsnow = pptsnow + sed_s(kts)*DT*onstep(3) + enddo + +!+---+-----------------------------------------------------------------+ + + nstep = NINT(1./onstep(4)) + do n = 1, nstep + do k = kte, kts, -1 + sed_g(k) = vtgk(k)*rg(k) + enddo + k = kte + odzq = 1./dzq(k) + orho = 1./rho(k) + qgten(k) = qgten(k) - sed_g(k)*odzq*onstep(4)*orho + rg(k) = MAX(R1, rg(k) - sed_g(k)*odzq*DT*onstep(4)) + do k = ksed1(4), kts, -1 + odzq = 1./dzq(k) + orho = 1./rho(k) + qgten(k) = qgten(k) + (sed_g(k+1)-sed_g(k)) & + *odzq*onstep(4)*orho + rg(k) = MAX(R1, rg(k) + (sed_g(k+1)-sed_g(k)) & + *odzq*DT*onstep(4)) + enddo + + if (rg(kts).gt.R1*10.) & + pptgraul = pptgraul + sed_g(kts)*DT*onstep(4) + enddo + +!+---+-----------------------------------------------------------------+ +!.. Instantly melt any cloud ice into cloud water if above 0C and +!.. instantly freeze any cloud water found below HGFR. +!+---+-----------------------------------------------------------------+ + if (.not. iiwarm) then + do k = kts, kte + xri = MAX(0.0, qi1d(k) + qiten(k)*DT) + if ( (temp(k).gt. T_0) .and. (xri.gt. 
0.0) ) then + qcten(k) = qcten(k) + xri*odt + ncten(k) = ncten(k) + ni1d(k)*odt + qiten(k) = qiten(k) - xri*odt + niten(k) = -ni1d(k)*odt + tten(k) = tten(k) - lfus*ocp(k)*xri*odt*(1-IFDRY) + endif + + xrc = MAX(0.0, qc1d(k) + qcten(k)*DT) + if ( (temp(k).lt. HGFR) .and. (xrc.gt. 0.0) ) then + lfus2 = lsub - lvap(k) + xnc = nc1d(k) + ncten(k)*DT + qiten(k) = qiten(k) + xrc*odt + niten(k) = niten(k) + xnc*odt + qcten(k) = qcten(k) - xrc*odt + ncten(k) = ncten(k) - xnc*odt + tten(k) = tten(k) + lfus2*ocp(k)*xrc*odt*(1-IFDRY) + endif + enddo + endif + +!+---+-----------------------------------------------------------------+ +!.. All tendencies computed, apply and pass back final values to parent. +!+---+-----------------------------------------------------------------+ + do k = kts, kte + t1d(k) = t1d(k) + tten(k)*DT + qv1d(k) = MAX(1.E-10, qv1d(k) + qvten(k)*DT) + qc1d(k) = qc1d(k) + qcten(k)*DT + nc1d(k) = MAX(2./rho(k), nc1d(k) + ncten(k)*DT) + nwfa1d(k) = MAX(11.1E6/rho(k), MIN(9999.E6/rho(k), & + (nwfa1d(k)+nwfaten(k)*DT))) + nifa1d(k) = MAX(naIN1*0.01, MIN(9999.E6/rho(k), & + (nifa1d(k)+nifaten(k)*DT))) + + if (qc1d(k) .le. R1) then + qc1d(k) = 0.0 + nc1d(k) = 0.0 + else + nu_c = MIN(15, NINT(1000.E6/(nc1d(k)*rho(k))) + 2) + lamc = (am_r*ccg(2,nu_c)*ocg1(nu_c)*nc1d(k)/qc1d(k))**obmr + xDc = (bm_r + nu_c + 1.) / lamc + if (xDc.lt. D0c) then + lamc = cce(2,nu_c)/D0c + elseif (xDc.gt. D0r*2.) then + lamc = cce(2,nu_c)/(D0r*2.) + endif + nc1d(k) = MIN(ccg(1,nu_c)*ocg2(nu_c)*qc1d(k)/am_r*lamc**bm_r,& + DBLE(Nt_c_max)/rho(k)) + endif + + qi1d(k) = qi1d(k) + qiten(k)*DT + ni1d(k) = MAX(R2/rho(k), ni1d(k) + niten(k)*DT) + if (qi1d(k) .le. R1) then + qi1d(k) = 0.0 + ni1d(k) = 0.0 + else + lami = (am_i*cig(2)*oig1*ni1d(k)/qi1d(k))**obmi + ilami = 1./lami + xDi = (bm_i + mu_i + 1.) * ilami + if (xDi.lt. 5.E-6) then + lami = cie(2)/5.E-6 + elseif (xDi.gt. 
300.E-6) then + lami = cie(2)/300.E-6 + endif + ni1d(k) = MIN(cig(1)*oig2*qi1d(k)/am_i*lami**bm_i, & + 499.D3/rho(k)) + endif + qr1d(k) = qr1d(k) + qrten(k)*DT + nr1d(k) = MAX(R2/rho(k), nr1d(k) + nrten(k)*DT) + if (qr1d(k) .le. R1) then + qr1d(k) = 0.0 + nr1d(k) = 0.0 + else + lamr = (am_r*crg(3)*org2*nr1d(k)/qr1d(k))**obmr + mvd_r(k) = (3.0 + mu_r + 0.672) / lamr + if (mvd_r(k) .gt. 2.5E-3) then + mvd_r(k) = 2.5E-3 + elseif (mvd_r(k) .lt. D0r*0.75) then + mvd_r(k) = D0r*0.75 + endif + lamr = (3.0 + mu_r + 0.672) / mvd_r(k) + nr1d(k) = crg(2)*org3*qr1d(k)*lamr**bm_r / am_r + endif + qs1d(k) = qs1d(k) + qsten(k)*DT + if (qs1d(k) .le. R1) qs1d(k) = 0.0 + qg1d(k) = qg1d(k) + qgten(k)*DT + if (qg1d(k) .le. R1) qg1d(k) = 0.0 + enddo + + end subroutine mp_thompson +!+---+-----------------------------------------------------------------+ +!ctrlL +!+---+-----------------------------------------------------------------+ +!..Creation of the lookup tables and support functions found below here. +!+---+-----------------------------------------------------------------+ +!..Rain collecting graupel (and inverse). Explicit CE integration. +!+---+-----------------------------------------------------------------+ + + subroutine qr_acr_qg + + implicit none + +!..Local variables + INTEGER:: i, j, k, m, n, n2 + INTEGER:: km, km_s, km_e + DOUBLE PRECISION, DIMENSION(nbg):: vg, N_g + DOUBLE PRECISION, DIMENSION(nbr):: vr, N_r + DOUBLE PRECISION:: N0_r, N0_g, lam_exp, lamg, lamr + DOUBLE PRECISION:: massg, massr, dvg, dvr, t1, t2, z1, z2, y1, y2 + +!+---+ + + do n2 = 1, nbr +! 
vr(n2) = av_r*Dr(n2)**bv_r * DEXP(-fv_r*Dr(n2)) + vr(n2) = -0.1021 + 4.932E3*Dr(n2) - 0.9551E6*Dr(n2)*Dr(n2) & + + 0.07934E9*Dr(n2)*Dr(n2)*Dr(n2) & + - 0.002362E12*Dr(n2)*Dr(n2)*Dr(n2)*Dr(n2) + enddo + do n = 1, nbg + vg(n) = av_g*Dg(n)**bv_g + enddo + + km_s = 0 + km_e = ntb_r*ntb_r1 - 1 + + do km = km_s, km_e + m = km / ntb_r1 + 1 + k = mod( km , ntb_r1 ) + 1 + + lam_exp = (N0r_exp(k)*am_r*crg(1)/r_r(m))**ore1 + lamr = lam_exp * (crg(3)*org2*org1)**obmr + N0_r = N0r_exp(k)/(crg(2)*lam_exp) * lamr**cre(2) + do n2 = 1, nbr + N_r(n2) = N0_r*Dr(n2)**mu_r *DEXP(-lamr*Dr(n2))*dtr(n2) + enddo + + do j = 1, ntb_g + do i = 1, ntb_g1 + lam_exp = (N0g_exp(i)*am_g*cgg(1)/r_g(j))**oge1 + lamg = lam_exp * (cgg(3)*ogg2*ogg1)**obmg + N0_g = N0g_exp(i)/(cgg(2)*lam_exp) * lamg**cge(2) + do n = 1, nbg + N_g(n) = N0_g*Dg(n)**mu_g * DEXP(-lamg*Dg(n))*dtg(n) + enddo + + t1 = 0.0d0 + t2 = 0.0d0 + z1 = 0.0d0 + z2 = 0.0d0 + y1 = 0.0d0 + y2 = 0.0d0 + do n2 = 1, nbr + massr = am_r * Dr(n2)**bm_r + do n = 1, nbg + massg = am_g * Dg(n)**bm_g + + dvg = 0.5d0*((vr(n2) - vg(n)) + DABS(vr(n2)-vg(n))) + dvr = 0.5d0*((vg(n) - vr(n2)) + DABS(vg(n)-vr(n2))) + + t1 = t1+ PI*.25*Ef_rg*(Dg(n)+Dr(n2))*(Dg(n)+Dr(n2)) & + *dvg*massg * N_g(n)* N_r(n2) + z1 = z1+ PI*.25*Ef_rg*(Dg(n)+Dr(n2))*(Dg(n)+Dr(n2)) & + *dvg*massr * N_g(n)* N_r(n2) + y1 = y1+ PI*.25*Ef_rg*(Dg(n)+Dr(n2))*(Dg(n)+Dr(n2)) & + *dvg * N_g(n)* N_r(n2) + + t2 = t2+ PI*.25*Ef_rg*(Dg(n)+Dr(n2))*(Dg(n)+Dr(n2)) & + *dvr*massr * N_g(n)* N_r(n2) + y2 = y2+ PI*.25*Ef_rg*(Dg(n)+Dr(n2))*(Dg(n)+Dr(n2)) & + *dvr * N_g(n)* N_r(n2) + z2 = z2+ PI*.25*Ef_rg*(Dg(n)+Dr(n2))*(Dg(n)+Dr(n2)) & + *dvr*massg * N_g(n)* N_r(n2) + enddo + 97 continue + enddo + tcg_racg(i,j,k,m) = t1 + tmr_racg(i,j,k,m) = DMIN1(z1, r_r(m)*1.0d0) + tcr_gacr(i,j,k,m) = t2 + tmg_gacr(i,j,k,m) = z2 + tnr_racg(i,j,k,m) = y1 + tnr_gacr(i,j,k,m) = y2 + enddo + enddo + enddo + + end subroutine qr_acr_qg +!+---+-----------------------------------------------------------------+ +!ctrlL 
+!+---+-----------------------------------------------------------------+ +!..Rain collecting snow (and inverse). Explicit CE integration. +!+---+-----------------------------------------------------------------+ + + subroutine qr_acr_qs + + implicit none + +!..Local variables + INTEGER:: i, j, k, m, n, n2 + INTEGER:: km, km_s, km_e + DOUBLE PRECISION, DIMENSION(nbr):: vr, D1, N_r + DOUBLE PRECISION, DIMENSION(nbs):: vs, N_s + DOUBLE PRECISION:: loga_, a_, b_, second, M0, M2, M3, Mrat, oM3 + DOUBLE PRECISION:: N0_r, lam_exp, lamr, slam1, slam2 + DOUBLE PRECISION:: dvs, dvr, masss, massr + DOUBLE PRECISION:: t1, t2, t3, t4, z1, z2, z3, z4 + DOUBLE PRECISION:: y1, y2, y3, y4 + +!+---+ + + do n2 = 1, nbr +! vr(n2) = av_r*Dr(n2)**bv_r * DEXP(-fv_r*Dr(n2)) + vr(n2) = -0.1021 + 4.932E3*Dr(n2) - 0.9551E6*Dr(n2)*Dr(n2) & + + 0.07934E9*Dr(n2)*Dr(n2)*Dr(n2) & + - 0.002362E12*Dr(n2)*Dr(n2)*Dr(n2)*Dr(n2) + D1(n2) = (vr(n2)/av_s)**(1./bv_s) + enddo + do n = 1, nbs + vs(n) = 1.5*av_s*Ds(n)**bv_s * DEXP(-fv_s*Ds(n)) + enddo + + km_s = 0 + km_e = ntb_r*ntb_r1 - 1 + + do km = km_s, km_e + m = km / ntb_r1 + 1 + k = mod( km , ntb_r1 ) + 1 + + lam_exp = (N0r_exp(k)*am_r*crg(1)/r_r(m))**ore1 + lamr = lam_exp * (crg(3)*org2*org1)**obmr + N0_r = N0r_exp(k)/(crg(2)*lam_exp) * lamr**cre(2) + do n2 = 1, nbr + N_r(n2) = N0_r*Dr(n2)**mu_r * DEXP(-lamr*Dr(n2))*dtr(n2) + enddo + + do j = 1, ntb_t + do i = 1, ntb_s + +!..From the bm_s moment, compute plus one moment. If we are not +!.. using bm_s=2, then we must transform to the pure 2nd moment +!.. (variable called "second") and then to the bm_s+1 moment. + + M2 = r_s(i)*oams *1.0d0 + if (bm_s.gt.2.0-1.E-3 .and. 
bm_s.lt.2.0+1.E-3) then + loga_ = sa(1) + sa(2)*Tc(j) + sa(3)*bm_s & + + sa(4)*Tc(j)*bm_s + sa(5)*Tc(j)*Tc(j) & + + sa(6)*bm_s*bm_s + sa(7)*Tc(j)*Tc(j)*bm_s & + + sa(8)*Tc(j)*bm_s*bm_s + sa(9)*Tc(j)*Tc(j)*Tc(j) & + + sa(10)*bm_s*bm_s*bm_s + a_ = 10.0**loga_ + b_ = sb(1) + sb(2)*Tc(j) + sb(3)*bm_s & + + sb(4)*Tc(j)*bm_s + sb(5)*Tc(j)*Tc(j) & + + sb(6)*bm_s*bm_s + sb(7)*Tc(j)*Tc(j)*bm_s & + + sb(8)*Tc(j)*bm_s*bm_s + sb(9)*Tc(j)*Tc(j)*Tc(j) & + + sb(10)*bm_s*bm_s*bm_s + second = (M2/a_)**(1./b_) + else + second = M2 + endif + + loga_ = sa(1) + sa(2)*Tc(j) + sa(3)*cse(1) & + + sa(4)*Tc(j)*cse(1) + sa(5)*Tc(j)*Tc(j) & + + sa(6)*cse(1)*cse(1) + sa(7)*Tc(j)*Tc(j)*cse(1) & + + sa(8)*Tc(j)*cse(1)*cse(1) + sa(9)*Tc(j)*Tc(j)*Tc(j) & + + sa(10)*cse(1)*cse(1)*cse(1) + a_ = 10.0**loga_ + b_ = sb(1)+sb(2)*Tc(j)+sb(3)*cse(1) + sb(4)*Tc(j)*cse(1) & + + sb(5)*Tc(j)*Tc(j) + sb(6)*cse(1)*cse(1) & + + sb(7)*Tc(j)*Tc(j)*cse(1) + sb(8)*Tc(j)*cse(1)*cse(1) & + + sb(9)*Tc(j)*Tc(j)*Tc(j)+sb(10)*cse(1)*cse(1)*cse(1) + M3 = a_ * second**b_ + + oM3 = 1./M3 + Mrat = M2*(M2*oM3)*(M2*oM3)*(M2*oM3) + M0 = (M2*oM3)**mu_s + slam1 = M2 * oM3 * Lam0 + slam2 = M2 * oM3 * Lam1 + + do n = 1, nbs + N_s(n) = Mrat*(Kap0*DEXP(-slam1*Ds(n)) & + + Kap1*M0*Ds(n)**mu_s * DEXP(-slam2*Ds(n)))*dts(n) + enddo + + t1 = 0.0d0 + t2 = 0.0d0 + t3 = 0.0d0 + t4 = 0.0d0 + z1 = 0.0d0 + z2 = 0.0d0 + z3 = 0.0d0 + z4 = 0.0d0 + y1 = 0.0d0 + y2 = 0.0d0 + y3 = 0.0d0 + y4 = 0.0d0 + do n2 = 1, nbr + massr = am_r * Dr(n2)**bm_r + do n = 1, nbs + masss = am_s * Ds(n)**bm_s + + dvs = 0.5d0*((vr(n2) - vs(n)) + DABS(vr(n2)-vs(n))) + dvr = 0.5d0*((vs(n) - vr(n2)) + DABS(vs(n)-vr(n2))) + + if (massr .gt. 
1.5*masss) then + t1 = t1+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvs*masss * N_s(n)* N_r(n2) + z1 = z1+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvs*massr * N_s(n)* N_r(n2) + y1 = y1+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvs * N_s(n)* N_r(n2) + else + t3 = t3+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvs*masss * N_s(n)* N_r(n2) + z3 = z3+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvs*massr * N_s(n)* N_r(n2) + y3 = y3+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvs * N_s(n)* N_r(n2) + endif + + if (massr .gt. 1.5*masss) then + t2 = t2+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvr*massr * N_s(n)* N_r(n2) + y2 = y2+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvr * N_s(n)* N_r(n2) + z2 = z2+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvr*masss * N_s(n)* N_r(n2) + else + t4 = t4+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvr*massr * N_s(n)* N_r(n2) + y4 = y4+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvr * N_s(n)* N_r(n2) + z4 = z4+ PI*.25*Ef_rs*(Ds(n)+Dr(n2))*(Ds(n)+Dr(n2)) & + *dvr*masss * N_s(n)* N_r(n2) + endif + + enddo + enddo + tcs_racs1(i,j,k,m) = t1 + tmr_racs1(i,j,k,m) = DMIN1(z1, r_r(m)*1.0d0) + tcs_racs2(i,j,k,m) = t3 + tmr_racs2(i,j,k,m) = z3 + tcr_sacr1(i,j,k,m) = t2 + tms_sacr1(i,j,k,m) = z2 + tcr_sacr2(i,j,k,m) = t4 + tms_sacr2(i,j,k,m) = z4 + tnr_racs1(i,j,k,m) = y1 + tnr_racs2(i,j,k,m) = y3 + tnr_sacr1(i,j,k,m) = y2 + tnr_sacr2(i,j,k,m) = y4 + enddo + enddo + enddo + + end subroutine qr_acr_qs +!+---+-----------------------------------------------------------------+ +!ctrlL +!+---+-----------------------------------------------------------------+ +!..This is a literal adaptation of Bigg (1954) probability of drops of +!..a particular volume freezing. Given this probability, simply freeze +!..the proportion of drops summing their masses. 
!+---+-----------------------------------------------------------------+

      subroutine freezeH2O
!..Builds lookup tables of probabilistic (Bigg-type) freezing of rain
!.. and cloud drops as a function of water content/spectrum indices,
!.. supercooling k (1..45 deg below freezing; see the -k print below)
!.. and the Nt_IN bin m.  Rain drops with mass below xm0g freeze to
!.. cloud ice (tpi_qrfz/tni_qrfz), heavier ones to graupel
!.. (tpg_qrfz/tnr_qrfz); cloud drops freeze to ice (tpi_qcfz/tni_qcfz).

      implicit none

!..Local variables
      INTEGER:: i, j, k, m, n, n2
      DOUBLE PRECISION, DIMENSION(nbr):: N_r, massr
      DOUBLE PRECISION, DIMENSION(nbc):: N_c, massc
      DOUBLE PRECISION:: sum1, sum2, sumn1, sumn2, &
                         prob, vol, Texp, orho_w, &
                         lam_exp, lamr, N0_r, lamc, N0_c, y
      INTEGER:: nu_c
      REAL:: T_adjust

!+---+

      orho_w = 1./rho_w

!..Bin masses for rain and (spherical, water-density) cloud drops.
      do n2 = 1, nbr
         massr(n2) = am_r*Dr(n2)**bm_r
      enddo
      do n = 1, nbc
         massc(n) = am_r*Dc(n)**bm_r
      enddo

!..Freeze water (smallest drops become cloud ice, otherwise graupel).
      do m = 1, ntb_IN
      T_adjust = MAX(-3.0, MIN(3.0 - ALOG10(Nt_IN(m)), 3.0))
      do k = 1, 45
!        print*, ' Freezing water for temp = ', -k
!..Temperature factor of the Bigg freezing probability, shifted by the
!.. ice-nuclei-dependent T_adjust.
         Texp = DEXP( DFLOAT(k) - T_adjust*1.0D0 ) - 1.0D0

!..Rain: loop over intercept j and content i bins of the spectrum.
         do j = 1, ntb_r1
            do i = 1, ntb_r
               lam_exp = (N0r_exp(j)*am_r*crg(1)/r_r(i))**ore1
               lamr = lam_exp * (crg(3)*org2*org1)**obmr
               N0_r = N0r_exp(j)/(crg(2)*lam_exp) * lamr**cre(2)
               sum1 = 0.0d0
               sum2 = 0.0d0
               sumn1 = 0.0d0
               sumn2 = 0.0d0
!..Largest drops first; stop once all available rain mass is frozen.
               do n2 = nbr, 1, -1
                  N_r(n2) = N0_r*Dr(n2)**mu_r*DEXP(-lamr*Dr(n2))*dtr(n2)
                  vol = massr(n2)*orho_w
!..Probability a drop of this volume freezes (Bigg 1954 form).
                  prob = 1.0D0 - DEXP(-120.0D0*vol*5.2D-4 * Texp)
                  if (massr(n2) .lt. xm0g) then
                     sumn1 = sumn1 + prob*N_r(n2)
                     sum1 = sum1 + prob*N_r(n2)*massr(n2)
                  else
                     sumn2 = sumn2 + prob*N_r(n2)
                     sum2 = sum2 + prob*N_r(n2)*massr(n2)
                  endif
                  if ((sum1+sum2).ge.r_r(i)) EXIT
               enddo
               tpi_qrfz(i,j,k,m) = sum1
               tni_qrfz(i,j,k,m) = sumn1
               tpg_qrfz(i,j,k,m) = sum2
               tnr_qrfz(i,j,k,m) = sumn2
            enddo
         enddo

!..Cloud water: loop over droplet number j and content i bins.
         do j = 1, nbc
            nu_c = MIN(15, NINT(1000.E6/t_Nc(j)) + 2)
            do i = 1, ntb_c
               lamc = (t_Nc(j)*am_r* ccg(2,nu_c) * ocg1(nu_c) / r_c(i))**obmr
               N0_c = t_Nc(j)*ocg1(nu_c) * lamc**cce(1,nu_c)
               sum1 = 0.0d0
               sumn2 = 0.0d0
               do n = nbc, 1, -1
                  vol = massc(n)*orho_w
                  prob = 1.0D0 - DEXP(-120.0D0*vol*5.2D-4 * Texp)
                  N_c(n) = N0_c*Dc(n)**nu_c*EXP(-lamc*Dc(n))*dtc(n)
!..Frozen number cannot exceed the available droplet number.
                  sumn2 = MIN(t_Nc(j), sumn2 + prob*N_c(n))
                  sum1 = sum1 + prob*N_c(n)*massc(n)
                  if (sum1 .ge. r_c(i)) EXIT
               enddo
               tpi_qcfz(i,j,k,m) = sum1
               tni_qcfz(i,j,k,m) = sumn2
            enddo
         enddo
      enddo
      enddo

      end subroutine freezeH2O
!+---+-----------------------------------------------------------------+
!ctrlL
!+---+-----------------------------------------------------------------+
!..Cloud ice converting to snow since portion greater than min snow
!.. size.  Given cloud ice content (kg/m**3), number concentration
!.. (#/m**3) and gamma shape parameter, mu_i, break the distrib into
!.. bins and figure out the mass/number of ice with sizes larger than
!.. D0s.  Also, compute incomplete gamma function for the integration
!.. of ice depositional growth from diameter=0 to D0s.  Amount of
!.. ice depositional growth is this portion of distrib while larger
!.. diameters contribute to snow growth (as in Harrington et al. 1995).
!+---+-----------------------------------------------------------------+

      subroutine qi_aut_qs
!..Fills tps_iaus/tni_iaus (ice mass/number larger than D0s, i.e. the
!.. portion autoconverted to snow) and tpi_ide (fraction of deposition
!.. growth credited to cloud ice, from the incomplete gamma integral
!.. up to D0s) over ice content i and ice number j bins.

      implicit none

!..Local variables
      INTEGER:: i, j, n2
      DOUBLE PRECISION, DIMENSION(nbi):: N_i
      DOUBLE PRECISION:: N0_i, lami, Di_mean, t1, t2
      REAL:: xlimit_intg

!+---+

      do j = 1, ntb_i1
         do i = 1, ntb_i
            lami = (am_i*cig(2)*oig1*Nt_i(j)/r_i(i))**obmi
            Di_mean = (bm_i + mu_i + 1.) / lami
            N0_i = Nt_i(j)*oig1 * lami**cie(1)
            t1 = 0.0d0
            t2 = 0.0d0
            if (SNGL(Di_mean) .gt. 5.*D0s) then
!..Very large mean size: convert everything, no deposition to ice.
               t1 = r_i(i)
               t2 = Nt_i(j)
               tpi_ide(i,j) = 0.0D0
            elseif (SNGL(Di_mean) .lt. D0i) then
!..Very small mean size: convert nothing, all deposition to ice.
               t1 = 0.0D0
               t2 = 0.0D0
               tpi_ide(i,j) = 1.0D0
            else
!..In between: incomplete gamma gives the depositional fraction and a
!.. bin sum gives the mass/number above the snow threshold D0s.
               xlimit_intg = lami*D0s
               tpi_ide(i,j) = GAMMP(mu_i+2.0, xlimit_intg) * 1.0D0
               do n2 = 1, nbi
                  N_i(n2) = N0_i*Di(n2)**mu_i * DEXP(-lami*Di(n2))*dti(n2)
                  if (Di(n2).ge.D0s) then
                     t1 = t1 + N_i(n2) * am_i*Di(n2)**bm_i
                     t2 = t2 + N_i(n2)
                  endif
               enddo
            endif
            tps_iaus(i,j) = t1
            tni_iaus(i,j) = t2
         enddo
      enddo

      end subroutine qi_aut_qs
!ctrlL
!+---+-----------------------------------------------------------------+
!..Variable collision efficiency for rain collecting cloud water using
!.. method of Beard and Grover, 1974 if a/A less than 0.25; otherwise
!.. uses polynomials to get close match of Pruppacher & Klett Fig 14-9.
!+---+-----------------------------------------------------------------+

      subroutine table_Efrw

      implicit none

!..Local variables
      DOUBLE PRECISION:: vtr, stokes, reynolds, Ef_rw
      DOUBLE PRECISION:: p, yc0, F, G, H, z, K0, X
      INTEGER:: i, j

      do j = 1, nbc
         do i = 1, nbr
            Ef_rw = 0.0
!..Size ratio collected/collector.
            p = Dc(j)/Dr(i)
            if (Dr(i).lt.50.E-6 .or. Dc(j).lt.3.E-6) then
!..Collector or collected particle too small: no collection.
               t_Efrw(i,j) = 0.0
            elseif (p.gt.0.25) then
!..Polynomial fits (X = droplet diameter in microns) per rain size.
               X = Dc(j)*1.D6
               if (Dr(i) .lt. 75.e-6) then
                  Ef_rw = 0.026794*X - 0.20604
               elseif (Dr(i) .lt. 125.e-6) then
                  Ef_rw = -0.00066842*X*X + 0.061542*X - 0.37089
               elseif (Dr(i) .lt. 175.e-6) then
                  Ef_rw = 4.091e-06*X*X*X*X - 0.00030908*X*X*X &
                        + 0.0066237*X*X - 0.0013687*X - 0.073022
               elseif (Dr(i) .lt. 250.e-6) then
                  Ef_rw = 9.6719e-5*X*X*X - 0.0068901*X*X + 0.17305*X &
                        - 0.65988
               elseif (Dr(i) .lt. 350.e-6) then
                  Ef_rw = 9.0488e-5*X*X*X - 0.006585*X*X + 0.16606*X &
                        - 0.56125
               else
                  Ef_rw = 0.00010721*X*X*X - 0.0072962*X*X + 0.1704*X &
                        - 0.46929
               endif
            else
!..Beard-Grover (1974) Stokes/Reynolds-based efficiency.
               vtr = -0.1021 + 4.932E3*Dr(i) - 0.9551E6*Dr(i)*Dr(i) &
                   + 0.07934E9*Dr(i)*Dr(i)*Dr(i) &
                   - 0.002362E12*Dr(i)*Dr(i)*Dr(i)*Dr(i)
               stokes = Dc(j)*Dc(j)*vtr*rho_w/(9.*1.718E-5*Dr(i))
               reynolds = 9.*stokes/(p*p*rho_w)

               F = DLOG(reynolds)
               G = -0.1007D0 - 0.358D0*F + 0.0261D0*F*F
               K0 = DEXP(G)
               z = DLOG(stokes/(K0+1.D-15))
               H = 0.1465D0 + 1.302D0*z - 0.607D0*z*z + 0.293D0*z*z*z
               yc0 = 2.0D0/PI * ATAN(H)
               Ef_rw = (yc0+p)*(yc0+p) / ((1.+p)*(1.+p))

            endif

!..Clamp into [0, 0.95].
            t_Efrw(i,j) = MAX(0.0, MIN(SNGL(Ef_rw), 0.95))

         enddo
      enddo

      end subroutine table_Efrw
!ctrlL
!+---+-----------------------------------------------------------------+
!..Variable collision efficiency for snow collecting cloud water using
!.. method of Wang and Ji, 2000 except equate melted snow diameter to
!.. their "effective collision cross-section."
!+---+-----------------------------------------------------------------+

      subroutine table_Efsw

      implicit none

!..Local variables
      DOUBLE PRECISION:: Ds_m, vts, vtc, stokes, reynolds, Ef_sw
      DOUBLE PRECISION:: p, yc0, F, G, H, z, K0
      INTEGER:: i, j

      do j = 1, nbc
!..Cloud droplet terminal velocity (Stokes regime).
         vtc = 1.19D4 * (1.0D4*Dc(j)*Dc(j)*0.25D0)
         do i = 1, nbs
!..Snow fall speed relative to the droplet.
            vts = av_s*Ds(i)**bv_s * DEXP(-fv_s*Ds(i)) - vtc
!..Melted-equivalent snow diameter used as collision cross-section.
            Ds_m = (am_s*Ds(i)**bm_s / am_r)**obmr
            p = Dc(j)/Ds_m
            if (p.gt.0.25 .or. Ds(i).lt.D0s .or. Dc(j).lt.6.E-6 &
                  .or. vts.lt.1.E-3) then
               t_Efsw(i,j) = 0.0
            else
               stokes = Dc(j)*Dc(j)*vts*rho_w/(9.*1.718E-5*Ds_m)
               reynolds = 9.*stokes/(p*p*rho_w)

               F = DLOG(reynolds)
               G = -0.1007D0 - 0.358D0*F + 0.0261D0*F*F
               K0 = DEXP(G)
               z = DLOG(stokes/(K0+1.D-15))
               H = 0.1465D0 + 1.302D0*z - 0.607D0*z*z + 0.293D0*z*z*z
               yc0 = 2.0D0/PI * ATAN(H)
               Ef_sw = (yc0+p)*(yc0+p) / ((1.+p)*(1.+p))

!..Clamp into [0, 0.95].
               t_Efsw(i,j) = MAX(0.0, MIN(SNGL(Ef_sw), 0.95))
            endif

         enddo
      enddo

      end subroutine table_Efsw
!ctrlL
!+---+-----------------------------------------------------------------+
!..Function to compute collision efficiency of collector species (rain,
!.. snow, graupel) of aerosols.  Follows Wang et al, 2010, ACP, which
!.. follows Slinn (1983).
!+---+-----------------------------------------------------------------+

      real function Eff_aero(D, Da, visc,rhoa,Temp,species)

      implicit none
      real:: D, Da, visc, rhoa, Temp
      character(LEN=1):: species
      real:: aval, Cc, diff, Re, Sc, St, St2, vt, Eff
      real, parameter:: boltzman = 1.3806503E-23
      real, parameter:: meanPath = 0.0256E-6

!..Collector terminal velocity by species ('r' rain, 's' snow,
!.. 'g' graupel); defaults to 1 m/s for any other species code.
      vt = 1.
      if (species .eq. 'r') then
         vt = -0.1021 + 4.932E3*D - 0.9551E6*D*D &
            + 0.07934E9*D*D*D - 0.002362E12*D*D*D*D
      elseif (species .eq. 's') then
         vt = av_s*D**bv_s
      elseif (species .eq. 'g') then
         vt = av_g*D**bv_g
      endif

!..Cunningham slip correction and Brownian diffusivity of the aerosol.
      Cc = 1. + 2.*meanPath/Da *(1.257+0.4*exp(-0.55*Da/meanPath))
      diff = boltzman*Temp*Cc/(3.*PI*visc*Da)

      Re = 0.5*rhoa*D*vt/visc
      Sc = visc/(rhoa*diff)

!..Stokes number and critical Stokes number.
!.. NOTE(review): the 1000. factor looks like an assumed aerosol
!.. density (kg/m**3) - confirm against Wang et al. (2010).
      St = Da*Da*vt*1000./(9.*visc*D)
      aval = 1.+LOG(1.+Re)
      St2 = (1.2 + 1./12.*aval)/(1.+aval)

!..Brownian diffusion + interception terms; impaction added below.
      Eff = 4./(Re*Sc) * (1. + 0.4*SQRT(Re)*Sc**0.3333 &
                             + 0.16*SQRT(Re)*SQRT(Sc)) &
             + 4.*Da/D * (0.02 + Da/D*(1.+2.*SQRT(Re)))

      if (St.gt.St2) Eff = Eff + ( (St-St2)/(St-St2+0.666667))**1.5
      Eff_aero = MAX(1.E-5, MIN(Eff, 1.0))

      end function Eff_aero

!ctrlL
!+---+-----------------------------------------------------------------+
!..Integrate rain size distribution from zero to D-star to compute the
!.. number of drops smaller than D-star that evaporate in a single
!.. timestep.  Drops larger than D-star don't evaporate entirely so do
!.. not affect number concentration.
!+---+-----------------------------------------------------------------+

      subroutine table_dropEvap
!..Fills cumulative cloud-droplet mass (tpc_wev) and number (tnc_wev)
!.. tables up to bin i, for each content bin j and number bin k.

      implicit none

!..Local variables
      INTEGER:: i, j, k, n
      DOUBLE PRECISION, DIMENSION(nbc):: N_c, massc
      DOUBLE PRECISION:: summ, summ2, lamc, N0_c
      INTEGER:: nu_c
!      DOUBLE PRECISION:: Nt_r, N0, lam_exp, lam
!      REAL:: xlimit_intg

      do n = 1, nbc
         massc(n) = am_r*Dc(n)**bm_r
      enddo

      do k = 1, nbc
         nu_c = MIN(15, NINT(1000.E6/t_Nc(k)) + 2)
         do j = 1, ntb_c
            lamc = (t_Nc(k)*am_r* ccg(2,nu_c)*ocg1(nu_c) / r_c(j))**obmr
            N0_c = t_Nc(k)*ocg1(nu_c) * lamc**cce(1,nu_c)
            do i = 1, nbc
!-GT        tnc_wev(i,j,k) = GAMMP(nu_c+1., SNGL(Dc(i)*lamc))*t_Nc(k)
               N_c(i) = N0_c* Dc(i)**nu_c*EXP(-lamc*Dc(i))*dtc(i)
!     if(j.eq.18 .and. k.eq.50) print*, ' N_c = ', N_c(i)
!..Partial sums of mass/number up to and including bin i.
!.. NOTE(review): this recomputes the prefix sum for every i (O(nbc**2)
!.. per (j,k)); a running accumulator would be equivalent and cheaper.
               summ = 0.
               summ2 = 0.
               do n = 1, i
                  summ = summ + massc(n)*N_c(n)
                  summ2 = summ2 + N_c(n)
               enddo
!     if(j.eq.18 .and. k.eq.50) print*, ' DEBUG-TABLE: ', r_c(j), t_Nc(k), summ2, summ
               tpc_wev(i,j,k) = summ
               tnc_wev(i,j,k) = summ2
            enddo
         enddo
      enddo

!
!..To do the same thing for rain.
!
!     do k = 1, ntb_r
!     do j = 1, ntb_r1
!        lam_exp = (N0r_exp(j)*am_r*crg(1)/r_r(k))**ore1
!        lam = lam_exp * (crg(3)*org2*org1)**obmr
!        N0 = N0r_exp(j)/(crg(2)*lam_exp) * lam**cre(2)
!        Nt_r = N0 * crg(2) / lam**cre(2)
!        do i = 1, nbr
!           xlimit_intg = lam*Dr(i)
!           tnr_rev(i,j,k) = GAMMP(mu_r+1.0, xlimit_intg) * Nt_r
!        enddo
!     enddo
!     enddo

! TO APPLY TABLE ABOVE
!..Rain lookup table indexes.
!         Dr_star = DSQRT(-2.D0*DT * t1_evap/(2.*PI) &
!                  * 0.78*4.*diffu(k)*xsat*rvs/rho_w)
!         idx_d = NINT(1.0 + FLOAT(nbr) * DLOG(Dr_star/D0r) &
!                  / DLOG(Dr(nbr)/D0r))
!         idx_d = MAX(1, MIN(idx_d, nbr))
!
!         nir = NINT(ALOG10(rr(k)))
!         do nn = nir-1, nir+1
!            n = nn
!            if ( (rr(k)/10.**nn).ge.1.0 .and. &
!                 (rr(k)/10.**nn).lt.10.0) goto 154
!         enddo
!154      continue
!         idx_r = INT(rr(k)/10.**n) + 10*(n-nir2) - (n-nir2)
!         idx_r = MAX(1, MIN(idx_r, ntb_r))
!
!         lamr = (am_r*crg(3)*org2*nr(k)/rr(k))**obmr
!         lam_exp = lamr * (crg(3)*org2*org1)**bm_r
!         N0_exp = org1*rr(k)/am_r * lam_exp**cre(1)
!         nir = NINT(DLOG10(N0_exp))
!         do nn = nir-1, nir+1
!            n = nn
!            if ( (N0_exp/10.**nn).ge.1.0 .and. &
!                 (N0_exp/10.**nn).lt.10.0) goto 155
!         enddo
!155      continue
!         idx_r1 = INT(N0_exp/10.**n) + 10*(n-nir3) - (n-nir3)
!         idx_r1 = MAX(1, MIN(idx_r1, ntb_r1))
!
!         pnr_rev(k) = MIN(nr(k)*odts, SNGL(tnr_rev(idx_d,idx_r1,idx_r) &   ! RAIN2M
!                    * odts))

      end subroutine table_dropEvap
!
!ctrlL
#if !defined (mpas)
!+---+-----------------------------------------------------------------+
!..Fill the table of CCN activation data created from parcel model run
!.. by Trude Eidhammer with inputs of aerosol number concentration,
!.. vertical velocity, temperature, lognormal mean aerosol radius, and
!.. hygroscopicity, kappa.  The data are read from external file and
!.. contain activated fraction of CCN for given conditions.
!+---+-----------------------------------------------------------------+

      subroutine table_ccnAct

      USE module_domain
      USE module_dm
      implicit none

      LOGICAL, EXTERNAL:: wrf_dm_on_monitor

!..Local variables
      INTEGER:: iunit_mp_th1, i
      LOGICAL:: opened
      CHARACTER*64 errmess

!..Find a free fortran unit on the monitor task only; broadcast it so
!.. all tasks agree (or all see the failure value -1).
      iunit_mp_th1 = -1
      IF ( wrf_dm_on_monitor() ) THEN
        DO i = 20,99
          INQUIRE ( i , OPENED = opened )
          IF ( .NOT. opened ) THEN
            iunit_mp_th1 = i
            GOTO 2010
          ENDIF
        ENDDO
 2010   CONTINUE
      ENDIF
#if defined(DM_PARALLEL) && !defined(STUBMPI)
      CALL wrf_dm_bcast_bytes ( iunit_mp_th1 , IWORDSIZE )
#endif
      IF ( iunit_mp_th1 < 0 ) THEN
        CALL wrf_error_fatal ( 'module_mp_thompson: table_ccnAct: '// &
            'Can not find unused fortran unit to read in lookup table.')
      ENDIF

!..Monitor task reads the binary table, then broadcasts to all tasks.
      IF ( wrf_dm_on_monitor() ) THEN
        WRITE(errmess, '(A,I2)') 'module_mp_thompson: opening CCN_ACTIVATE.BIN on unit ',iunit_mp_th1
!       CALL wrf_debug(150, errmess)
        OPEN(iunit_mp_th1,FILE='CCN_ACTIVATE.BIN', &
             FORM='UNFORMATTED',STATUS='OLD',ERR=9009)
      ENDIF

#define DM_BCAST_MACRO(A) CALL wrf_dm_bcast_bytes(A, size(A)*R4SIZE)

      IF ( wrf_dm_on_monitor() ) READ(iunit_mp_th1,ERR=9010) tnccn_act
#if defined(DM_PARALLEL) && !defined(STUBMPI)
      DM_BCAST_MACRO(tnccn_act)
#endif


      RETURN
!..Error handlers for open/read failures on the external table file.
 9009 CONTINUE
      WRITE( errmess , '(A,I2)' ) 'module_mp_thompson: error opening CCN_ACTIVATE.BIN on unit ',iunit_mp_th1
      CALL wrf_error_fatal(errmess)
      RETURN
 9010 CONTINUE
      WRITE( errmess , '(A,I2)' ) 'module_mp_thompson: error reading CCN_ACTIVATE.BIN on unit ',iunit_mp_th1
      CALL wrf_error_fatal(errmess)

      end subroutine table_ccnAct
#endif
!^L
!+---+-----------------------------------------------------------------+
!..Retrieve fraction of CCN that gets activated given the model temp,
!.. vertical velocity, and available CCN concentration.  The lookup
!.. table (read from external file) has CCN concentration varying the
!.. quickest, then updraft, then temperature, then mean aerosol radius,
!.. and finally hygroscopicity, kappa.
!.. TO_DO ITEM:  For radiation cooling producing fog, in which case the
!.. updraft velocity could easily be negative, we could use the temp
!.. and its tendency to diagnose a pretend positive updraft velocity.
!+---+-----------------------------------------------------------------+
      real function activ_ncloud(Tt, Ww, NCCN)
!..Bilinear (in log CCN and log updraft) interpolation of the
!.. tnccn_act activated-fraction table; temperature index is nearest-
!.. neighbor, aerosol radius (l) and kappa (m) indices are fixed.
!.. Returns the activated cloud droplet number NCCN*fraction.

      implicit none
      REAL, INTENT(IN):: Tt, Ww, NCCN
      REAL:: n_local, w_local
      INTEGER:: i, j, k, l, m, n
      REAL:: A, B, C, D, t, u, x1, x2, y1, y2, nx, wy, fraction


!  ta_Na = (/10.0, 31.6, 100.0, 316.0, 1000.0, 3160.0, 10000.0/)  ntb_arc
!  ta_Ww = (/0.01, 0.0316, 0.1, 0.316, 1.0, 3.16, 10.0, 31.6, 100.0/)  ntb_arw
!  ta_Tk = (/243.15, 253.15, 263.15, 273.15, 283.15, 293.15, 303.15/)  ntb_art
!  ta_Ra = (/0.01, 0.02, 0.04, 0.08, 0.16/)  ntb_arr
!  ta_Ka = (/0.2, 0.4, 0.6, 0.8/)  ntb_ark

!..Table axis for number is per-cc.
      n_local = NCCN * 1.E-6
      w_local = Ww

!..Clamp inside the table range, then find the bracketing interval.
      if (n_local .ge. ta_Na(ntb_arc)) then
         n_local = ta_Na(ntb_arc) - 1.0
      elseif (n_local .le. ta_Na(1)) then
         n_local = ta_Na(1) + 1.0
      endif
      do n = 2, ntb_arc
         if (n_local.ge.ta_Na(n-1) .and. n_local.lt.ta_Na(n)) goto 8003
      enddo
 8003 continue
      i = n
      x1 = LOG(ta_Na(i-1))
      x2 = LOG(ta_Na(i))

      if (w_local .ge. ta_Ww(ntb_arw)) then
         w_local = ta_Ww(ntb_arw) - 1.0
      elseif (w_local .le. ta_Ww(1)) then
         w_local = ta_Ww(1) + 0.001
      endif
      do n = 2, ntb_arw
         if (w_local.ge.ta_Ww(n-1) .and. w_local.lt.ta_Ww(n)) goto 8005
      enddo
 8005 continue
      j = n
      y1 = LOG(ta_Ww(j-1))
      y2 = LOG(ta_Ww(j))

!..Nearest temperature index (table spacing is 10 K).
      k = MAX(1, MIN( NINT( (Tt - ta_Tk(1))*0.1) + 1, ntb_art))

!..The next two values are indexes of mean aerosol radius and
!.. hygroscopicity.  Currently these are constant but a future version
!.. should implement other variables to allow more freedom such as
!.. at least simple separation of tiny size sulfates from larger
!.. sea salts.
      l = 3
      m = 2

!..Corner values and bilinear weights in (log N, log W) space.
      A = tnccn_act(i-1,j-1,k,l,m)
      B = tnccn_act(i,j-1,k,l,m)
      C = tnccn_act(i,j,k,l,m)
      D = tnccn_act(i-1,j,k,l,m)
      nx = LOG(n_local)
      wy = LOG(w_local)

      t = (nx-x1)/(x2-x1)
      u = (wy-y1)/(y2-y1)

!     t = (n_local-ta(Na(i-1))/(ta_Na(i)-ta_Na(i-1))
!     u = (w_local-ta_Ww(j-1))/(ta_Ww(j)-ta_Ww(j-1))

      fraction = (1.0-t)*(1.0-u)*A + t*(1.0-u)*B + t*u*C + (1.0-t)*u*D

!     if (NCCN*fraction .gt. 0.75*Nt_c_max) then
!        write(*,*) ' DEBUG-GT ', n_local, w_local, Tt, i, j, k
!     endif

      activ_ncloud = NCCN*fraction

      end function activ_ncloud

#if !defined(mpas)
!+---+-----------------------------------------------------------------+
!+---+-----------------------------------------------------------------+
      SUBROUTINE GCF(GAMMCF,A,X,GLN)
! --- RETURNS THE INCOMPLETE GAMMA FUNCTION Q(A,X) EVALUATED BY ITS
! --- CONTINUED FRACTION REPRESENTATION AS GAMMCF.  ALSO RETURNS
! --- LN(GAMMA(A)) AS GLN.  THE CONTINUED FRACTION IS EVALUATED BY
! --- A MODIFIED LENTZ METHOD.
! --- USES GAMMLN
      IMPLICIT NONE
      INTEGER, PARAMETER:: ITMAX=100
      REAL, PARAMETER:: gEPS=3.E-7
      REAL, PARAMETER:: FPMIN=1.E-30
      REAL, INTENT(IN):: A, X
      REAL:: GAMMCF,GLN
      INTEGER:: I
      REAL:: AN,B,C,D,DEL,H
      GLN=GAMMLN(A)
      B=X+1.-A
      C=1./FPMIN
      D=1./B
      H=D
      DO 11 I=1,ITMAX
        AN=-I*(I-A)
        B=B+2.
        D=AN*D+B
        IF(ABS(D).LT.FPMIN)D=FPMIN
        C=B+AN/C
        IF(ABS(C).LT.FPMIN)C=FPMIN
        D=1./D
        DEL=D*C
        H=H*DEL
        IF(ABS(DEL-1.).LT.gEPS)GOTO 1
 11   CONTINUE
      PRINT *, 'A TOO LARGE, ITMAX TOO SMALL IN GCF'
 1    GAMMCF=EXP(-X+A*LOG(X)-GLN)*H
      END SUBROUTINE GCF
! (C) Copr. 1986-92 Numerical Recipes Software 2.02
!+---+-----------------------------------------------------------------+
      SUBROUTINE GSER(GAMSER,A,X,GLN)
! --- RETURNS THE INCOMPLETE GAMMA FUNCTION P(A,X) EVALUATED BY
! --- ITS SERIES REPRESENTATION AS GAMSER.  ALSO RETURNS LN(GAMMA(A))
! --- AS GLN.
! --- USES GAMMLN
      IMPLICIT NONE
      INTEGER, PARAMETER:: ITMAX=100
      REAL, PARAMETER:: gEPS=3.E-7
      REAL, INTENT(IN):: A, X
      REAL:: GAMSER,GLN
      INTEGER:: N
      REAL:: AP,DEL,SUM
      GLN=GAMMLN(A)
      IF(X.LE.0.)THEN
        IF(X.LT.0.) PRINT *, 'X < 0 IN GSER'
        GAMSER=0.
        RETURN
      ENDIF
      AP=A
      SUM=1./A
      DEL=SUM
      DO 11 N=1,ITMAX
        AP=AP+1.
        DEL=DEL*X/AP
        SUM=SUM+DEL
        IF(ABS(DEL).LT.ABS(SUM)*gEPS)GOTO 1
 11   CONTINUE
      PRINT *,'A TOO LARGE, ITMAX TOO SMALL IN GSER'
 1    GAMSER=SUM*EXP(-X+A*LOG(X)-GLN)
      END SUBROUTINE GSER
! (C) Copr. 1986-92 Numerical Recipes Software 2.02
!+---+-----------------------------------------------------------------+
      REAL FUNCTION GAMMLN(XX)
! --- RETURNS THE VALUE LN(GAMMA(XX)) FOR XX > 0.  (Lanczos
! --- approximation, double-precision internals.)
      IMPLICIT NONE
      REAL, INTENT(IN):: XX
      DOUBLE PRECISION, PARAMETER:: STP = 2.5066282746310005D0
      DOUBLE PRECISION, DIMENSION(6), PARAMETER:: &
         COF = (/76.18009172947146D0, -86.50532032941677D0, &
                 24.01409824083091D0, -1.231739572450155D0, &
                 .1208650973866179D-2, -.5395239384953D-5/)
      DOUBLE PRECISION:: SER,TMP,X,Y
      INTEGER:: J

      X=XX
      Y=X
      TMP=X+5.5D0
      TMP=(X+0.5D0)*LOG(TMP)-TMP
      SER=1.000000000190015D0
      DO 11 J=1,6
        Y=Y+1.D0
        SER=SER+COF(J)/Y
11    CONTINUE
      GAMMLN=TMP+LOG(STP*SER/X)
      END FUNCTION GAMMLN
! (C) Copr. 1986-92 Numerical Recipes Software 2.02
!+---+-----------------------------------------------------------------+
      REAL FUNCTION GAMMP(A,X)
! --- COMPUTES THE INCOMPLETE GAMMA FUNCTION P(A,X)
! --- SEE ABRAMOWITZ AND STEGUN 6.5.1
! --- USES GCF,GSER
      IMPLICIT NONE
      REAL, INTENT(IN):: A,X
      REAL:: GAMMCF,GAMSER,GLN
      GAMMP = 0.
      IF((X.LT.0.) .OR. (A.LE.0.)) THEN
        PRINT *, 'BAD ARGUMENTS IN GAMMP'
        RETURN
      ELSEIF(X.LT.A+1.)THEN
! --- Series representation converges fastest for X < A+1.
        CALL GSER(GAMSER,A,X,GLN)
        GAMMP=GAMSER
      ELSE
! --- Otherwise use the continued fraction for Q(A,X) = 1 - P(A,X).
        CALL GCF(GAMMCF,A,X,GLN)
        GAMMP=1.-GAMMCF
      ENDIF
      END FUNCTION GAMMP
! (C) Copr. 1986-92 Numerical Recipes Software 2.02
!+---+-----------------------------------------------------------------+
      REAL FUNCTION WGAMMA(y)
! --- COMPLETE GAMMA FUNCTION VIA EXP(GAMMLN(y)).

      IMPLICIT NONE
      REAL, INTENT(IN):: y

      WGAMMA = EXP(GAMMLN(y))

      END FUNCTION WGAMMA
!+---+-----------------------------------------------------------------+
! THIS FUNCTION CALCULATES THE LIQUID SATURATION VAPOR MIXING RATIO AS
! A FUNCTION OF TEMPERATURE AND PRESSURE
!
+ REAL FUNCTION RSLF(P,T) + + IMPLICIT NONE + REAL, INTENT(IN):: P, T + REAL:: ESL,X + REAL, PARAMETER:: C0= .611583699E03 + REAL, PARAMETER:: C1= .444606896E02 + REAL, PARAMETER:: C2= .143177157E01 + REAL, PARAMETER:: C3= .264224321E-1 + REAL, PARAMETER:: C4= .299291081E-3 + REAL, PARAMETER:: C5= .203154182E-5 + REAL, PARAMETER:: C6= .702620698E-8 + REAL, PARAMETER:: C7= .379534310E-11 + REAL, PARAMETER:: C8=-.321582393E-13 + + X=MAX(-80.,T-273.16) + +! ESL=612.2*EXP(17.67*X/(T-29.65)) + ESL=C0+X*(C1+X*(C2+X*(C3+X*(C4+X*(C5+X*(C6+X*(C7+X*C8))))))) + ESL=MIN(ESL, P*0.15) ! Even with P=1050mb and T=55C, the sat. vap. pres only contributes to ~15% of total pres. + RSLF=.622*ESL/(P-ESL) + +! ALTERNATIVE +! ; Source: Murphy and Koop, Review of the vapour pressure of ice and +! supercooled water for atmospheric applications, Q. J. R. +! Meteorol. Soc (2005), 131, pp. 1539-1565. +! ESL = EXP(54.842763 - 6763.22 / T - 4.210 * ALOG(T) + 0.000367 * T +! + TANH(0.0415 * (T - 218.8)) * (53.878 - 1331.22 +! / T - 9.44523 * ALOG(T) + 0.014025 * T)) + + END FUNCTION RSLF +!+---+-----------------------------------------------------------------+ +! THIS FUNCTION CALCULATES THE ICE SATURATION VAPOR MIXING RATIO AS A +! FUNCTION OF TEMPERATURE AND PRESSURE +! + REAL FUNCTION RSIF(P,T) + + IMPLICIT NONE + REAL, INTENT(IN):: P, T + REAL:: ESI,X + REAL, PARAMETER:: C0= .609868993E03 + REAL, PARAMETER:: C1= .499320233E02 + REAL, PARAMETER:: C2= .184672631E01 + REAL, PARAMETER:: C3= .402737184E-1 + REAL, PARAMETER:: C4= .565392987E-3 + REAL, PARAMETER:: C5= .521693933E-5 + REAL, PARAMETER:: C6= .307839583E-7 + REAL, PARAMETER:: C7= .105785160E-9 + REAL, PARAMETER:: C8= .161444444E-12 + + X=MAX(-80.,T-273.16) + ESI=C0+X*(C1+X*(C2+X*(C3+X*(C4+X*(C5+X*(C6+X*(C7+X*C8))))))) + ESI=MIN(ESI, P*0.15) + RSIF=.622*ESI/(P-ESI) + +! ALTERNATIVE +! ; Source: Murphy and Koop, Review of the vapour pressure of ice and +! supercooled water for atmospheric applications, Q. J. R. +! Meteorol. 
Soc (2005), 131, pp. 1539-1565. +! ESI = EXP(9.550426 - 5723.265/T + 3.53068*ALOG(T) - 0.00728332*T) + + END FUNCTION RSIF + +#endif +!+---+-----------------------------------------------------------------+ + real function iceDeMott(tempc, qv, qvs, qvsi, rho, nifa) + implicit none + + REAL, INTENT(IN):: tempc, qv, qvs, qvsi, rho, nifa + +!..Local vars + REAL:: satw, sati, siw, p_x, si0x, dtt, dsi, dsw, dab, fc, hx + REAL:: ntilde, n_in, nmax, nhat, mux, xni, nifa_cc + REAL, PARAMETER:: p_c1 = 1000. + REAL, PARAMETER:: p_rho_c = 0.76 + REAL, PARAMETER:: p_alpha = 1.0 + REAL, PARAMETER:: p_gam = 2. + REAL, PARAMETER:: delT = 5. + REAL, PARAMETER:: T0x = -40. + REAL, PARAMETER:: Sw0x = 0.97 + REAL, PARAMETER:: delSi = 0.1 + REAL, PARAMETER:: hdm = 0.15 + REAL, PARAMETER:: p_psi = 0.058707*p_gam/p_rho_c + REAL, PARAMETER:: aap = 1. + REAL, PARAMETER:: bbp = 0. + REAL, PARAMETER:: y1p = -35. + REAL, PARAMETER:: y2p = -25. + REAL, PARAMETER:: rho_not0 = 101325./(287.05*273.15) + +!+---+ + + xni = 0.0 +! satw = qv/qvs +! sati = qv/qvsi +! siw = qvs/qvsi +! p_x = -1.0261+(3.1656e-3*tempc)+(5.3938e-4*(tempc*tempc)) & +! + (8.2584e-6*(tempc*tempc*tempc)) +! si0x = 1.+(10.**p_x) +! if (sati.ge.si0x .and. satw.lt.0.985) then +! dtt = delta_p (tempc, T0x, T0x+delT, 1., hdm) +! dsi = delta_p (sati, Si0x, Si0x+delSi, 0., 1.) +! dsw = delta_p (satw, Sw0x, 1., 0., 1.) +! fc = dtt*dsi*0.5 +! hx = min(fc+((1.-fc)*dsw), 1.) +! ntilde = p_c1*p_gam*((exp(12.96*(sati-1.1)))**0.3) / p_rho_c +! if (tempc .le. y1p) then +! n_in = ntilde +! elseif (tempc .ge. y2p) then +! n_in = p_psi*p_c1*exp(12.96*(sati-1.)-0.639) +! else +! if (tempc .le. -30.) then +! nmax = p_c1*p_gam*(exp(12.96*(siw-1.1)))**0.3/p_rho_c +! else +! nmax = p_psi*p_c1*exp(12.96*(siw-1.)-0.639) +! endif +! ntilde = MIN(ntilde, nmax) +! nhat = MIN(p_psi*p_c1*exp(12.96*(sati-1.)-0.639), nmax) +! dab = delta_p (tempc, y1p, y2p, aap, bbp) +! n_in = MIN(nhat*(ntilde/nhat)**dab, nmax) +! endif +! mux = hx*p_alpha*n_in*rho +! 
xni = mux*((6700.*nifa)-200.)/((6700.*5.E5)-200.) +! elseif (satw.ge.0.985 .and. tempc.gt.HGFR-273.15) then + nifa_cc = nifa*RHO_NOT0*1.E-6/rho +! xni = 3.*nifa_cc**(1.25)*exp((0.46*(-tempc))-11.6) ! [DeMott, 2015] + xni = (5.94e-5*(-tempc)**3.33) & ! [DeMott, 2010] + * (nifa_cc**((-0.0264*(tempc))+0.0033)) + xni = xni*rho/RHO_NOT0 * 1000. +! endif + + iceDeMott = MAX(0., xni) + + end FUNCTION iceDeMott + +!+---+-----------------------------------------------------------------+ +!..Newer research since Koop et al (2001) suggests that the freezing +!.. rate should be lower than original paper, so J_rate is reduced +!.. by two orders of magnitude. + + real function iceKoop(temp, qv, qvs, naero, dt) + implicit none + + REAL, INTENT(IN):: temp, qv, qvs, naero, DT + REAL:: mu_diff, a_w_i, delta_aw, log_J_rate, J_rate, prob_h, satw + REAL:: xni + + xni = 0.0 + satw = qv/qvs + mu_diff = 210368.0 + (131.438*temp) - (3.32373E6/temp) & + & - (41729.1*alog(temp)) + a_w_i = exp(mu_diff/(R_uni*temp)) + delta_aw = satw - a_w_i + log_J_rate = -906.7 + (8502.0*delta_aw) & + & - (26924.0*delta_aw*delta_aw) & + & + (29180.0*delta_aw*delta_aw*delta_aw) + log_J_rate = MIN(20.0, log_J_rate) + J_rate = 10.**log_J_rate ! cm-3 s-1 + prob_h = MIN(1.-exp(-J_rate*ar_volume*DT), 1.) + if (prob_h .gt. 0.) then + xni = MIN(prob_h*naero, 1000.E3) + endif + + iceKoop = MAX(0.0, xni) + + end FUNCTION iceKoop + +!+---+-----------------------------------------------------------------+ +!.. Helper routine for Phillips et al (2008) ice nucleation. Trude + + REAL FUNCTION delta_p (yy, y1, y2, aa, bb) + IMPLICIT NONE + + REAL, INTENT(IN):: yy, y1, y2, aa, bb + REAL:: dab, A, B, a0, a1, a2, a3 + + A = 6.*(aa-bb)/((y2-y1)*(y2-y1)*(y2-y1)) + B = aa+(A*y1*y1*y1/6.)-(A*y1*y1*y2*0.5) + a0 = B + a1 = A*y1*y2 + a2 = -A*(y1+y2)*0.5 + a3 = A/3. 
+ + if (yy.le.y1) then + dab = aa + else if (yy.ge.y2) then + dab = bb + else + dab = a0+(a1*yy)+(a2*yy*yy)+(a3*yy*yy*yy) + endif + + if (dab.lt.aa) then + dab = aa + endif + if (dab.gt.bb) then + dab = bb + endif + delta_p = dab + + END FUNCTION delta_p + +!+---+-----------------------------------------------------------------+ +!ctrlL + +!+---+-----------------------------------------------------------------+ +!..Compute _radiation_ effective radii of cloud water, ice, and snow. +!.. These are entirely consistent with microphysics assumptions, not +!.. constant or otherwise ad hoc as is internal to most radiation +!.. schemes. Since only the smallest snowflakes should impact +!.. radiation, compute from first portion of complicated Field number +!.. distribution, not the second part, which is the larger sizes. +!+---+-----------------------------------------------------------------+ + + subroutine calc_effectRad (t1d, p1d, qv1d, qc1d, nc1d, qi1d, ni1d, qs1d, & + & re_qc1d, re_qi1d, re_qs1d, kts, kte) + + IMPLICIT NONE + +!..Sub arguments + INTEGER, INTENT(IN):: kts, kte + REAL, DIMENSION(kts:kte), INTENT(IN):: & + & t1d, p1d, qv1d, qc1d, nc1d, qi1d, ni1d, qs1d + REAL, DIMENSION(kts:kte), INTENT(INOUT):: re_qc1d, re_qi1d, re_qs1d +!..Local variables + INTEGER:: k + REAL, DIMENSION(kts:kte):: rho, rc, nc, ri, ni, rs + REAL:: smo2, smob, smoc + REAL:: tc0, loga_, a_, b_ + DOUBLE PRECISION:: lamc, lami + LOGICAL:: has_qc, has_qi, has_qs + INTEGER:: inu_c + real, dimension(15), parameter:: g_ratio = (/24,60,120,210,336, & + & 504,720,990,1320,1716,2184,2730,3360,4080,4896/) + + has_qc = .false. + has_qi = .false. + has_qs = .false. + + do k = kts, kte + rho(k) = 0.622*p1d(k)/(R*t1d(k)*(qv1d(k)+0.622)) + rc(k) = MAX(R1, qc1d(k)*rho(k)) + nc(k) = MAX(R2, nc1d(k)*rho(k)) + if (.NOT. is_aerosol_aware) nc(k) = Nt_c + if (rc(k).gt.R1 .and. nc(k).gt.R2) has_qc = .true. + ri(k) = MAX(R1, qi1d(k)*rho(k)) + ni(k) = MAX(R2, ni1d(k)*rho(k)) + if (ri(k).gt.R1 .and. 
ni(k).gt.R2) has_qi = .true. + rs(k) = MAX(R1, qs1d(k)*rho(k)) + if (rs(k).gt.R1) has_qs = .true. + enddo + + if (has_qc) then + do k = kts, kte + if (rc(k).le.R1 .or. nc(k).le.R2) CYCLE + if (nc(k).lt.100) then + inu_c = 15 + elseif (nc(k).gt.1.E10) then + inu_c = 2 + else + inu_c = MIN(15, NINT(1000.E6/nc(k)) + 2) + endif + lamc = (nc(k)*am_r*g_ratio(inu_c)/rc(k))**obmr + re_qc1d(k) = MAX(2.51E-6, MIN(SNGL(0.5D0 * DBLE(3.+inu_c)/lamc), 50.E-6)) + enddo + endif + + if (has_qi) then + do k = kts, kte + if (ri(k).le.R1 .or. ni(k).le.R2) CYCLE + lami = (am_i*cig(2)*oig1*ni(k)/ri(k))**obmi + re_qi1d(k) = MAX(5.01E-6, MIN(SNGL(0.5D0 * DBLE(3.+mu_i)/lami), 125.E-6)) + enddo + endif + + if (has_qs) then + do k = kts, kte + if (rs(k).le.R1) CYCLE + tc0 = MIN(-0.1, t1d(k)-273.15) + smob = rs(k)*oams + +!..All other moments based on reference, 2nd moment. If bm_s.ne.2, +!.. then we must compute actual 2nd moment and use as reference. + if (bm_s.gt.(2.0-1.e-3) .and. bm_s.lt.(2.0+1.e-3)) then + smo2 = smob + else + loga_ = sa(1) + sa(2)*tc0 + sa(3)*bm_s & + & + sa(4)*tc0*bm_s + sa(5)*tc0*tc0 & + & + sa(6)*bm_s*bm_s + sa(7)*tc0*tc0*bm_s & + & + sa(8)*tc0*bm_s*bm_s + sa(9)*tc0*tc0*tc0 & + & + sa(10)*bm_s*bm_s*bm_s + a_ = 10.0**loga_ + b_ = sb(1) + sb(2)*tc0 + sb(3)*bm_s & + & + sb(4)*tc0*bm_s + sb(5)*tc0*tc0 & + & + sb(6)*bm_s*bm_s + sb(7)*tc0*tc0*bm_s & + & + sb(8)*tc0*bm_s*bm_s + sb(9)*tc0*tc0*tc0 & + & + sb(10)*bm_s*bm_s*bm_s + smo2 = (smob/a_)**(1./b_) + endif +!..Calculate bm_s+1 (th) moment. Useful for diameter calcs. 
+ loga_ = sa(1) + sa(2)*tc0 + sa(3)*cse(1) & + & + sa(4)*tc0*cse(1) + sa(5)*tc0*tc0 & + & + sa(6)*cse(1)*cse(1) + sa(7)*tc0*tc0*cse(1) & + & + sa(8)*tc0*cse(1)*cse(1) + sa(9)*tc0*tc0*tc0 & + & + sa(10)*cse(1)*cse(1)*cse(1) + a_ = 10.0**loga_ + b_ = sb(1)+ sb(2)*tc0 + sb(3)*cse(1) + sb(4)*tc0*cse(1) & + & + sb(5)*tc0*tc0 + sb(6)*cse(1)*cse(1) & + & + sb(7)*tc0*tc0*cse(1) + sb(8)*tc0*cse(1)*cse(1) & + & + sb(9)*tc0*tc0*tc0 + sb(10)*cse(1)*cse(1)*cse(1) + smoc = a_ * smo2**b_ + re_qs1d(k) = MAX(10.E-6, MIN(0.5*(smoc/smob), 999.E-6)) + enddo + endif + + end subroutine calc_effectRad + +!+---+-----------------------------------------------------------------+ +!..Compute radar reflectivity assuming 10 cm wavelength radar and using +!.. Rayleigh approximation. Only complication is melted snow/graupel +!.. which we treat as water-coated ice spheres and use Uli Blahak's +!.. library of routines. The meltwater fraction is simply the amount +!.. of frozen species remaining from what initially existed at the +!.. melting level interface. +!+---+-----------------------------------------------------------------+ + + subroutine calc_refl10cm (qv1d, qc1d, qr1d, nr1d, qs1d, qg1d, & + t1d, p1d, dBZ, kts, kte, ii, jj) + + IMPLICIT NONE + +!..Sub arguments + INTEGER, INTENT(IN):: kts, kte, ii, jj + REAL, DIMENSION(kts:kte), INTENT(IN):: & + qv1d, qc1d, qr1d, nr1d, qs1d, qg1d, t1d, p1d + REAL, DIMENSION(kts:kte), INTENT(INOUT):: dBZ +! 
REAL, DIMENSION(kts:kte), INTENT(INOUT):: vt_dBZ + +!..Local variables + REAL, DIMENSION(kts:kte):: temp, pres, qv, rho, rhof + REAL, DIMENSION(kts:kte):: rc, rr, nr, rs, rg + + DOUBLE PRECISION, DIMENSION(kts:kte):: ilamr, ilamg, N0_r, N0_g + REAL, DIMENSION(kts:kte):: mvd_r + REAL, DIMENSION(kts:kte):: smob, smo2, smoc, smoz + REAL:: oM3, M0, Mrat, slam1, slam2, xDs + REAL:: ils1, ils2, t1_vts, t2_vts, t3_vts, t4_vts + REAL:: vtr_dbz_wt, vts_dbz_wt, vtg_dbz_wt + + REAL, DIMENSION(kts:kte):: ze_rain, ze_snow, ze_graupel + + DOUBLE PRECISION:: N0_exp, N0_min, lam_exp, lamr, lamg + REAL:: a_, b_, loga_, tc0 + DOUBLE PRECISION:: fmelt_s, fmelt_g + + INTEGER:: i, k, k_0, kbot, n + LOGICAL:: melti + LOGICAL, DIMENSION(kts:kte):: L_qr, L_qs, L_qg + + DOUBLE PRECISION:: cback, x, eta, f_d + REAL:: xslw1, ygra1, zans1 + +!+---+ + + do k = kts, kte + dBZ(k) = -35.0 + enddo + +!+---+-----------------------------------------------------------------+ +!..Put column of data into local arrays. +!+---+-----------------------------------------------------------------+ + do k = kts, kte + temp(k) = t1d(k) + qv(k) = MAX(1.E-10, qv1d(k)) + pres(k) = p1d(k) + rho(k) = 0.622*pres(k)/(R*temp(k)*(qv(k)+0.622)) + rhof(k) = SQRT(RHO_NOT/rho(k)) + rc(k) = MAX(R1, qc1d(k)*rho(k)) + if (qr1d(k) .gt. R1) then + rr(k) = qr1d(k)*rho(k) + nr(k) = MAX(R2, nr1d(k)*rho(k)) + lamr = (am_r*crg(3)*org2*nr(k)/rr(k))**obmr + ilamr(k) = 1./lamr + N0_r(k) = nr(k)*org2*lamr**cre(2) + mvd_r(k) = (3.0 + mu_r + 0.672) * ilamr(k) + L_qr(k) = .true. + else + rr(k) = R1 + nr(k) = R1 + mvd_r(k) = 50.E-6 + L_qr(k) = .false. + endif + if (qs1d(k) .gt. R2) then + rs(k) = qs1d(k)*rho(k) + L_qs(k) = .true. + else + rs(k) = R1 + L_qs(k) = .false. + endif + if (qg1d(k) .gt. R2) then + rg(k) = qg1d(k)*rho(k) + L_qg(k) = .true. + else + rg(k) = R1 + L_qg(k) = .false. + endif + enddo + +!+---+-----------------------------------------------------------------+ +!..Calculate y-intercept, slope, and useful moments for snow. 
+!+---+-----------------------------------------------------------------+ + do k = kts, kte + tc0 = MIN(-0.1, temp(k)-273.15) + smob(k) = rs(k)*oams + +!..All other moments based on reference, 2nd moment. If bm_s.ne.2, +!.. then we must compute actual 2nd moment and use as reference. + if (bm_s.gt.(2.0-1.e-3) .and. bm_s.lt.(2.0+1.e-3)) then + smo2(k) = smob(k) + else + loga_ = sa(1) + sa(2)*tc0 + sa(3)*bm_s & + & + sa(4)*tc0*bm_s + sa(5)*tc0*tc0 & + & + sa(6)*bm_s*bm_s + sa(7)*tc0*tc0*bm_s & + & + sa(8)*tc0*bm_s*bm_s + sa(9)*tc0*tc0*tc0 & + & + sa(10)*bm_s*bm_s*bm_s + a_ = 10.0**loga_ + b_ = sb(1) + sb(2)*tc0 + sb(3)*bm_s & + & + sb(4)*tc0*bm_s + sb(5)*tc0*tc0 & + & + sb(6)*bm_s*bm_s + sb(7)*tc0*tc0*bm_s & + & + sb(8)*tc0*bm_s*bm_s + sb(9)*tc0*tc0*tc0 & + & + sb(10)*bm_s*bm_s*bm_s + smo2(k) = (smob(k)/a_)**(1./b_) + endif + +!..Calculate bm_s+1 (th) moment. Useful for diameter calcs. + loga_ = sa(1) + sa(2)*tc0 + sa(3)*cse(1) & + & + sa(4)*tc0*cse(1) + sa(5)*tc0*tc0 & + & + sa(6)*cse(1)*cse(1) + sa(7)*tc0*tc0*cse(1) & + & + sa(8)*tc0*cse(1)*cse(1) + sa(9)*tc0*tc0*tc0 & + & + sa(10)*cse(1)*cse(1)*cse(1) + a_ = 10.0**loga_ + b_ = sb(1)+ sb(2)*tc0 + sb(3)*cse(1) + sb(4)*tc0*cse(1) & + & + sb(5)*tc0*tc0 + sb(6)*cse(1)*cse(1) & + & + sb(7)*tc0*tc0*cse(1) + sb(8)*tc0*cse(1)*cse(1) & + & + sb(9)*tc0*tc0*tc0 + sb(10)*cse(1)*cse(1)*cse(1) + smoc(k) = a_ * smo2(k)**b_ + +!..Calculate bm_s*2 (th) moment. Useful for reflectivity. 
+ loga_ = sa(1) + sa(2)*tc0 + sa(3)*cse(3) & + & + sa(4)*tc0*cse(3) + sa(5)*tc0*tc0 & + & + sa(6)*cse(3)*cse(3) + sa(7)*tc0*tc0*cse(3) & + & + sa(8)*tc0*cse(3)*cse(3) + sa(9)*tc0*tc0*tc0 & + & + sa(10)*cse(3)*cse(3)*cse(3) + a_ = 10.0**loga_ + b_ = sb(1)+ sb(2)*tc0 + sb(3)*cse(3) + sb(4)*tc0*cse(3) & + & + sb(5)*tc0*tc0 + sb(6)*cse(3)*cse(3) & + & + sb(7)*tc0*tc0*cse(3) + sb(8)*tc0*cse(3)*cse(3) & + & + sb(9)*tc0*tc0*tc0 + sb(10)*cse(3)*cse(3)*cse(3) + smoz(k) = a_ * smo2(k)**b_ + enddo + +!+---+-----------------------------------------------------------------+ +!..Calculate y-intercept, slope values for graupel. +!+---+-----------------------------------------------------------------+ + + N0_min = gonv_max + k_0 = kts + do k = kte, kts, -1 + if (temp(k).ge.270.65) k_0 = MAX(k_0, k) + enddo + do k = kte, kts, -1 + if (k.gt.k_0 .and. L_qr(k) .and. mvd_r(k).gt.100.E-6) then + xslw1 = 4.01 + alog10(mvd_r(k)) + else + xslw1 = 0.01 + endif + ygra1 = 4.31 + alog10(max(5.E-5, rg(k))) + zans1 = 3.1 + (100./(300.*xslw1*ygra1/(10./xslw1+1.+0.25*ygra1)+30.+10.*ygra1)) + N0_exp = 10.**(zans1) + N0_exp = MAX(DBLE(gonv_min), MIN(N0_exp, DBLE(gonv_max))) + N0_min = MIN(N0_exp, N0_min) + N0_exp = N0_min + lam_exp = (N0_exp*am_g*cgg(1)/rg(k))**oge1 + lamg = lam_exp * (cgg(3)*ogg2*ogg1)**obmg + ilamg(k) = 1./lamg + N0_g(k) = N0_exp/(cgg(2)*lam_exp) * lamg**cge(2) + enddo + +!+---+-----------------------------------------------------------------+ +!..Locate K-level of start of melting (k_0 is level above). +!+---+-----------------------------------------------------------------+ + melti = .false. + k_0 = kts + do k = kte-1, kts, -1 + if ( (temp(k).gt.273.15) .and. L_qr(k) & + & .and. (L_qs(k+1).or.L_qg(k+1)) ) then + k_0 = MAX(k+1, k_0) + melti=.true. + goto 195 + endif + enddo + 195 continue + +!+---+-----------------------------------------------------------------+ +!..Assume Rayleigh approximation at 10 cm wavelength. Rain (all temps) +!.. 
and non-water-coated snow and graupel when below freezing are +!.. simple. Integrations of m(D)*m(D)*N(D)*dD. +!+---+-----------------------------------------------------------------+ + + do k = kts, kte + ze_rain(k) = 1.e-22 + ze_snow(k) = 1.e-22 + ze_graupel(k) = 1.e-22 + if (L_qr(k)) ze_rain(k) = N0_r(k)*crg(4)*ilamr(k)**cre(4) + if (L_qs(k)) ze_snow(k) = (0.176/0.93) * (6.0/PI)*(6.0/PI) & + & * (am_s/900.0)*(am_s/900.0)*smoz(k) + if (L_qg(k)) ze_graupel(k) = (0.176/0.93) * (6.0/PI)*(6.0/PI) & + & * (am_g/900.0)*(am_g/900.0) & + & * N0_g(k)*cgg(4)*ilamg(k)**cge(4) + enddo + +!+---+-----------------------------------------------------------------+ +!..Special case of melting ice (snow/graupel) particles. Assume the +!.. ice is surrounded by the liquid water. Fraction of meltwater is +!.. extremely simple based on amount found above the melting level. +!.. Uses code from Uli Blahak (rayleigh_soak_wetgraupel and supporting +!.. routines). +!+---+-----------------------------------------------------------------+ + + if (.not. iiwarm .and. melti .and. k_0.ge.2) then + do k = k_0-1, kts, -1 + +!..Reflectivity contributed by melting snow + if (L_qs(k) .and. L_qs(k_0) ) then + fmelt_s = MAX(0.05d0, MIN(1.0d0-rs(k)/rs(k_0), 0.99d0)) + eta = 0.d0 + oM3 = 1./smoc(k) + M0 = (smob(k)*oM3) + Mrat = smob(k)*M0*M0*M0 + slam1 = M0 * Lam0 + slam2 = M0 * Lam1 + do n = 1, nrbins + x = am_s * xxDs(n)**bm_s + call rayleigh_soak_wetgraupel (x, DBLE(ocms), DBLE(obms), & + & fmelt_s, melt_outside_s, m_w_0, m_i_0, lamda_radar, & + & CBACK, mixingrulestring_s, matrixstring_s, & + & inclusionstring_s, hoststring_s, & + & hostmatrixstring_s, hostinclusionstring_s) + f_d = Mrat*(Kap0*DEXP(-slam1*xxDs(n)) & + & + Kap1*(M0*xxDs(n))**mu_s * DEXP(-slam2*xxDs(n))) + eta = eta + f_d * CBACK * simpson(n) * xdts(n) + enddo + ze_snow(k) = SNGL(lamda4 / (pi5 * K_w) * eta) + endif + +!..Reflectivity contributed by melting graupel + + if (L_qg(k) .and. 
L_qg(k_0) ) then + fmelt_g = MAX(0.05d0, MIN(1.0d0-rg(k)/rg(k_0), 0.99d0)) + eta = 0.d0 + lamg = 1./ilamg(k) + do n = 1, nrbins + x = am_g * xxDg(n)**bm_g + call rayleigh_soak_wetgraupel (x, DBLE(ocmg), DBLE(obmg), & + & fmelt_g, melt_outside_g, m_w_0, m_i_0, lamda_radar, & + & CBACK, mixingrulestring_g, matrixstring_g, & + & inclusionstring_g, hoststring_g, & + & hostmatrixstring_g, hostinclusionstring_g) + f_d = N0_g(k)*xxDg(n)**mu_g * DEXP(-lamg*xxDg(n)) + eta = eta + f_d * CBACK * simpson(n) * xdtg(n) + enddo + ze_graupel(k) = SNGL(lamda4 / (pi5 * K_w) * eta) + endif + + enddo + endif + + do k = kte, kts, -1 + dBZ(k) = 10.*log10((ze_rain(k)+ze_snow(k)+ze_graupel(k))*1.d18) + enddo + + +!..Reflectivity-weighted terminal velocity (snow, rain, graupel, mix). +! do k = kte, kts, -1 +! vt_dBZ(k) = 1.E-3 +! if (rs(k).gt.R2) then +! Mrat = smob(k) / smoc(k) +! ils1 = 1./(Mrat*Lam0 + fv_s) +! ils2 = 1./(Mrat*Lam1 + fv_s) +! t1_vts = Kap0*csg(5)*ils1**cse(5) +! t2_vts = Kap1*Mrat**mu_s*csg(11)*ils2**cse(11) +! ils1 = 1./(Mrat*Lam0) +! ils2 = 1./(Mrat*Lam1) +! t3_vts = Kap0*csg(6)*ils1**cse(6) +! t4_vts = Kap1*Mrat**mu_s*csg(12)*ils2**cse(12) +! vts_dbz_wt = rhof(k)*av_s * (t1_vts+t2_vts)/(t3_vts+t4_vts) +! if (temp(k).ge.273.15 .and. temp(k).lt.275.15) then +! vts_dbz_wt = vts_dbz_wt*1.5 +! elseif (temp(k).ge.275.15) then +! vts_dbz_wt = vts_dbz_wt*2.0 +! endif +! else +! vts_dbz_wt = 1.E-3 +! endif + +! if (rr(k).gt.R1) then +! lamr = 1./ilamr(k) +! vtr_dbz_wt = rhof(k)*av_r*crg(13)*(lamr+fv_r)**(-cre(13)) & +! & / (crg(4)*lamr**(-cre(4))) +! else +! vtr_dbz_wt = 1.E-3 +! endif + +! if (rg(k).gt.R2) then +! lamg = 1./ilamg(k) +! vtg_dbz_wt = rhof(k)*av_g*cgg(5)*lamg**(-cge(5)) & +! & / (cgg(4)*lamg**(-cge(4))) +! else +! vtg_dbz_wt = 1.E-3 +! endif + +! vt_dBZ(k) = (vts_dbz_wt*ze_snow(k) + vtr_dbz_wt*ze_rain(k) & +! & + vtg_dbz_wt*ze_graupel(k)) & +! & / (ze_rain(k)+ze_snow(k)+ze_graupel(k)) +! enddo + + end subroutine calc_refl10cm +! 
+!+---+-----------------------------------------------------------------+ + +!+---+-----------------------------------------------------------------+ +END MODULE module_mp_thompson +!+---+-----------------------------------------------------------------+ diff --git a/src/core_atmosphere/physics/physics_wrf/module_mp_thompson_cldfra3.F b/src/core_atmosphere/physics/physics_wrf/module_mp_thompson_cldfra3.F new file mode 100644 index 0000000000..bb5ded6e22 --- /dev/null +++ b/src/core_atmosphere/physics/physics_wrf/module_mp_thompson_cldfra3.F @@ -0,0 +1,521 @@ +!================================================================================================================= + module module_mp_thompson_cldfra3 + +!module_mp_thompson_cldfra3 contains the subroutine cal_cldfra3 which calculates the cloud fraction as a function +!of relative humidity. The subroutine cal_cldfra3 was tested in WRF 3.8.1 with the Thompson cloud microphysics +!and should not be used with other cloud microphysics schemes. + +!subroutine cal_cldfra3 was originally copied from ./phys/module_radiation_driver.F from WRF version 3.8.1. +!Laura D. Fowler (laura@ucar.edu) / 2016-09-22. + +! add-ons and modifications to sourcecode: +! ---------------------------------------- +! * in subroutine find_cloudLayers, changed the line k = k_m12C+2 to k = min(k_m12C,k_m12C+2) to avoid k greater +! than the model-top index. +! Laura D. Fowler (laura@ucar.edu)/2016-09-23. + + + implicit none + private + public:: cal_cldfra3 + + + contains + + +!================================================================================================================= + +!+---+-----------------------------------------------------------------+ +!..Cloud fraction scheme by G. Thompson (NCAR-RAL), not intended for +!.. combining with any cumulus or shallow cumulus parameterization +!.. scheme cloud fractions. This is intended as a stand-alone for +!.. cloud fraction and is relatively good at getting widespread stratus +!.. 
and stratoCu without caring whether any deep/shallow Cu param scheme
+!.. is making sub-grid-spacing clouds/precip.  Under the hood, this
+!.. scheme follows Mocko and Cotton (1995) in application of the
+!.. Sundqvist et al (1989) scheme but using a grid-scale dependent
+!.. RH threshold, one each for land v. ocean points based on
+!.. experiences with HWRF testing.
+!+---+-----------------------------------------------------------------+
+!
+!+---+-----------------------------------------------------------------+
+
+      SUBROUTINE cal_cldfra3(CLDFRA, qv, qc, qi, qs, &
+ &                 p,t,rho, XLAND, gridkm, &
+! &                 rand_perturb_on, kme_stoch, rand_pert, &
+ &                 ids,ide, jds,jde, kds,kde, &
+ &                 ims,ime, jms,jme, kms,kme, &
+ &                 its,ite, jts,jte, kts,kte)
+!
+      USE module_mp_thompson , ONLY : rsif, rslf
+      IMPLICIT NONE
+!
+      INTEGER, INTENT(IN):: ids,ide, jds,jde, kds,kde, &
+ &                    ims,ime, jms,jme, kms,kme, &
+! &                    kme_stoch, &
+ &                    its,ite, jts,jte, kts,kte
+
+!     INTEGER, INTENT(IN):: rand_perturb_on
+      REAL, DIMENSION(ims:ime,kms:kme,jms:jme), INTENT(IN):: qv,p,t,rho
+      REAL, DIMENSION(ims:ime,kms:kme,jms:jme), INTENT(INOUT):: qc,qi,qs
+!     REAL, DIMENSION(ims:ime,kms:kme_stoch,jms:jme), INTENT(IN):: rand_pert
+      REAL, DIMENSION(ims:ime,jms:jme), INTENT(IN):: XLAND
+
+      REAL, DIMENSION(ims:ime,kms:kme,jms:jme), INTENT(INOUT):: cldfra
+      REAL, DIMENSION(ims:ime,jms:jme), INTENT(IN):: gridkm
+
+!..Local vars.
+      REAL:: RH_00, RHI_max, entrmnt
+      REAL, DIMENSION(its:ite,jts:jte):: RH_00L, RH_00O
+      REAL, DIMENSION(ims:ime,kms:kme,jms:jme):: qvsat
+      INTEGER:: i,j,k
+      REAL:: TK, TC, qvsi, qvsw, RHUM, xx, yy
+      REAL, DIMENSION(kms:kme):: qvs1d, cfr1d, T1d, &
+ &                         P1d, R1d, qc1d, qi1d, qs1d
+
+      character*512 dbg_msg
+      LOGICAL:: debug_flag
+
+!+---+
+
+!..First cut scale-aware. Higher resolution should require closer to
+!.. saturated grid box for higher cloud fraction.  Simple functions
+!.. chosen based on Mocko and Cotton (1995) starting point and desire
+!.. 
to get near 100% RH as grid spacing moves toward 1.0km, but higher +!.. RH over ocean required as compared to over land. + + do j = jts,jte + do i = its,ite + RH_00L(i,j) = 0.781 + SQRT(1./(35.0+gridkm(i,j)*gridkm(i,j)*gridkm(i,j)*0.5)) + RH_00O(i,j) = 0.831 + SQRT(1./(70.0+gridkm(i,j)*gridkm(i,j)*gridkm(i,j)*0.5)) + enddo + enddo + + DO j = jts,jte + DO k = kts,kte + DO i = its,ite + + CLDFRA(I,K,J) = 0.0 + + if (qc(i,k,j).gt.1.E-6 .or. qi(i,k,j).ge.1.E-7 .or. qs(i,k,j).gt.1.E-5) then + CLDFRA(I,K,J) = 1.0 + qvsat(i,k,j) = qv(i,k,j) + else + TK = t(i,k,j) + TC = TK - 273.16 + + qvsw = rslf(P(i,k,j), TK) + qvsi = rsif(P(i,k,j), TK) + + if (tc .ge. -12.0) then + qvsat(i,k,j) = qvsw + elseif (tc .lt. -20.0) then + qvsat(i,k,j) = qvsi + else + qvsat(i,k,j) = qvsw - (qvsw-qvsi)*(-12.0-tc)/(-12.0+20.) + endif + RHUM = MAX(0.01, MIN(qv(i,k,j)/qvsat(i,k,j), 0.9999)) + + IF ((XLAND(I,J)-1.5).GT.0.) THEN !--- Ocean + RH_00 = RH_00O(i,j) + ELSE !--- Land + RH_00 = RH_00L(i,j) + ENDIF + + if (tc .ge. -12.0) then + RHUM = MIN(0.999, RHUM) + CLDFRA(I,K,J) = MAX(0.0, 1.0-SQRT((1.0-RHUM)/(1.-RH_00))) + elseif (tc.lt.-12..and.tc.gt.-70. .and. RHUM.gt.RH_00O(i,j)) then + RHUM = MAX(0.01, MIN(qv(i,k,j)/qvsat(i,k,j), qvsw/qvsi - 1.E-6)) + RHI_max = MAX(RHUM+1.E-6, qvsw/qvsi) + CLDFRA(I,K,J) = MAX(0., 1.0-SQRT((RHI_max-RHUM)/(RHI_max-RH_00O(i,j)))) + endif + CLDFRA(I,K,J) = MIN(0.90, CLDFRA(I,K,J)) + + endif + ENDDO + ENDDO + ENDDO + +!..Prepare for a 1-D column to find various cloud layers. + + DO j = jts,jte + DO i = its,ite +! if (i.gt.10.and.i.le.20 .and. j.gt.10.and.j.le.20) then +! debug_flag = .true. +! else +! debug_flag = .false. +! endif + +! if (rand_perturb_on .eq. 1) then +! entrmnt = MAX(0.01, MIN(0.99, 0.5 + rand_pert(i,1,j)*0.5)) +! else + entrmnt = 0.5 +! 
endif + + DO k = kts,kte + qvs1d(k) = qvsat(i,k,j) + cfr1d(k) = cldfra(i,k,j) + T1d(k) = t(i,k,j) + P1d(k) = p(i,k,j) + R1d(k) = rho(i,k,j) + qc1d(k) = qc(i,k,j) + qi1d(k) = qi(i,k,j) + qs1d(k) = qs(i,k,j) + ENDDO + +! if (debug_flag) then +! WRITE (dbg_msg,*) 'DEBUG-GT: finding cloud layers at point (', i, ', ', j, ')' +! CALL wrf_debug (150, dbg_msg) +! endif + + call find_cloudLayers(qvs1d, cfr1d, T1d, P1d, R1d, entrmnt, & + & debug_flag, qc1d, qi1d, qs1d, kts,kte) + + DO k = kts,kte + cldfra(i,k,j) = cfr1d(k) + qc(i,k,j) = qc1d(k) + qi(i,k,j) = qi1d(k) + ENDDO + ENDDO + ENDDO + + END SUBROUTINE cal_cldfra3 + +!+---+-----------------------------------------------------------------+ +!..From cloud fraction array, find clouds of multi-level depth and compute +!.. a reasonable value of LWP or IWP that might be contained in that depth, +!.. unless existing LWC/IWC is already there. + + SUBROUTINE find_cloudLayers(qvs1d, cfr1d, T1d, P1d, R1d, entrmnt, & + & debugfl, qc1d, qi1d, qs1d, kts,kte) +! + IMPLICIT NONE +! + INTEGER, INTENT(IN):: kts, kte + LOGICAL, INTENT(IN):: debugfl + REAL, INTENT(IN):: entrmnt + REAL, DIMENSION(kts:kte), INTENT(IN):: qvs1d,T1d,P1d,R1d + REAL, DIMENSION(kts:kte), INTENT(INOUT):: cfr1d + REAL, DIMENSION(kts:kte), INTENT(INOUT):: qc1d, qi1d, qs1d + +!..Local vars. + REAL, DIMENSION(kts:kte):: theta, dz + REAL:: Z1, Z2, theta1, theta2, ht1, ht2 + INTEGER:: k, k2, k_tropo, k_m12C, k_m40C, k_cldb, k_cldt, kbot + LOGICAL:: in_cloud + character*512 dbg_msg + +!+---+ + k_m12C = 0 + k_m40C = 0 + DO k = kte, kts, -1 + theta(k) = T1d(k)*((100000.0/P1d(k))**(287.05/1004.)) + if (T1d(k)-273.16 .gt. -40.0) k_m40C = MAX(k_m40C, k) + if (T1d(k)-273.16 .gt. -12.0) k_m12C = MAX(k_m12C, k) + ENDDO + if (k_m40C .le. kts) k_m40C = kts + if (k_m12C .le. 
kts) k_m12C = kts + + Z2 = 44307.692 * (1.0 - (P1d(kte)/101325.)**0.190) + DO k = kte-1, kts, -1 + Z1 = 44307.692 * (1.0 - (P1d(k)/101325.)**0.190) + dz(k+1) = Z2 - Z1 + Z2 = Z1 + ENDDO + dz(kts) = dz(kts+1) + +!..Find tropopause height, best surrogate, because we would not really +!.. wish to put fake clouds into the stratosphere. The 10/1500 ratio +!.. d(Theta)/d(Z) approximates a vertical line on typical SkewT chart +!.. near typical (mid-latitude) tropopause height. Since messy data +!.. could give us a false signal of such a transition, do the check over +!.. three K-level change, not just a level-to-level check. This method +!.. has potential failure in arctic-like conditions with extremely low +!.. tropopause height, as would any other diagnostic, so ensure resulting +!.. k_tropo level is above 4km. + + DO k = kte-3, kts, -1 + theta1 = theta(k) + theta2 = theta(k+2) + ht1 = 44307.692 * (1.0 - (P1d(k)/101325.)**0.190) + ht2 = 44307.692 * (1.0 - (P1d(k+2)/101325.)**0.190) + if ( (((theta2-theta1)/(ht2-ht1)) .lt. 10./1500. ) .AND. & + & (ht1.lt.19000.) .and. (ht1.gt.4000.) ) then + goto 86 + endif + ENDDO + 86 continue + k_tropo = MAX(kts+2, k+2) + +! if (debugfl) then +! print*, ' FOUND TROPOPAUSE ', k_tropo, ' near ', ht2, ' m' +! WRITE (dbg_msg,*) 'DEBUG-GT: FOUND TROPOPAUSE ', k_tropo, ' near ', ht2, ' m' +! CALL wrf_debug (150, dbg_msg) +! endif + +!..Eliminate possible fractional clouds above supposed tropopause. + DO k = k_tropo+1, kte + if (cfr1d(k).gt.0.0 .and. cfr1d(k).lt.0.999) then + cfr1d(k) = 0. + endif + ENDDO + +!..We would like to prevent fractional clouds below LCL in idealized +!.. situation with deep well-mixed convective PBL, that otherwise is +!.. likely to get clouds in more realistic capping inversion layer. + + kbot = kts+2 + DO k = kbot, k_m12C + if ( (theta(k)-theta(k-1)) .gt. 0.05E-3*dz(k)) EXIT + ENDDO + kbot = MAX(kts+1, k-2) + DO k = kts, kbot + if (cfr1d(k).gt.0.0 .and. cfr1d(k).lt.0.999) cfr1d(k) = 0. 
+ ENDDO + + +!..Starting below tropo height, if cloud fraction greater than 1 percent, +!.. compute an approximate total layer depth of cloud, determine a total +!.. liquid water/ice path (LWP/IWP), then reduce that amount with tuning +!.. parameter to represent entrainment factor, then divide up LWP/IWP +!.. into delta-Z weighted amounts for individual levels per cloud layer. + + k_cldb = k_tropo + in_cloud = .false. + k = k_tropo + + DO WHILE (.not. in_cloud .AND. k.gt.k_m12C) + k_cldt = 0 + if (cfr1d(k).ge.0.01) then + in_cloud = .true. + k_cldt = MAX(k_cldt, k) + endif + if (in_cloud) then + DO k2 = k_cldt-1, k_m12C, -1 + if (cfr1d(k2).lt.0.01 .or. k2.eq.k_m12C) then + k_cldb = k2+1 + goto 87 + endif + ENDDO + 87 continue + in_cloud = .false. + endif + if ((k_cldt - k_cldb + 1) .ge. 2) then +! if (debugfl) then +! print*, 'An ice cloud layer is found between ', k_cldt, k_cldb, P1d(k_cldt)*0.01, P1d(k_cldb)*0.01 +! WRITE (dbg_msg,*) 'DEBUG-GT: An ice cloud layer is found between ', k_cldt, k_cldb, P1d(k_cldt)*0.01, P1d(k_cldb)*0.01 +! CALL wrf_debug (150, dbg_msg) +! endif + call adjust_cloudIce(cfr1d, qi1d, qs1d, qvs1d, T1d,R1d,dz, & + & entrmnt, k_cldb,k_cldt,kts,kte) + k = k_cldb + else + if (cfr1d(k_cldb).gt.0.and.qi1d(k_cldb).lt.1.E-6) & + & qi1d(k_cldb)=1.E-5*cfr1d(k_cldb) + endif + k = k - 1 + ENDDO + + + k_cldb = k_tropo + in_cloud = .false. + +! k = k_m12C + 2 + k = min(k_m12C,k_m12C+2) + DO WHILE (.not. in_cloud .AND. k.gt.kbot) + k_cldt = 0 + if (cfr1d(k).ge.0.01) then + in_cloud = .true. + k_cldt = MAX(k_cldt, k) + endif + if (in_cloud) then + DO k2 = k_cldt-1, kbot, -1 + if (cfr1d(k2).lt.0.01 .or. k2.eq.kbot) then + k_cldb = k2+1 + goto 88 + endif + ENDDO + 88 continue + in_cloud = .false. + endif + if ((k_cldt - k_cldb + 1) .ge. 2) then +! if (debugfl) then +! print*, 'A water cloud layer is found between ', k_cldt, k_cldb, P1d(k_cldt)*0.01, P1d(k_cldb)*0.01 +! 
WRITE (dbg_msg,*) 'DEBUG-GT: A water cloud layer is found between ', k_cldt, k_cldb, P1d(k_cldt)*0.01, P1d(k_cldb)*0.01 +! CALL wrf_debug (150, dbg_msg) +! endif + call adjust_cloudH2O(cfr1d, qc1d, qvs1d, T1d,R1d,dz, & + & entrmnt, k_cldb,k_cldt,kts,kte) + k = k_cldb + else + if (cfr1d(k_cldb).gt.0.and.qc1d(k_cldb).lt.1.E-6) & + & qc1d(k_cldb)=1.E-5*cfr1d(k_cldb) + endif + k = k - 1 + ENDDO + +!..Do a final total column adjustment since we may have added more than 1mm +!.. LWP/IWP for multiple cloud decks. + + call adjust_cloudFinal(cfr1d, qc1d, qi1d, R1d,dz, kts,kte,k_tropo) + +! if (debugfl) then +! print*, ' Made-up fake profile of clouds' +! do k = kte, kts, -1 +! write(*,'(i3, 2x, f8.2, 2x, f9.2, 2x, f6.2, 2x, f15.7, 2x, f15.7)') & +! & K, T1d(k)-273.15, P1d(k)*0.01, cfr1d(k)*100., qc1d(k)*1000.,qi1d(k)*1000. +! enddo +! WRITE (dbg_msg,*) 'DEBUG-GT: Made-up fake profile of clouds' +! CALL wrf_debug (150, dbg_msg) +! do k = kte, kts, -1 +! write(dbg_msg,'(f8.2, 2x, f9.2, 2x, f6.2, 2x, f15.7, 2x, f15.7)') & +! & T1d(k)-273.15, P1d(k)*0.01, cfr1d(k)*100., qc1d(k)*1000.,qi1d(k)*1000. +! CALL wrf_debug (150, dbg_msg) +! enddo +! endif + + + END SUBROUTINE find_cloudLayers + +!+---+-----------------------------------------------------------------+ + + SUBROUTINE adjust_cloudIce(cfr,qi,qs,qvs, T,Rho,dz, entr, k1,k2,kts,kte) +! + IMPLICIT NONE +! + INTEGER, INTENT(IN):: k1,k2, kts,kte + REAL, INTENT(IN):: entr + REAL, DIMENSION(kts:kte), INTENT(IN):: cfr, qvs, T, Rho, dz + REAL, DIMENSION(kts:kte), INTENT(INOUT):: qi, qs + REAL:: iwc, max_iwc, tdz, this_iwc, this_dz, iwp_exists + INTEGER:: k, kmid + + tdz = 0. + do k = k1, k2 + tdz = tdz + dz(k) + enddo + kmid = NINT(0.5*(k1+k2)) + max_iwc = ABS(qvs(k2-1)-qvs(k1)) +! print*, ' max_iwc = ', max_iwc, ' over DZ=',tdz + + iwp_exists = 0. + do k = k1, k2 + iwp_exists = iwp_exists + (qi(k)+qs(k))*Rho(k)*dz(k) + enddo + if (iwp_exists .gt. 
1.0) RETURN + + this_dz = 0.0 + do k = k1, k2 + if (k.eq.k1) then + this_dz = this_dz + 0.5*dz(k) + else + this_dz = this_dz + dz(k) + endif + this_iwc = max_iwc*this_dz/tdz + iwc = MAX(1.E-6, this_iwc*(1.-entr)) + if (cfr(k).gt.0.01.and.cfr(k).lt.0.99.and.T(k).ge.203.16) then + qi(k) = qi(k) + 0.1*cfr(k)*iwc + elseif (qi(k).lt.1.E-5.and.cfr(k).ge.0.99.and.T(k).ge.203.16) then + qi(k) = qi(k) + 0.01*iwc + endif + enddo + + END SUBROUTINE adjust_cloudIce + +!+---+-----------------------------------------------------------------+ + + SUBROUTINE adjust_cloudH2O(cfr, qc, qvs, T,Rho,dz, entr, k1,k2,kts,kte) +! + IMPLICIT NONE +! + INTEGER, INTENT(IN):: k1,k2, kts,kte + REAL, INTENT(IN):: entr + REAL, DIMENSION(kts:kte):: cfr, qc, qvs, T, Rho, dz + REAL:: lwc, max_lwc, tdz, this_lwc, this_dz, lwp_exists + INTEGER:: k, kmid + + tdz = 0. + do k = k1, k2 + tdz = tdz + dz(k) + enddo + kmid = NINT(0.5*(k1+k2)) + max_lwc = ABS(qvs(k2-1)-qvs(k1)) +! print*, ' max_lwc = ', max_lwc, ' over DZ=',tdz + + lwp_exists = 0. + do k = k1, k2 + lwp_exists = lwp_exists + qc(k)*Rho(k)*dz(k) + enddo + if (lwp_exists .gt. 1.0) RETURN + + this_dz = 0.0 + do k = k1, k2 + if (k.eq.k1) then + this_dz = this_dz + 0.5*dz(k) + else + this_dz = this_dz + dz(k) + endif + this_lwc = max_lwc*this_dz/tdz + lwc = MAX(1.E-6, this_lwc*(1.-entr)) + if (cfr(k).gt.0.01.and.cfr(k).lt.0.99.and.T(k).lt.298.16.and.T(k).ge.253.16) then + qc(k) = qc(k) + cfr(k)*cfr(k)*lwc + elseif (cfr(k).ge.0.99.and.qc(k).lt.1.E-5.and.T(k).lt.298.16.and.T(k).ge.253.16) then + qc(k) = qc(k) + 0.1*lwc + endif + enddo + + END SUBROUTINE adjust_cloudH2O + +!+---+-----------------------------------------------------------------+ + +!..Do not alter any grid-explicitly resolved hydrometeors, rather only +!.. the supposed amounts due to the cloud fraction scheme. + + SUBROUTINE adjust_cloudFinal(cfr, qc, qi, Rho,dz, kts,kte,k_tropo) +! + IMPLICIT NONE +! 
+ INTEGER, INTENT(IN):: kts,kte,k_tropo + REAL, DIMENSION(kts:kte), INTENT(IN):: cfr, Rho, dz + REAL, DIMENSION(kts:kte), INTENT(INOUT):: qc, qi + REAL:: lwp, iwp, xfac + INTEGER:: k + + lwp = 0. + do k = kts, k_tropo + if (cfr(k).gt.0.01 .and. cfr(k).lt.0.99) then + lwp = lwp + qc(k)*Rho(k)*dz(k) + endif + enddo + + iwp = 0. + do k = kts, k_tropo + if (cfr(k).gt.0.01 .and. cfr(k).lt.0.99) then + iwp = iwp + qi(k)*Rho(k)*dz(k) + endif + enddo + + if (lwp .gt. 1.0) then + xfac = 1./lwp + do k = kts, k_tropo + if (cfr(k).gt.0.01 .and. cfr(k).lt.0.99) then + qc(k) = qc(k)*xfac + endif + enddo + endif + + if (iwp .gt. 1.0) then + xfac = 1./iwp + do k = kts, k_tropo + if (cfr(k).gt.0.01 .and. cfr(k).lt.0.99) then + qi(k) = qi(k)*xfac + endif + enddo + endif + + END SUBROUTINE adjust_cloudFinal + +!================================================================================================================= + end module module_mp_thompson_cldfra3 +!================================================================================================================= diff --git a/src/core_atmosphere/physics/physics_wrf/module_mp_wsm6.F b/src/core_atmosphere/physics/physics_wrf/module_mp_wsm6.F index 8b65073868..b95266c7e5 100644 --- a/src/core_atmosphere/physics/physics_wrf/module_mp_wsm6.F +++ b/src/core_atmosphere/physics/physics_wrf/module_mp_wsm6.F @@ -1,3 +1,17 @@ +!================================================================================================================= +!module_mp_wsm6.F was originally copied from ./phys/module_mp_wsm6.F from WRF version 3.8.1. +!Laura D. Fowler (laura@ucar.edu) / 2016-09-23. + +!modifications to sourcecode for MPAS: +! * replaced the line "#if ( RWORDSIZE == 4 )" with "#ifdef SINGLE_PRECISION". +! * commented out the lines: +! USE module_utility, ONLY: WRFU_Clock, WRFU_Alarm +! USE module_domain, ONLY : HISTORY_ALARM, Is_alarm_tstep +! 
* changed the declaration of refl_10cm to optional since subroutine refl10cm_wsm6 is called +! in mpas_atmphys_driver_microphysics.F. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-17. + +!================================================================================================================= #ifdef SINGLE_PRECISION # define VREC vsrec # define VSQRT vssqrt @@ -8,17 +22,13 @@ MODULE module_mp_wsm6 ! -!#if defined(mpas) -! USE mpas_atmphys_utilities -!#else -! USE module_utility, ONLY: WRFU_Clock, WRFU_Alarm -! USE module_domain, ONLY : HISTORY_ALARM, Is_alarm_tstep -!#endif +! USE module_utility, ONLY: WRFU_Clock, WRFU_Alarm +! USE module_domain, ONLY : HISTORY_ALARM, Is_alarm_tstep USE module_mp_radar ! REAL, PARAMETER, PRIVATE :: dtcldcr = 120. ! maximum time step for minor loops REAL, PARAMETER, PRIVATE :: n0r = 8.e6 ! intercept parameter rain - REAL, PARAMETER, PRIVATE :: n0g = 4.e6 ! intercept parameter graupel +! REAL, PARAMETER, PRIVATE :: n0g = 4.e6 ! intercept parameter graupel ! set later with hail_opt REAL, PARAMETER, PRIVATE :: avtr = 841.9 ! a constant for terminal velocity of rain REAL, PARAMETER, PRIVATE :: bvtr = 0.8 ! a constant for terminal velocity of rain REAL, PARAMETER, PRIVATE :: r0 = .8e-5 ! 8 microm in contrast to 10 micro m @@ -27,13 +37,13 @@ MODULE module_mp_wsm6 REAL, PARAMETER, PRIVATE :: xmyu = 1.718e-5 ! the dynamic viscosity kgm-1s-1 REAL, PARAMETER, PRIVATE :: avts = 11.72 ! a constant for terminal velocity of snow REAL, PARAMETER, PRIVATE :: bvts = .41 ! a constant for terminal velocity of snow - REAL, PARAMETER, PRIVATE :: avtg = 330. ! a constant for terminal velocity of graupel - REAL, PARAMETER, PRIVATE :: bvtg = 0.8 ! a constant for terminal velocity of graupel - REAL, PARAMETER, PRIVATE :: deng = 500. ! density of graupel +! REAL, PARAMETER, PRIVATE :: avtg = 330. ! a constant for terminal velocity of graupel ! set later with hail_opt +! REAL, PARAMETER, PRIVATE :: bvtg = 0.8 ! 
a constant for terminal velocity of graupel ! set later with hail_opt +! REAL, PARAMETER, PRIVATE :: deng = 500. ! density of graupel ! set later with hail_opt REAL, PARAMETER, PRIVATE :: n0smax = 1.e11 ! maximum n0s (t=-90C unlimited) REAL, PARAMETER, PRIVATE :: lamdarmax = 8.e4 ! limited maximum value for slope parameter of rain REAL, PARAMETER, PRIVATE :: lamdasmax = 1.e5 ! limited maximum value for slope parameter of snow - REAL, PARAMETER, PRIVATE :: lamdagmax = 6.e4 ! limited maximum value for slope parameter of graupel +! REAL, PARAMETER, PRIVATE :: lamdagmax = 6.e4 ! limited maximum value for slope parameter of graupel REAL, PARAMETER, PRIVATE :: dicon = 11.9 ! constant for the cloud-ice diamter REAL, PARAMETER, PRIVATE :: dimax = 500.e-6 ! limited maximum value for the cloud-ice diamter REAL, PARAMETER, PRIVATE :: n0s = 2.e6 ! temperature dependent intercept parameter snow @@ -45,11 +55,13 @@ MODULE module_mp_wsm6 REAL, PARAMETER, PRIVATE :: dens = 100.0 ! Density of snow REAL, PARAMETER, PRIVATE :: qs0 = 6.e-4 ! threshold amount for aggretion to occur REAL, SAVE :: & - qc0, qck1,bvtr1,bvtr2,bvtr3,bvtr4,g1pbr, & + qc0, qck1, pidnc, & + bvtr1,bvtr2,bvtr3,bvtr4,g1pbr, & g3pbr,g4pbr,g5pbro2,pvtr,eacrr,pacrr, & bvtr6,g6pbr, & precr1,precr2,roqimax,bvts1, & bvts2,bvts3,bvts4,g1pbs,g3pbs,g4pbs, & + n0g,avtg,bvtg,deng,lamdagmax, & !RAS13.3 - set these in wsm6init g5pbso2,pvts,pacrs,precs1,precs2,pidn0r, & pidn0s,xlv1,pacrc,pi, & bvtg1,bvtg2,bvtg3,bvtg4,g1pbg, & @@ -73,6 +85,8 @@ SUBROUTINE wsm6(th, q, qc, qr, qi, qs, qg & ,sr & ,refl_10cm, diagflag, do_radar_ref & ,graupel, graupelncv & + ,has_reqc, has_reqi, has_reqs & ! for radiation + ,re_cloud, re_ice, re_snow & ! for radiation ,ids,ide, jds,jde, kds,kde & ,ims,ime, jms,jme, kms,kme & ,its,ite, jts,jte, kts,kte & @@ -120,15 +134,19 @@ SUBROUTINE wsm6(th, q, qc, qr, qi, qs, qg & INTENT(INOUT) :: rain, & rainncv, & sr - +! 
for radiation connecting + INTEGER, INTENT(IN):: & + has_reqc, & + has_reqi, & + has_reqs + REAL, DIMENSION(ims:ime, kms:kme, jms:jme), & + INTENT(INOUT):: & + re_cloud, & + re_ice, & + re_snow !+---+-----------------------------------------------------------------+ -#if defined(mpas) - REAL, DIMENSION(ims:ime, kms:kme, jms:jme), INTENT(INOUT),optional:: & ! GT + REAL, DIMENSION(ims:ime, kms:kme, jms:jme), INTENT(INOUT), OPTIONAL:: & ! GT refl_10cm -#else - REAL, DIMENSION(ims:ime, kms:kme, jms:jme), INTENT(INOUT):: & ! GT - refl_10cm -#endif !+---+-----------------------------------------------------------------+ REAL, DIMENSION( ims:ime , jms:jme ), OPTIONAL, & @@ -148,6 +166,11 @@ SUBROUTINE wsm6(th, q, qc, qr, qi, qs, qg & LOGICAL, OPTIONAL, INTENT(IN) :: diagflag INTEGER, OPTIONAL, INTENT(IN) :: do_radar_ref !+---+-----------------------------------------------------------------+ +! to calculate effective radius for radiation + REAL, DIMENSION( kts:kte ) :: den1d + REAL, DIMENSION( kts:kte ) :: qc1d + REAL, DIMENSION( kts:kte ) :: qi1d + REAL, DIMENSION( kts:kte ) :: re_qc, re_qi, re_qs DO j=jts,jte DO k=kts,kte @@ -209,6 +232,30 @@ SUBROUTINE wsm6(th, q, qc, qr, qi, qs, qg & ENDDO endif ENDIF + + if (has_reqc.ne.0 .and. has_reqi.ne.0 .and. has_reqs.ne.0) then + do i=its,ite + do k=kts,kte + re_qc(k) = 2.51E-6 + re_qi(k) = 10.01E-6 + re_qs(k) = 25.E-6 + + t1d(k) = th(i,k,j)*pii(i,k,j) + den1d(k)= den(i,k,j) + qc1d(k) = qc(i,k,j) + qi1d(k) = qi(i,k,j) + qs1d(k) = qs(i,k,j) + enddo + call effectRad_wsm6(t1d, qc1d, qi1d, qs1d, den1d, & + qmin, t0c, re_qc, re_qi, re_qs, & + kts, kte, i, j) + do k=kts,kte + re_cloud(i,k,j) = MAX(2.51E-6, MIN(re_qc(k), 50.E-6)) + re_ice(i,k,j) = MAX(10.01E-6, MIN(re_qi(k), 125.E-6)) + re_snow(i,k,j) = MAX(25.E-6, MIN(re_qs(k), 999.E-6)) + enddo + enddo + endif ! has_reqc, etc... !+---+-----------------------------------------------------------------+ ENDDO @@ -251,8 +298,17 @@ SUBROUTINE wsm62D(t, q & ! 
Implemented by Song-You Hong (Yonsei Univ.) and Jimy Dudhia (NCAR) ! Summer 2004 ! -! History : semi-lagrangian scheme sedimentation(JH), and clean up -! Hong, August 2009 +! further modifications : +! semi-lagrangian sedimentation (JH,2010),hong, aug 2009 +! ==> higher accuracy and efficient at lower resolutions +! reflectivity computation from greg thompson, lim, jun 2011 +! ==> only diagnostic, but with removal of too large drops +! add hail option from afwa, aug 2014 +! ==> switch graupel or hail by changing no, den, fall vel. +! effective radius of hydrometeors, bae from kiaps, jan 2015 +! ==> consistency in solar insolation of rrtmg radiation +! bug fix in melting terms, bae from kiaps, nov 2015 +! ==> density of air is divided, which has not been ! ! Reference) Hong, Dudhia, Chen (HDC, 2004) Mon. Wea. Rev. ! Hong and Lim (HL, 2006) J. Korean Meteor. Soc. @@ -662,7 +718,7 @@ SUBROUTINE wsm62D(t, q & coeres = rslope2(i,k,2)*sqrt(rslope(i,k,2)*rslopeb(i,k,2)) psmlt(i,k) = xka(t(i,k),den(i,k))/xlf*(t0c-t(i,k))*pi/2. & *n0sfac(i,k)*(precs1*rslope2(i,k,2) & - +precs2*work2(i,k)*coeres) + +precs2*work2(i,k)*coeres)/den(i,k) psmlt(i,k) = min(max(psmlt(i,k)*dtcld/mstep(i), & -qrs(i,k,2)/mstep(i)),0.) qrs(i,k,2) = qrs(i,k,2) + psmlt(i,k) @@ -677,7 +733,7 @@ SUBROUTINE wsm62D(t, q & coeres = rslope2(i,k,3)*sqrt(rslope(i,k,3)*rslopeb(i,k,3)) pgmlt(i,k) = xka(t(i,k),den(i,k))/xlf & *(t0c-t(i,k))*(precg1*rslope2(i,k,3) & - +precg2*work2(i,k)*coeres) + +precg2*work2(i,k)*coeres)/den(i,k) pgmlt(i,k) = min(max(pgmlt(i,k)*dtcld/mstep(i), & -qrs(i,k,3)/mstep(i)),0.) qrs(i,k,3) = qrs(i,k,3) + pgmlt(i,k) @@ -741,7 +797,7 @@ SUBROUTINE wsm62D(t, q & ENDIF endif if(fallsum_qg.gt.0.) then - tstepgraup(i) = fallsum_qsi*delz(i,kts)/denr*dtcld*1000. & + tstepgraup(i) = fallsum_qg*delz(i,kts)/denr*dtcld*1000. & +tstepgraup(i) IF ( PRESENT (graupelncv) .AND. PRESENT (graupel)) THEN graupelncv(i,lat) = fallsum_qg*delz(i,kts)/denr*dtcld*1000. 
& @@ -749,8 +805,11 @@ SUBROUTINE wsm62D(t, q & graupel(i,lat) = fallsum_qg*delz(i,kts)/denr*dtcld*1000. + graupel(i,lat) ENDIF endif -! if(fallsum.gt.0.)sr(i)=(snowncv(i,lat) + graupelncv(i,lat))/(rainncv(i)+1.e-12) - if(fallsum.gt.0.)sr(i)=(tstepsnow(i) + tstepgraup(i))/(rainncv(i)+1.e-12) + IF ( PRESENT (snowncv)) THEN + if(fallsum.gt.0.)sr(i)=(snowncv(i,lat) + graupelncv(i,lat))/(rainncv(i)+1.e-12) + ELSE + if(fallsum.gt.0.)sr(i)=(tstepsnow(i) + tstepgraup(i))/(rainncv(i)+1.e-12) + ENDIF enddo ! !--------------------------------------------------------------- @@ -988,10 +1047,10 @@ SUBROUTINE wsm62D(t, q & ! paacw: Accretion of cloud water by averaged snow/graupel ! (TG or S, and T>=T0: C->R) !------------------------------------------------------------- - if(qrs(i,k,2).gt.qcrmin.and.qrs(i,k,3).gt.qcrmin) then + if(qsum(i,k) .gt. 1.e-15) then paacw(i,k) = (qrs(i,k,2)*psacw(i,k)+qrs(i,k,3)*pgacw(i,k)) & /(qsum(i,k)) - endif + endif !------------------------------------------------------------- ! pracs: Accretion of snow by rain [HL A11] [LFO 27] ! (TG) @@ -1463,19 +1522,37 @@ REAL FUNCTION fpvs(t,ice,rd,rv,cvap,cliq,cice,hvap,hsub,psat,t0c) ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - END FUNCTION fpvs !------------------------------------------------------------------- - SUBROUTINE wsm6init(den0,denr,dens,cl,cpv,allowed_to_read) + SUBROUTINE wsm6init(den0,denr,dens,cl,cpv,hail_opt,allowed_to_read) !------------------------------------------------------------------- IMPLICIT NONE !------------------------------------------------------------------- !.... constants which may not be tunable REAL, INTENT(IN) :: den0,denr,dens,cl,cpv + INTEGER, INTENT(IN) :: hail_opt ! RAS LOGICAL, INTENT(IN) :: allowed_to_read + +! RAS13.1 define graupel parameters as graupel-like or hail-like, +! depending on namelist option + IF (hail_opt .eq. 1) THEN !Hail! + n0g = 4.e4 + deng = 700. + avtg = 285.0 + bvtg = 0.8 + lamdagmax = 2.e4 + ELSE !Graupel! 
+ n0g = 4.e6 + deng = 500 + avtg = 330.0 + bvtg = 0.8 + lamdagmax = 6.e4 + ENDIF ! pi = 4.*atan(1.) xlv1 = cl-cpv ! qc0 = 4./3.*pi*denr*r0**3*xncr/den0 ! 0.419e-3 -- .61e-3 qck1 = .104*9.8*peaut/(xncr*denr)**(1./3.)/xmyu*den0**(4./3.) ! 7.03 + pidnc = pi*denr/6. ! syb ! bvtr1 = 1.+bvtr bvtr2 = 2.5+.5*bvtr @@ -2068,7 +2145,7 @@ SUBROUTINE nislfv_rain_plm6(im,km,denl,denfacl,tkl,dzl,wwl,rql,rql2, precip1, pr ! skip for no precipitation for all layers allold = 0.0 do k=1,km - allold = allold + qq(k) + allold = allold + qq(k) + qq2(k) enddo if(allold.le.0.0) then cycle i_loop @@ -2459,4 +2536,101 @@ subroutine refl10cm_wsm6 (qv1d, qr1d, qs1d, qg1d, & end subroutine refl10cm_wsm6 !+---+-----------------------------------------------------------------+ +!----------------------------------------------------------------------- + subroutine effectRad_wsm6 (t, qc, qi, qs, rho, qmin, t0c, & + re_qc, re_qi, re_qs, kts, kte, ii, jj) + +!----------------------------------------------------------------------- +! Compute radiation effective radii of cloud water, ice, and snow for +! single-moment microphysics. +! These are entirely consistent with microphysics assumptions, not +! constant or otherwise ad hoc as is internal to most radiation +! schemes. +! Coded and implemented by Soo ya Bae, KIAPS, January 2015. 
+!----------------------------------------------------------------------- + + implicit none + +!..Sub arguments + integer, intent(in) :: kts, kte, ii, jj + real, intent(in) :: qmin + real, intent(in) :: t0c + real, dimension( kts:kte ), intent(in):: t + real, dimension( kts:kte ), intent(in):: qc + real, dimension( kts:kte ), intent(in):: qi + real, dimension( kts:kte ), intent(in):: qs + real, dimension( kts:kte ), intent(in):: rho + real, dimension( kts:kte ), intent(inout):: re_qc + real, dimension( kts:kte ), intent(inout):: re_qi + real, dimension( kts:kte ), intent(inout):: re_qs +!..Local variables + integer:: i,k + integer :: inu_c + real, dimension( kts:kte ):: ni + real, dimension( kts:kte ):: rqc + real, dimension( kts:kte ):: rqi + real, dimension( kts:kte ):: rni + real, dimension( kts:kte ):: rqs + real :: temp + real :: lamdac + real :: supcol, n0sfac, lamdas + real :: diai ! diameter of ice in m + logical :: has_qc, has_qi, has_qs +!..Minimum microphys values + real, parameter :: R1 = 1.E-12 + real, parameter :: R2 = 1.E-6 +!..Mass power law relations: mass = am*D**bm + real, parameter :: bm_r = 3.0 + real, parameter :: obmr = 1.0/bm_r + real, parameter :: nc0 = 3.E8 +!----------------------------------------------------------------------- + has_qc = .false. + has_qi = .false. + has_qs = .false. + + do k = kts, kte + ! for cloud + rqc(k) = max(R1, qc(k)*rho(k)) + if (rqc(k).gt.R1) has_qc = .true. + ! for ice + rqi(k) = max(R1, qi(k)*rho(k)) + temp = (rho(k)*max(qi(k),qmin)) + temp = sqrt(sqrt(temp*temp*temp)) + ni(k) = min(max(5.38e7*temp,1.e3),1.e6) + rni(k)= max(R2, ni(k)*rho(k)) + if (rqi(k).gt.R1 .and. rni(k).gt.R2) has_qi = .true. + ! for snow + rqs(k) = max(R1, qs(k)*rho(k)) + if (rqs(k).gt.R1) has_qs = .true. + enddo + + if (has_qc) then + do k=kts,kte + if (rqc(k).le.R1) CYCLE + lamdac = (pidnc*nc0/rqc(k))**obmr + re_qc(k) = max(2.51E-6,min(1.5*(1.0/lamdac),50.E-6)) + enddo + endif + + if (has_qi) then + do k=kts,kte + if (rqi(k).le.R1 .or. 
rni(k).le.R2) CYCLE + diai = 11.9*sqrt(rqi(k)/ni(k)) + re_qi(k) = max(10.01E-6,min(0.75*0.163*diai,125.E-6)) + enddo + endif + + if (has_qs) then + do k=kts,kte + if (rqs(k).le.R1) CYCLE + supcol = t0c-t(k) + n0sfac = max(min(exp(alpha*supcol),n0smax/n0s),1.) + lamdas = sqrt(sqrt(pidn0s*n0sfac/rqs(k))) + re_qs(k) = max(25.E-6,min(0.5*(1./lamdas), 999.E-6)) + enddo + endif + + end subroutine effectRad_wsm6 +!----------------------------------------------------------------------- + END MODULE module_mp_wsm6 diff --git a/src/core_atmosphere/physics/physics_wrf/module_ra_rrtmg_lw.F b/src/core_atmosphere/physics/physics_wrf/module_ra_rrtmg_lw.F index 4ede5288bf..30d6c014bb 100644 --- a/src/core_atmosphere/physics/physics_wrf/module_ra_rrtmg_lw.F +++ b/src/core_atmosphere/physics/physics_wrf/module_ra_rrtmg_lw.F @@ -2095,8 +2095,8 @@ module mcica_subcol_gen_lw !------------------------------------------------------------------ subroutine mcica_subcol_lw(iplon, ncol, nlay, icld, permuteseed, irng, play, & - cldfrac, ciwp, clwp, rei, rel, tauc, cldfmcl, & - ciwpmcl, clwpmcl, reicmcl, relqmcl, taucmcl) + cldfrac, ciwp, clwp, cswp, rei, rel, res, tauc, cldfmcl, & + ciwpmcl, clwpmcl, cswpmcl, reicmcl, relqmcl, resnmcl, taucmcl) ! ----- Input ----- ! Control @@ -2129,10 +2129,14 @@ subroutine mcica_subcol_lw(iplon, ncol, nlay, icld, permuteseed, irng, play, & ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: clwp(:,:) ! in-cloud liquid water path ! Dimensions: (ncol,nlay) + real(kind=rb), intent(in) :: cswp(:,:) ! in-cloud snow path + ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: rei(:,:) ! cloud ice particle size ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: rel(:,:) ! cloud liquid particle size ! Dimensions: (ncol,nlay) + real(kind=rb), intent(in) :: res(:,:) ! snow particle size + ! Dimensions: (ncol,nlay) ! ----- Output ----- ! 
Atmosphere/clouds - cldprmc [mcica] @@ -2142,10 +2146,14 @@ subroutine mcica_subcol_lw(iplon, ncol, nlay, icld, permuteseed, irng, play, & ! Dimensions: (ngptlw,ncol,nlay) real(kind=rb), intent(out) :: clwpmcl(:,:,:) ! in-cloud liquid water path [mcica] ! Dimensions: (ngptlw,ncol,nlay) + real(kind=rb), intent(out) :: cswpmcl(:,:,:) ! in-cloud snow path [mcica] + ! Dimensions: (ngptlw,ncol,nlay) real(kind=rb), intent(out) :: relqmcl(:,:) ! liquid particle size (microns) ! Dimensions: (ncol,nlay) real(kind=rb), intent(out) :: reicmcl(:,:) ! ice partcle size (microns) ! Dimensions: (ncol,nlay) + real(kind=rb), intent(out) :: resnmcl(:,:) ! snow partcle size (microns) + ! Dimensions: (ncol,nlay) real(kind=rb), intent(out) :: taucmcl(:,:,:) ! in-cloud optical depth [mcica] ! Dimensions: (ngptlw,ncol,nlay) ! real(kind=rb), intent(out) :: ssacmcl(:,:,:) ! in-cloud single scattering albedo [mcica] @@ -2179,6 +2187,7 @@ subroutine mcica_subcol_lw(iplon, ncol, nlay, icld, permuteseed, irng, play, & reicmcl(:ncol,:nlay) = rei(:ncol,:nlay) relqmcl(:ncol,:nlay) = rel(:ncol,:nlay) + resnmcl(:ncol,:nlay) = res(:ncol,:nlay) pmid(:ncol,:nlay) = play(:ncol,:nlay)*1.e2_rb ! Convert input ice and liquid cloud water paths to specific humidity ice and liquid components @@ -2197,15 +2206,15 @@ subroutine mcica_subcol_lw(iplon, ncol, nlay, icld, permuteseed, irng, play, & ! enddo ! 
Generate the stochastic subcolumns of cloud optical properties for the longwave; - call generate_stochastic_clouds (ncol, nlay, nsubclw, icld, irng, pmid, cldfrac, clwp, ciwp, tauc, & - cldfmcl, clwpmcl, ciwpmcl, taucmcl, permuteseed) + call generate_stochastic_clouds (ncol, nlay, nsubclw, icld, irng, pmid, cldfrac, clwp, ciwp, cswp, tauc, & + cldfmcl, clwpmcl, ciwpmcl, cswpmcl, taucmcl, permuteseed) end subroutine mcica_subcol_lw !------------------------------------------------------------------------------------------------- - subroutine generate_stochastic_clouds(ncol, nlay, nsubcol, icld, irng, pmid, cld, clwp, ciwp, tauc, & - cld_stoch, clwp_stoch, ciwp_stoch, tauc_stoch, changeSeed) + subroutine generate_stochastic_clouds(ncol, nlay, nsubcol, icld, irng, pmid, cld, clwp, ciwp, cswp, tauc, & + cld_stoch, clwp_stoch, ciwp_stoch, cswp_stoch, tauc_stoch, changeSeed) !------------------------------------------------------------------------------------------------- !---------------------------------------------------------------------------------------------------------------- @@ -2289,6 +2298,8 @@ subroutine generate_stochastic_clouds(ncol, nlay, nsubcol, icld, irng, pmid, cld ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: ciwp(:,:) ! in-cloud ice water path ! Dimensions: (ncol,nlay) + real(kind=rb), intent(in) :: cswp(:,:) ! in-cloud snow path + ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: tauc(:,:,:) ! in-cloud optical depth ! Dimensions: (nbndlw,ncol,nlay) ! real(kind=rb), intent(in) :: ssac(:,:,:) ! in-cloud single scattering albedo @@ -2304,6 +2315,8 @@ subroutine generate_stochastic_clouds(ncol, nlay, nsubcol, icld, irng, pmid, cld ! Dimensions: (ngptlw,ncol,nlay) real(kind=rb), intent(out) :: ciwp_stoch(:,:,:) ! subcolumn in-cloud ice water path ! Dimensions: (ngptlw,ncol,nlay) + real(kind=rb), intent(out) :: cswp_stoch(:,:,:) ! subcolumn in-cloud snow path + ! 
Dimensions: (ngptlw,ncol,nlay) real(kind=rb), intent(out) :: tauc_stoch(:,:,:) ! subcolumn in-cloud optical depth ! Dimensions: (ngptlw,ncol,nlay) ! real(kind=rb), intent(out) :: ssac_stoch(:,:,:)! subcolumn in-cloud single scattering albedo @@ -2530,6 +2543,7 @@ subroutine generate_stochastic_clouds(ncol, nlay, nsubcol, icld, irng, pmid, cld cld_stoch(isubcol,i,ilev) = 1._rb clwp_stoch(isubcol,i,ilev) = clwp(i,ilev) ciwp_stoch(isubcol,i,ilev) = ciwp(i,ilev) + cswp_stoch(isubcol,i,ilev) = cswp(i,ilev) n = ngb(isubcol) tauc_stoch(isubcol,i,ilev) = tauc(n,i,ilev) ! ssac_stoch(isubcol,i,ilev) = ssac(n,i,ilev) @@ -2538,6 +2552,7 @@ subroutine generate_stochastic_clouds(ncol, nlay, nsubcol, icld, irng, pmid, cld cld_stoch(isubcol,i,ilev) = 0._rb clwp_stoch(isubcol,i,ilev) = 0._rb ciwp_stoch(isubcol,i,ilev) = 0._rb + cswp_stoch(isubcol,i,ilev) = 0._rb tauc_stoch(isubcol,i,ilev) = 0._rb ! ssac_stoch(isubcol,i,ilev) = 1._rb ! asmc_stoch(isubcol,i,ilev) = 1._rb @@ -2644,7 +2659,7 @@ module rrtmg_lw_cldprmc ! ------------------------------------------------------------------------------ subroutine cldprmc(nlayers, inflag, iceflag, liqflag, cldfmc, & - ciwpmc, clwpmc, reicmc, relqmc, ncbands, taucmc) + ciwpmc, clwpmc, cswpmc, reicmc, relqmc, resnmc, ncbands, taucmc) ! ------------------------------------------------------------------------------ ! Purpose: Compute the cloud optical depth(s) for each cloudy layer. @@ -2662,10 +2677,14 @@ subroutine cldprmc(nlayers, inflag, iceflag, liqflag, cldfmc, & ! Dimensions: (ngptlw,nlayers) real(kind=rb), intent(in) :: clwpmc(:,:) ! cloud liquid water path [mcica] ! Dimensions: (ngptlw,nlayers) + real(kind=rb), intent(in) :: cswpmc(:,:) ! cloud snow path [mcica] + ! Dimensions: (ngptlw,nlayers) real(kind=rb), intent(in) :: relqmc(:) ! liquid particle effective radius (microns) ! Dimensions: (nlayers) real(kind=rb), intent(in) :: reicmc(:) ! ice particle effective radius (microns) ! 
Dimensions: (nlayers) + real(kind=rb), intent(in) :: resnmc(:) ! snow particle effective radius (microns) + ! Dimensions: (nlayers) ! specific definition of reicmc depends on setting of iceflag: ! iceflag = 0: ice effective radius, r_ec, (Ebert and Curry, 1992), ! r_ec must be >= 10.0 microns @@ -2693,13 +2712,16 @@ subroutine cldprmc(nlayers, inflag, iceflag, liqflag, cldfmc, & real(kind=rb) :: abscoice(ngptlw) ! ice absorption coefficients real(kind=rb) :: abscoliq(ngptlw) ! liquid absorption coefficients + real(kind=rb) :: abscosno(ngptlw) ! snow absorption coefficients real(kind=rb) :: cwp ! cloud water path real(kind=rb) :: radice ! cloud ice effective size (microns) real(kind=rb) :: factor ! real(kind=rb) :: fint ! real(kind=rb) :: radliq ! cloud liquid droplet radius (microns) + real(kind=rb) :: radsno ! cloud snow effective size (microns) real(kind=rb), parameter :: eps = 1.e-6_rb ! epsilon real(kind=rb), parameter :: cldmin = 1.e-20_rb ! minimum value for cloud quantities + character*80 errmess ! ------- Definitions ------- @@ -2758,7 +2780,7 @@ subroutine cldprmc(nlayers, inflag, iceflag, liqflag, cldfmc, & data icb /1,2,3,3,3,4,4,4,5, 5, 5, 5, 5, 5, 5, 5/ - hvrclc = '$Revision: 1.8 $' +!jm not thread safe hvrclc = '$Revision: 1.8 $' ncbands = 1 @@ -2773,7 +2795,7 @@ subroutine cldprmc(nlayers, inflag, iceflag, liqflag, cldfmc, & do lay = 1, nlayers do ig = 1, ngptlw - cwp = ciwpmc(ig,lay) + clwpmc(ig,lay) + cwp = ciwpmc(ig,lay) + clwpmc(ig,lay) + cswpmc(ig,lay) if (cldfmc(ig,lay) .ge. cldmin .and. & (cwp .ge. cldmin .or. taucmc(ig,lay) .ge. cldmin)) then @@ -2788,16 +2810,18 @@ subroutine cldprmc(nlayers, inflag, iceflag, liqflag, cldfmc, & ! taucmc(ig,lay) = abscld1 * cwp ! Separate treatement of ice clouds and water clouds. - elseif(inflag .eq. 2) then + elseif(inflag .ge. 2) then radice = reicmc(lay) ! Calculation of absorption coefficients due to ice clouds. - if (ciwpmc(ig,lay) .eq. 0.0_rb) then + if ((ciwpmc(ig,lay)+cswpmc(ig,lay)) .eq. 
0.0_rb) then abscoice(ig) = 0.0_rb + abscosno(ig) = 0.0_rb elseif (iceflag .eq. 0) then if (radice .lt. 10.0_rb) stop 'ICE RADIUS TOO SMALL' abscoice(ig) = absice0(1) + absice0(2)/radice + abscosno(ig) = 0.0_rb elseif (iceflag .eq. 1) then if (radice .lt. 13.0_rb .or. radice .gt. 130._rb) stop & @@ -2805,6 +2829,7 @@ subroutine cldprmc(nlayers, inflag, iceflag, liqflag, cldfmc, & ncbands = 5 ib = icb(ngb(ig)) abscoice(ig) = absice1(1,ib) + absice1(2,ib)/radice + abscosno(ig) = 0.0_rb ! For iceflag=2 option, ice particle effective radius is limited to 5.0 to 131.0 microns @@ -2819,11 +2844,17 @@ subroutine cldprmc(nlayers, inflag, iceflag, liqflag, cldfmc, & abscoice(ig) = & absice2(index,ib) + fint * & (absice2(index+1,ib) - (absice2(index,ib))) + abscosno(ig) = 0.0_rb ! For iceflag=3 option, ice particle generalized effective size is limited to 5.0 to 140.0 microns - elseif (iceflag .eq. 3) then - if (radice .lt. 5.0_rb .or. radice .gt. 140.0_rb) stop 'ICE GENERALIZED EFFECTIVE SIZE OUT OF BOUNDS' + elseif (iceflag .ge. 3) then + if (radice .lt. 5.0_rb .or. radice .gt. 140.0_rb) then + write(errmess,'(A,i5,i5,f8.2,f8.2)' ) & + 'ERROR: ICE GENERALIZED EFFECTIVE SIZE OUT OF BOUNDS' & + ,ig, lay, ciwpmc(ig,lay), radice + call wrf_error_fatal(errmess) + end if ncbands = 16 factor = (radice - 2._rb)/3._rb index = int(factor) @@ -2833,8 +2864,30 @@ subroutine cldprmc(nlayers, inflag, iceflag, liqflag, cldfmc, & abscoice(ig) = & absice3(index,ib) + fint * & (absice3(index+1,ib) - (absice3(index,ib))) + abscosno(ig) = 0.0_rb endif + +!..Incorporate additional effects due to snow. + if (cswpmc(ig,lay).gt.0.0_rb .and. iceflag .eq. 5) then + radsno = resnmc(lay) + if (radsno .lt. 5.0_rb .or. radsno .gt. 
140.0_rb) then + write(errmess,'(A,i5,i5,f8.2,f8.2)' ) & + 'ERROR: SNOW GENERALIZED EFFECTIVE SIZE OUT OF BOUNDS' & + ,ig, lay, cswpmc(ig,lay), radsno + call wrf_error_fatal(errmess) + end if + ncbands = 16 + factor = (radsno - 2._rb)/3._rb + index = int(factor) + if (index .eq. 46) index = 45 + fint = factor - float(index) + ib = ngb(ig) + abscosno(ig) = & + absice3(index,ib) + fint * & + (absice3(index+1,ib) - (absice3(index,ib))) + endif + ! Calculation of absorption coefficients due to water clouds. if (clwpmc(ig,lay) .eq. 0.0_rb) then @@ -2858,7 +2911,8 @@ subroutine cldprmc(nlayers, inflag, iceflag, liqflag, cldfmc, & endif taucmc(ig,lay) = ciwpmc(ig,lay) * abscoice(ig) + & - clwpmc(ig,lay) * abscoliq(ig) + clwpmc(ig,lay) * abscoliq(ig) + & + cswpmc(ig,lay) * abscosno(ig) endif endif @@ -3091,7 +3145,7 @@ subroutine rtrnmc(nlayers, istart, iend, iout, pz, semiss, ncbands, & ! htrc ! clear sky longwave heating rate (k/day) - hvrrtc = '$Revision: 1.3 $' +!jm not thread safe hvrrtc = '$Revision: 1.3 $' do ibnd = 1,nbndlw if (ibnd.eq.1 .or. ibnd.eq.4 .or. ibnd.ge.10) then @@ -3510,7 +3564,7 @@ subroutine setcoef(nlayers, istart, pavel, tavel, tz, tbound, semiss, & real(kind=rb) :: plog, fp, ft, ft1, water, scalefac, factor, compfp - hvrset = '$Revision: 1.3 $' +!jm not thread safe hvrset = '$Revision: 1.3 $' stpfac = 296._rb/1013._rb @@ -4884,11 +4938,11 @@ subroutine taumol(nlayers, pavel, wx, coldry, & ! ----- Output ----- real(kind=rb), intent(out) :: fracs(:,:) ! planck fractions - ! Dimensions: (nlayers,ngptlw) + ! Dimensions: (nlayers+1,ngptlw) real(kind=rb), intent(out) :: taug(:,:) ! gaseous optical depth - ! Dimensions: (nlayers,ngptlw) + ! Dimensions: (nlayers+1,ngptlw) - hvrtau = '$Revision: 1.7 $' +!jm not thread safe hvrtau = '$Revision: 1.7 $' ! Calculate gaseous optical depth and planck fractions for each spectral band. @@ -7856,7 +7910,7 @@ subroutine rrtmg_lw_ini(cpdair) ! BPADE Inverse of the Pade approximation constant ! 
- hvrini = '$Revision: 1.3 $' +!jm not thread safe hvrini = '$Revision: 1.3 $' ! Initialize model data call lwdatinit(cpdair) @@ -7971,7 +8025,7 @@ subroutine lwdatinit(cpdair) use parrrtm, only : maxxsec, maxinpx use rrlw_con, only: heatfac, grav, planck, boltz, & clight, avogad, alosmt, gascon, radcn1, radcn2, & - sbcnst, secdy + sbcnst, secdy, fluxfac, oneminus, pi use rrlw_vsn save @@ -8035,6 +8089,12 @@ subroutine lwdatinit(cpdair) ! (W cm-2 K-4) secdy = 8.6400e4_rb ! Number of seconds per day ! (s d-1) + +!jm moved here for thread safety, 20141107 + oneminus = 1._rb - 1.e-6_rb + pi = 2._rb * asin(1._rb) + fluxfac = pi * 2.e4_rb ! orig: fluxfac = pi * 2.d4 + ! ! units are generally cgs ! @@ -10529,7 +10589,7 @@ subroutine rrtmg_lw & h2ovmr ,o3vmr ,co2vmr ,ch4vmr ,n2ovmr ,o2vmr , & cfc11vmr,cfc12vmr,cfc22vmr,ccl4vmr ,emis , & inflglw ,iceflglw,liqflglw,cldfmcl , & - taucmcl ,ciwpmcl ,clwpmcl ,reicmcl ,relqmcl , & + taucmcl ,ciwpmcl ,clwpmcl , cswpmcl ,reicmcl ,relqmcl , resnmcl , & tauaer , & uflx ,dflx ,hr ,uflxc ,dflxc, hrc) @@ -10673,6 +10733,8 @@ subroutine rrtmg_lw & ! Dimensions: (ngptlw,ncol,nlay) real(kind=rb), intent(in) :: clwpmcl(:,:,:) ! In-cloud liquid water path (g/m2) ! Dimensions: (ngptlw,ncol,nlay) + real(kind=rb), intent(in) :: cswpmcl(:,:,:) ! In-cloud snow water path (g/m2) + ! Dimensions: (ngptlw,ncol,nlay) real(kind=rb), intent(in) :: reicmcl(:,:) ! Cloud ice particle effective size (microns) ! Dimensions: (ncol,nlay) ! specific definition of reicmcl depends on setting of iceflglw: @@ -10687,6 +10749,8 @@ subroutine rrtmg_lw & ! [dge = 1.0315 * r_ec] real(kind=rb), intent(in) :: relqmcl(:,:) ! Cloud water drop effective radius (microns) ! Dimensions: (ncol,nlay) + real(kind=rb), intent(in) :: resnmcl(:,:) ! Snow effective radius (microns) + ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: taucmcl(:,:,:) ! In-cloud optical depth ! Dimensions: (ngptlw,ncol,nlay) ! real(kind=rb), intent(in) :: ssacmcl(:,:,:) ! 
In-cloud single scattering albedo @@ -10813,8 +10877,10 @@ subroutine rrtmg_lw & real(kind=rb) :: cldfmc(ngptlw,nlay+1) ! cloud fraction [mcica] real(kind=rb) :: ciwpmc(ngptlw,nlay+1) ! in-cloud ice water path [mcica] real(kind=rb) :: clwpmc(ngptlw,nlay+1) ! in-cloud liquid water path [mcica] + real(kind=rb) :: cswpmc(ngptlw,nlay+1) ! in-cloud snow path [mcica] real(kind=rb) :: relqmc(nlay+1) ! liquid particle effective radius (microns) real(kind=rb) :: reicmc(nlay+1) ! ice particle effective size (microns) + real(kind=rb) :: resnmc(nlay+1) ! snow particle effective size (microns) real(kind=rb) :: taucmc(ngptlw,nlay+1) ! in-cloud optical depth [mcica] ! real(kind=rb) :: ssacmc(ngptlw,nlay+1) ! in-cloud single scattering albedo [mcica] ! for future expansion @@ -10836,9 +10902,9 @@ subroutine rrtmg_lw & ! ! Initializations - oneminus = 1._rb - 1.e-6_rb - pi = 2._rb * asin(1._rb) - fluxfac = pi * 2.e4_rb ! orig: fluxfac = pi * 2.d4 +!jm not thread safe oneminus = 1._rb - 1.e-6_rb +!jm not thread safe pi = 2._rb * asin(1._rb) +!jm not thread safe fluxfac = pi * 2.e4_rb ! orig: fluxfac = pi * 2.d4 istart = 1 iend = 16 iout = 0 @@ -10880,10 +10946,10 @@ subroutine rrtmg_lw & play, plev, tlay, tlev, tsfc, h2ovmr, & o3vmr, co2vmr, ch4vmr, n2ovmr, o2vmr, cfc11vmr, cfc12vmr, & cfc22vmr, ccl4vmr, emis, inflglw, iceflglw, liqflglw, & - cldfmcl, taucmcl, ciwpmcl, clwpmcl, reicmcl, relqmcl, tauaer, & + cldfmcl, taucmcl, ciwpmcl, clwpmcl, cswpmcl, reicmcl, relqmcl, resnmcl, tauaer, & nlayers, pavel, pz, tavel, tz, tbound, semiss, coldry, & wkl, wbrodl, wx, pwvcm, inflag, iceflag, liqflag, & - cldfmc, taucmc, ciwpmc, clwpmc, reicmc, relqmc, taua) + cldfmc, taucmc, ciwpmc, clwpmc, cswpmc, reicmc, relqmc, resnmc, taua) ! For cloudy atmosphere, use cldprop to set cloud optical properties based on ! input cloud physical properties. Select method based on choices described @@ -10892,7 +10958,7 @@ subroutine rrtmg_lw & ! optical depth are transferred to rrtmg_lw arrays in cldprop. 
call cldprmc(nlayers, inflag, iceflag, liqflag, cldfmc, ciwpmc, & - clwpmc, reicmc, relqmc, ncbands, taucmc) + clwpmc, cswpmc, reicmc, relqmc, resnmc, ncbands, taucmc) ! Calculate information needed by the radiative transfer routine ! that is specific to this atmosphere, especially some of the @@ -10975,10 +11041,10 @@ subroutine inatm (iplon, nlay, icld, iaer, & play, plev, tlay, tlev, tsfc, h2ovmr, & o3vmr, co2vmr, ch4vmr, n2ovmr, o2vmr, cfc11vmr, cfc12vmr, & cfc22vmr, ccl4vmr, emis, inflglw, iceflglw, liqflglw, & - cldfmcl, taucmcl, ciwpmcl, clwpmcl, reicmcl, relqmcl, tauaer, & + cldfmcl, taucmcl, ciwpmcl, clwpmcl, cswpmcl, reicmcl, relqmcl, resnmcl, tauaer, & nlayers, pavel, pz, tavel, tz, tbound, semiss, coldry, & wkl, wbrodl, wx, pwvcm, inflag, iceflag, liqflag, & - cldfmc, taucmc, ciwpmc, clwpmc, reicmc, relqmc, taua) + cldfmc, taucmc, ciwpmc, clwpmc, cswpmc, reicmc, relqmc, resnmc, taua) !*************************************************************************** ! ! Input atmospheric profile from GCM, and prepare it for use in RRTMG_LW. @@ -11043,10 +11109,14 @@ subroutine inatm (iplon, nlay, icld, iaer, & ! Dimensions: (ngptlw,ncol,nlay) real(kind=rb), intent(in) :: clwpmcl(:,:,:) ! In-cloud liquid water path (g/m2) ! Dimensions: (ngptlw,ncol,nlay) + real(kind=rb), intent(in) :: cswpmcl(:,:,:) ! In-cloud snow water path (g/m2) + ! Dimensions: (ngptlw,ncol,nlay) real(kind=rb), intent(in) :: relqmcl(:,:) ! Cloud water drop effective radius (microns) ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: reicmcl(:,:) ! Cloud ice effective size (microns) ! Dimensions: (ncol,nlay) + real(kind=rb), intent(in) :: resnmcl(:,:) ! Snow effective size (microns) + ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: taucmcl(:,:,:) ! In-cloud optical depth ! Dimensions: (ngptlw,ncol,nlay) real(kind=rb), intent(in) :: tauaer(:,:,:) ! Aerosol optical depth @@ -11088,10 +11158,14 @@ subroutine inatm (iplon, nlay, icld, iaer, & ! 
Dimensions: (ngptlw,nlay) real(kind=rb), intent(out) :: clwpmc(:,:) ! in-cloud liquid water path [mcica] ! Dimensions: (ngptlw,nlay) + real(kind=rb), intent(out) :: cswpmc(:,:) ! in-cloud snow path [mcica] + ! Dimensions: (ngptlw,nlay) real(kind=rb), intent(out) :: relqmc(:) ! liquid particle effective radius (microns) ! Dimensions: (nlay) real(kind=rb), intent(out) :: reicmc(:) ! ice particle effective size (microns) ! Dimensions: (nlay) + real(kind=rb), intent(out) :: resnmc(:) ! snow effective size (microns) + ! Dimensions: (nlay) real(kind=rb), intent(out) :: taucmc(:,:) ! in-cloud optical depth [mcica] ! Dimensions: (ngptlw,nlay) real(kind=rb), intent(out) :: taua(:,:) ! aerosol optical depth @@ -11137,8 +11211,10 @@ subroutine inatm (iplon, nlay, icld, iaer, & taucmc(:,:) = 0.0_rb ciwpmc(:,:) = 0.0_rb clwpmc(:,:) = 0.0_rb + cswpmc(:,:) = 0.0_rb reicmc(:) = 0.0_rb relqmc(:) = 0.0_rb + resnmc(:) = 0.0_rb taua(:,:) = 0.0_rb amttl = 0.0_rb wvttl = 0.0_rb @@ -11278,9 +11354,11 @@ subroutine inatm (iplon, nlay, icld, iaer, & taucmc(ig,l) = taucmcl(ig,iplon,l) ciwpmc(ig,l) = ciwpmcl(ig,iplon,l) clwpmc(ig,l) = clwpmcl(ig,iplon,l) + cswpmc(ig,l) = cswpmcl(ig,iplon,l) enddo reicmc(l) = reicmcl(iplon,l) relqmc(l) = relqmcl(iplon,l) + resnmc(l) = resnmcl(iplon,l) enddo ! If an extra layer is being used in RRTMG, set all cloud properties to zero in the extra layer. @@ -11304,7 +11382,7 @@ MODULE module_ra_rrtmg_lw #if defined(mpas) !MPAS specific (Laura D. Fowler): -use mpas_atmphys_constants,only: cp +use mpas_atmphys_constants,only: cp,g=>gravity use mpas_atmphys_o3climatology,only: vinterp_ozn !> add-ons and modifications to sourcecode: @@ -11318,12 +11396,35 @@ MODULE module_ra_rrtmg_lw !> with if (specparm1 .lt. 0.125_rb) then, in accordance with the bug fix made in WRF !> revision 3.7. !> Laura D. Fowler (birch.mmm.ucar.edu) / 2015-05-04. 
+!> * at the bottom of subroutine rrtmg_lwrad, changed the definition of the arrays lwupt,lwuptc, +!> lwdnt,and lwdntc so that they are now defined at the top-of-the-atmosphere. +!> Laura D. Fowler (laura@ucar.edu) / 2016-06-23. +!> * cleaned-up the subroutine rrtmg_lwrad in preparation for the implementation of the calculation of the +!> cloud optical properties when the effective radii for cloud water, cloud ice, and snow are provided by +!> the cloud microphysics schemes (note that for now, only the Thompson cloud microphysics scheme has the +!> option to calculate cloud radii). With the -g option, results are exactly the same as the original +!> subroutine. +!> Laura D. Fowler (laura@ucar.edu) / 2016-06-30. +!> * updated module_ra_rrtmg_lw.F using module_ra_rrtmg_lw.F from WRF version 3.8, namely to update the +!> calculation of the cloud optical properties to include the radiative effect of snow. +!> Laura D. Fowler (laura@ucar.edu) / 2016-07-05. +!> * added the effective radii for cloud water, cloud ice, and snow calculated in the Thompson cloud +!> microphysics scheme as inputs to the subroutine rrtmg_lwrad. revised the initialization of arrays rel, +!> rei, and res, accordingly. +!> Laura D. Fowler (laura@ucar.edu) / 2016-07-07. +!> * added diagnostics of the effective radii for cloud water, cloud ice, and snow used in rrtmg_lwrad. +!> Laura D. Fowler (laura@ucar.edu) / 2016-07-08. !MPAS specific end. 
#else use module_model_constants, only : cp use module_wrf_error +#if (HWRF == 1) + USE module_state_description, ONLY : FER_MP_HIRES, FER_MP_HIRES_ADVECT, ETAMP_HWRF +#else + USE module_state_description, ONLY : FER_MP_HIRES, FER_MP_HIRES_ADVECT +#endif !use module_dm #endif @@ -11333,743 +11434,438 @@ MODULE module_ra_rrtmg_lw use mcica_subcol_gen_lw, only: mcica_subcol_lw real retab(95) - data retab / & - 5.92779, 6.26422, 6.61973, 6.99539, 7.39234, & - 7.81177, 8.25496, 8.72323, 9.21800, 9.74075, 10.2930, & - 10.8765, 11.4929, 12.1440, 12.8317, 13.5581, 14.2319, & - 15.0351, 15.8799, 16.7674, 17.6986, 18.6744, 19.6955, & - 20.7623, 21.8757, 23.0364, 24.2452, 25.5034, 26.8125, & - 27.7895, 28.6450, 29.4167, 30.1088, 30.7306, 31.2943, & - 31.8151, 32.3077, 32.7870, 33.2657, 33.7540, 34.2601, & - 34.7892, 35.3442, 35.9255, 36.5316, 37.1602, 37.8078, & - 38.4720, 39.1508, 39.8442, 40.5552, 41.2912, 42.0635, & - 42.8876, 43.7863, 44.7853, 45.9170, 47.2165, 48.7221, & - 50.4710, 52.4980, 54.8315, 57.4898, 60.4785, 63.7898, & - 65.5604, 71.2885, 75.4113, 79.7368, 84.2351, 88.8833, & - 93.6658, 98.5739, 103.603, 108.752, 114.025, 119.424, & - 124.954, 130.630, 136.457, 142.446, 148.608, 154.956, & - 161.503, 168.262, 175.248, 182.473, 189.952, 197.699, & - 205.728, 214.055, 222.694, 231.661, 240.971, 250.639/ + data retab / & + 5.92779, 6.26422, 6.61973, 6.99539, 7.39234, & + 7.81177, 8.25496, 8.72323, 9.21800, 9.74075, 10.2930, & + 10.8765, 11.4929, 12.1440, 12.8317, 13.5581, 14.2319, & + 15.0351, 15.8799, 16.7674, 17.6986, 18.6744, 19.6955, & + 20.7623, 21.8757, 23.0364, 24.2452, 25.5034, 26.8125, & + 27.7895, 28.6450, 29.4167, 30.1088, 30.7306, 31.2943, & + 31.8151, 32.3077, 32.7870, 33.2657, 33.7540, 34.2601, & + 34.7892, 35.3442, 35.9255, 36.5316, 37.1602, 37.8078, & + 38.4720, 39.1508, 39.8442, 40.5552, 41.2912, 42.0635, & + 42.8876, 43.7863, 44.7853, 45.9170, 47.2165, 48.7221, & + 50.4710, 52.4980, 54.8315, 57.4898, 60.4785, 63.7898, & + 65.5604, 71.2885, 
75.4113, 79.7368, 84.2351, 88.8833, & + 93.6658, 98.5739, 103.603, 108.752, 114.025, 119.424, & + 124.954, 130.630, 136.457, 142.446, 148.608, 154.956, & + 161.503, 168.262, 175.248, 182.473, 189.952, 197.699, & + 205.728, 214.055, 222.694, 231.661, 240.971, 250.639/ ! save retab +#if !defined(mpas) ! For buffer layer adjustment. Steven Cavallo, Dec 2010. integer , save :: nlayers +#endif real, PARAMETER :: deltap = 4. ! Pressure interval for buffer layer in mb CONTAINS -!------------------------------------------------------------------ - SUBROUTINE RRTMG_LWRAD( & - rthratenlw, & - lwupt, lwuptc, lwdnt, lwdntc, & - lwupb, lwupbc, lwdnb, lwdnbc, & -! lwupflx, lwupflxc, lwdnflx, lwdnflxc, & - glw, olr, lwcf, emiss, & - p8w, p3d, pi3d, & - dz8w, tsk, t3d, t8w, rho3d, r, g, & - icloud, warm_rain, cldfra3d, & - f_ice_phy, f_rain_phy, & - xland, xice, snow, & - qv3d, qc3d, qr3d, & - qi3d, qs3d, qg3d, & - o3input, o33d, & - f_qv, f_qc, f_qr, f_qi, f_qs, f_qg, & - tauaerlw1,tauaerlw2,tauaerlw3,tauaerlw4, & ! czhao - tauaerlw5,tauaerlw6,tauaerlw7,tauaerlw8, & ! czhao - tauaerlw9,tauaerlw10,tauaerlw11,tauaerlw12, & ! czhao - tauaerlw13,tauaerlw14,tauaerlw15,tauaerlw16, & ! 
czhao - aer_ra_feedback, & !czhao -!jdfcz progn,prescribe, & !czhao - progn, & !czhao - qndrop3d,f_qndrop, & !czhao +!================================================================================================================= + subroutine rrtmg_lwrad( & + p3d,p8w,pi3d,t3d,t8w,dz8w,qv3d,qc3d,qr3d, & + qi3d,qs3d,qg3d,cldfra3d,o33d,tsk,emiss, & + xland,xice,snow,icloud,o3input,noznlevels, & + pin,o3clim,glw,olr,lwcf,rthratenlw, & + has_reqc,has_reqi,has_reqs,re_cloud, & + re_ice,re_snow,rre_cloud,rre_ice,rre_snow, & + lwupt,lwuptc,lwdnt,lwdntc, & + lwupb,lwupbc,lwdnb,lwdnbc, & + lwupflx, lwupflxc, lwdnflx, lwdnflxc, & ids,ide, jds,jde, kds,kde, & ims,ime, jms,jme, kms,kme, & - its,ite, jts,jte, kts,kte, & - lwupflx, lwupflxc, lwdnflx, lwdnflxc & -#if defined(mpas) - ,noznlevels,pin,o3clim & -#endif - ) -!------------------------------------------------------------------ - IMPLICIT NONE -!------------------------------------------------------------------ - LOGICAL, INTENT(IN ) :: warm_rain -! - INTEGER, INTENT(IN ) :: ids,ide, jds,jde, kds,kde, & - ims,ime, jms,jme, kms,kme, & - its,ite, jts,jte, kts,kte - - INTEGER, INTENT(IN ) :: ICLOUD -! - REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & - INTENT(IN ) :: dz8w, & - t3d, & - t8w, & - p8w, & - p3d, & - pi3d, & - rho3d - - REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & - INTENT(INOUT) :: RTHRATENLW - - REAL, DIMENSION( ims:ime, jms:jme ) , & - INTENT(INOUT) :: GLW, & - OLR, & - LWCF - - REAL, DIMENSION( ims:ime, jms:jme ) , & - INTENT(IN ) :: EMISS, & - TSK - - REAL, INTENT(IN ) :: R,G - - REAL, DIMENSION( ims:ime, jms:jme ) , & - INTENT(IN ) :: XLAND, & - XICE, & - SNOW -! -! Optional -! 
- REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & - OPTIONAL , & - INTENT(IN ) :: & - CLDFRA3D, & - QV3D, & - QC3D, & - QR3D, & - QI3D, & - QS3D, & - QG3D, & - QNDROP3D - real pi,third,relconst,lwpmin,rhoh2o - - REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & - OPTIONAL , & - INTENT(IN ) :: & - F_ICE_PHY, & - F_RAIN_PHY - - LOGICAL, OPTIONAL, INTENT(IN) :: & - F_QV,F_QC,F_QR,F_QI,F_QS,F_QG,F_QNDROP -! Optional - REAL, DIMENSION( ims:ime, kms:kme, jms:jme ), OPTIONAL , & - INTENT(IN ) :: tauaerlw1,tauaerlw2,tauaerlw3,tauaerlw4, & ! czhao - tauaerlw5,tauaerlw6,tauaerlw7,tauaerlw8, & ! czhao - tauaerlw9,tauaerlw10,tauaerlw11,tauaerlw12, & ! czhao - tauaerlw13,tauaerlw14,tauaerlw15,tauaerlw16 - - INTEGER, INTENT(IN ), OPTIONAL :: aer_ra_feedback -!jdfcz INTEGER, INTENT(IN ), OPTIONAL :: progn,prescribe - INTEGER, INTENT(IN ), OPTIONAL :: progn -! Ozone - REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & - OPTIONAL , & - INTENT(IN ) :: O33D - INTEGER, OPTIONAL, INTENT(IN ) :: o3input - - real, parameter :: thresh=1.e-9 - real slope - character(len=200) :: msg - - -! Top of atmosphere and surface longwave fluxes (W m-2) - REAL, DIMENSION( ims:ime, jms:jme ), & - OPTIONAL, INTENT(INOUT) :: & - LWUPT,LWUPTC,LWDNT,LWDNTC, & - LWUPB,LWUPBC,LWDNB,LWDNBC - -! Layer longwave fluxes (including extra layer above model top) -! Vertical ordering is from bottom to top (W m-2) - REAL, DIMENSION( ims:ime, kms:kme+2, jms:jme ), & - OPTIONAL, INTENT(OUT) :: & - LWUPFLX,LWUPFLXC,LWDNFLX,LWDNFLXC - -! 
LOCAL VARS + its,ite, jts,jte, kts,kte & + ) + + implicit none + +!================================================================================================================= + +!--- input arguments: + integer,intent(in):: ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte + integer,intent(in):: icloud,has_reqc,has_reqi,has_reqs + integer,intent(in),optional:: o3input + + real,intent(in),dimension(ims:ime,jms:jme):: emiss,tsk,snow,xice,xland + real,intent(in),dimension(ims:ime,kms:kme,jms:jme):: t3d,p3d,pi3d + real,intent(in),dimension(ims:ime,kms:kme,jms:jme):: dz8w,p8w,t8w + + real,intent(in),dimension(ims:ime,kms:kme,jms:jme):: re_cloud,re_ice,re_snow + real,intent(in),dimension(ims:ime,kms:kme,jms:jme),optional:: & + cldfra3d,qv3d,qc3d,qr3d,qi3d,qs3d,qg3d,o33d + +!--- additional input arguments to use the CAM ozone climatology: + integer,intent(in):: noznlevels + real,intent(in),dimension(1:noznlevels),optional:: pin + real,intent(in),dimension(ims:ime,1:noznlevels,jms:jme),optional:: o3clim + +!--- inout arguments: + real,intent(inout),dimension(ims:ime,jms:jme):: glw,olr,lwcf + real,intent(inout),dimension(ims:ime,jms:jme),optional:: & + lwupt,lwuptc,lwdnt,lwdntc,lwupb,lwupbc,lwdnb,lwdnbc + + real,intent(inout),dimension(ims:ime,kms:kme,jms:jme):: rthratenlw + +!--- output arguments: + real,intent(out),dimension(ims:ime,kms:kme,jms:jme),optional:: & + rre_cloud,rre_ice,rre_snow + real,intent(out),dimension(ims:ime,kms:kme+2,jms:jme ),optional:: & + lwupflx,lwupflxc,lwdnflx,lwdnflxc + +!local variables and arrays: + integer:: nb,ncol,nlay,icld,inflglw,iceflglw,liqflglw + integer:: iplon,irng,permuteseed + integer:: pcols,pver + integer:: idx_rei + integer:: i,j,k,kk,n - REAL, DIMENSION( kts:kte+1 ) :: Pw1D, & - Tw1D - - REAL, DIMENSION( kts:kte ) :: TTEN1D, & - CLDFRA1D, & - DZ1D, & - P1D, & - T1D, & - QV1D, & - QC1D, & - QR1D, & - QI1D, & - QS1D, & - QG1D, & - O31D, & - qndrop1d - -! 
Added local arrays for RRTMG - integer :: ncol, & - nlay, & - icld, & - inflglw, & - iceflglw, & - liqflglw + real:: ro,dz + real:: corr + real:: gliqwp,gicewp,gsnowp,gravmks + real:: snow_mass_factor + real,dimension(1):: tsfc,landfrac,landm,snowh,icefrac + real,dimension(1,1:kte-kts+1):: pdel,cliqwp,cicewp,csnowp,reliq,reice,resnow + real,dimension(1,nbndlw):: emis + + real,dimension(kts:kte):: & + tten1d,cldfra1d,dz1d,p1d,t1d,qv1d,qc1d,qr1d,qi1d,qs1d,qg1d,o31d + real,dimension(kts:kte+1):: pw1d,tw1d + +!--- additional local variables and arrays needed to include additional layers between the model top +! and the top of the atmosphere: + integer,dimension(its:ite,jts:jte):: mpas_nlay -#if defined(mpas) -!MPAS specific (Laura D. Fowler - 2013-03-15): Letting the arrays below to be -!allocatable allows the number of layers added above the model-top to vary as -!function of the grid-point. This is needed because the MPAS vertical coordi- -!nate system is a height coordinate system, and model-tops vary between grid- -!points: - integer,dimension(ims:ime,jms:jme):: mpas_nlay real,dimension(:),allocatable:: o3mmr,varint real,dimension(:,:),allocatable:: & plev,tlev,play,tlay,h2ovmr,o3vmr,co2vmr,o2vmr,ch4vmr,n2ovmr,cfc11vmr, & - cfc12vmr,cfc22vmr,ccl4vmr,clwpth,ciwpth,rel,rei,cldfrac,relqmcl,reicmcl - real,dimension(:,:),allocatable:: & - uflx,dflx,uflxc,dflxc,hr,hrc - real,dimension(:,:,:),allocatable:: & - taucld,cldfmcl,clwpmcl,ciwpmcl,taucmcl,tauaer -!For old cloud property specification for rrtm_lw - real,dimension(kts:kte):: clwp,ciwp,plwp,piwp -!Surface emissivity (for 16 LW spectral bands) - real,dimension(1,nbndlw):: emis + cfc12vmr,cfc22vmr,ccl4vmr,clwpth,ciwpth,cswpth,rel,rei,res,cldfrac,relqmcl,reicmcl,resnmcl + real,dimension(:,:),allocatable:: uflx,dflx,uflxc,dflxc,hr,hrc + real,dimension(:,:,:),allocatable:: taucld,cldfmcl,clwpmcl,ciwpmcl,cswpmcl,taucmcl,tauaer -!MPAS specific (Laura D. Fowler - 2013-04-16): Added the CAM ozone climatology. 
-!input arguments: - integer,intent(in):: noznlevels - real,intent(in),dimension(1:noznlevels),optional:: pin - real,intent(in),dimension(ims:ime,1:noznlevels,jms:jme),optional:: o3clim -!local variables: +!--- additional local variables and arrays needed for the CAM ozone climatologyL real,dimension(1:noznlevels):: o3clim1d -!end MPAS specific. - -#else -! Dimension with extra layer from model top to TOA - real, dimension( 1, kts:nlayers+1 ) :: plev, & - tlev - real, dimension( 1, kts:nlayers ) :: play, & - tlay, & - h2ovmr, & - o3vmr, & - co2vmr, & - o2vmr, & - ch4vmr, & - n2ovmr, & - cfc11vmr, & - cfc12vmr, & - cfc22vmr, & - ccl4vmr - real, dimension( kts:nlayers ) :: o3mmr -! For old cloud property specification for rrtm_lw - real, dimension( kts:kte ) :: clwp, & - ciwp, & - plwp, & - piwp -! Surface emissivity (for 16 LW spectral bands) - real, dimension( 1, nbndlw ) :: emis -! Dimension with extra layer from model top to TOA, -! though no clouds are allowed in extra layer - real, dimension( 1, kts:nlayers ) :: clwpth, & - ciwpth, & - rel, & - rei, & - cldfrac, & - relqmcl, & - reicmcl - real, dimension( nbndlw, 1, kts:nlayers ) :: taucld - real, dimension( ngptlw, 1, kts:nlayers ) :: cldfmcl, & - clwpmcl, & - ciwpmcl, & - taucmcl - real, dimension( 1, kts:nlayers, nbndlw ) :: tauaer - -! Output arrays contain extra layer from model top to TOA - real, dimension( 1, kts:nlayers+1 ) :: uflx, & - dflx, & - uflxc, & - dflxc - real, dimension( 1, kts:nlayers ) :: hr, & - hrc +#if defined(mpas) +!MPAS specific (Dom Heinzeller): + integer:: nlayers #endif - real, dimension ( 1 ) :: tsfc, & - ps - real :: ro, & - dz - - -! Set trace gas volume mixing ratios, 2005 values, IPCC (2007) -! carbon dioxide (379 ppmv) - real :: co2 - data co2 / 379.e-6 / -! methane (1774 ppbv) - real :: ch4 - data ch4 / 1774.e-9 / -! nitrous oxide (319 ppbv) - real :: n2o - data n2o / 319.e-9 / -! cfc-11 (251 ppt) - real :: cfc11 - data cfc11 / 0.251e-9 / -! 
cfc-12 (538 ppt) - real :: cfc12 - data cfc12 / 0.538e-9 / -! cfc-22 (169 ppt) - real :: cfc22 - data cfc22 / 0.169e-9 / -! ccl4 (93 ppt) - real :: ccl4 - data ccl4 / 0.093e-9 / -! Set oxygen volume mixing ratio (for o2mmr=0.23143) - real :: o2 - data o2 / 0.209488 / - - integer :: iplon, irng, permuteseed - integer :: nb - -! For old cloud property specification for rrtm_lw -! Cloud and precipitation absorption coefficients - real :: abcw,abice,abrn,absn - data abcw /0.144/ - data abice /0.0735/ - data abrn /0.330e-3/ - data absn /2.34e-3/ - -! Molecular weights and ratios for converting mmr to vmr units -! real :: amd ! Effective molecular weight of dry air (g/mol) -! real :: amw ! Molecular weight of water vapor (g/mol) -! real :: amo ! Molecular weight of ozone (g/mol) -! real :: amo2 ! Molecular weight of oxygen (g/mol) +!--- set trace gas volume mixing ratios, 2005 values, IPCC (2007): +!carbon dioxide (379 ppmv) + real :: co2 + data co2 / 379.e-6 / +!methane (1774 ppbv) + real :: ch4 + data ch4 / 1774.e-9 / +!nitrous oxide (319 ppbv) + real :: n2o + data n2o / 319.e-9 / +!cfc-11 (251 ppt) + real :: cfc11 + data cfc11 / 0.251e-9 / +!cfc-12 (538 ppt) + real :: cfc12 + data cfc12 / 0.538e-9 / +!cfc-22 (169 ppt) + real :: cfc22 + data cfc22 / 0.169e-9 / +!ccl4 (93 ppt) + real :: ccl4 + data ccl4 / 0.093e-9 / + +!--- set oxygen volume mixing ratio (for o2mmr=0.23143): + real :: o2 + data o2 / 0.209488 / + +!--- molecular weights and ratios for converting mmr to vmr units +! real :: amd ! Effective molecular weight of dry air (g/mol) +! real :: amw ! Molecular weight of water vapor (g/mol) +! real :: amo ! Molecular weight of ozone (g/mol) +! real :: amo2 ! Molecular weight of oxygen (g/mol) ! Atomic weights for conversion from mass to volume mixing ratios -! data amd / 28.9660 / -! data amw / 18.0160 / -! data amo / 47.9998 / -! data amo2 / 31.9999 / +! data amd / 28.9660 / +! data amw / 18.0160 / +! data amo / 47.9998 / +! data amo2 / 31.9999 / - real :: amdw ! 
Molecular weight of dry air / water vapor - real :: amdo ! Molecular weight of dry air / ozone - real :: amdo2 ! Molecular weight of dry air / oxygen - data amdw / 1.607793 / - data amdo / 0.603461 / - data amdo2 / 0.905190 / + real :: amdw ! Molecular weight of dry air / water vapor + real :: amdo ! Molecular weight of dry air / ozone + real :: amdo2 ! Molecular weight of dry air / oxygen + data amdw / 1.607793 / + data amdo / 0.603461 / + data amdo2 / 0.905190 / -!! - real, dimension( 1, 1:kte-kts+1 ) :: pdel ! Layer pressure thickness (mb) - - real, dimension(1, 1:kte-kts+1) :: cicewp, & ! in-cloud cloud ice water path - cliqwp, & ! in-cloud cloud liquid water path - reliq, & ! effective drop radius (microns) - reice ! ice effective drop size (microns) - real :: gliqwp, gicewp, gravmks +!--- added for top of model adjustment. Steven Cavallo NCAR/MMM December 2010 + integer,parameter:: nproflevs = 60 ! Constant, from the table + integer:: klev + + real:: wght,vark,vark1 + real,dimension(nproflevs):: pprof,tprof + +!--- weighted mean pressure and temperature profiles from midlatitude summer (MLS), +! midlatitude winter (MLW), sub-Arctic winter (SAW),sub-Arctic summer (SAS), +! and tropical (TROP) standard atmospheres. 
+ data pprof /1000.00,855.47,731.82,626.05,535.57,458.16, & + 391.94,335.29,286.83,245.38,209.91,179.57, & + 153.62,131.41,112.42, 96.17, 82.27, 70.38, & + 60.21, 51.51, 44.06, 37.69, 32.25, 27.59, & + 23.60, 20.19, 17.27, 14.77, 12.64, 10.81, & + 9.25, 7.91, 6.77, 5.79, 4.95, 4.24, & + 3.63, 3.10, 2.65, 2.27, 1.94, 1.66, & + 1.42, 1.22, 1.04, 0.89, 0.76, 0.65, & + 0.56, 0.48, 0.41, 0.35, 0.30, 0.26, & + 0.22, 0.19, 0.16, 0.14, 0.12, 0.10/ + data tprof /286.96,281.07,275.16,268.11,260.56,253.02, & + 245.62,238.41,231.57,225.91,221.72,217.79, & + 215.06,212.74,210.25,210.16,210.69,212.14, & + 213.74,215.37,216.82,217.94,219.03,220.18, & + 221.37,222.64,224.16,225.88,227.63,229.51, & + 231.50,233.73,236.18,238.78,241.60,244.44, & + 247.35,250.33,253.32,256.30,259.22,262.12, & + 264.80,266.50,267.59,268.44,268.69,267.76, & + 266.13,263.96,261.54,258.93,256.15,253.23, & + 249.89,246.67,243.48,240.25,236.66,233.86/ + +!----------------------------------------------------------------------------------------------------------------- + +!--- all fields are ordered vertically from bottom to top (pressures are in mb): + ncol = 1 + +!--- initialize option for the calculation of the cloud optical properties: + icld = 2 ! with clouds using maximum/random cloud overlap in subroutine mcica_subcol_lw. + inflglw = 2 + iceflglw = 3 + liqflglw = 1 + +!--- latitude loop: + j_loop: do j = jts,jte + +!--- longitude loop: + i_loop: do i = its,ite + + !--- set surface emissivity in each RRTMG longwave band: + do nb = 1, nbndlw + emis(ncol,nb) = emiss(i,j) + enddo -! -! REAL :: TSFC,GLW0,OLR0,EMISS0,FP - real, dimension (1) :: landfrac, landm, snowh, icefrac - integer :: pcols, pver + !--- INITIALIZE COLUMN SOUNDING (the call to the long wave radiation code is done one column at a time + ! which is why we set ncol = 1 above): + do k = kts, kte+1 + pw1d(k) = p8w(i,k,j) / 100. + tw1d(k) = t8w(i,k,j) + enddo -! 
- INTEGER :: i,j,K - LOGICAL :: predicate + do k = kts, kte + p1d(k) = p3d(i,k,j) / 100. + dz1d(k) = dz8w(i,k,j) + t1d(k) = t3d(i,k,j) + qv1d(k) = amax1(qv3d(i,k,j),1.e-12) + o31d(k) = 0. + qc1d(k) = 0. + qr1d(k) = 0. + qi1d(k) = 0. + qs1d(k) = 0. + qg1d(k) = 0. + cldfra1d(k) = 0. + if(present(o33d)) o31d(k) = o33d(i,k,j) + enddo -! Added for top of model adjustment. Steven Cavallo NCAR/MMM December 2010 - INTEGER, PARAMETER :: nproflevs = 60 ! Constant, from the table - INTEGER :: L, LL, klev ! Loop indices -#if !defined(mpas) - REAL, DIMENSION( kts:nlayers+1 ) :: varint -#endif - REAL :: wght,vark,vark1 - REAL :: PPROF(nproflevs), TPROF(nproflevs) - ! Weighted mean pressure and temperature profiles from midlatitude - ! summer (MLS),midlatitude winter (MLW), sub-Arctic - ! winter (SAW),sub-Arctic summer (SAS), and tropical (TROP) - ! standard atmospheres. - DATA PPROF /1000.00,855.47,731.82,626.05,535.57,458.16, & - 391.94,335.29,286.83,245.38,209.91,179.57, & - 153.62,131.41,112.42,96.17,82.27,70.38, & - 60.21,51.51,44.06,37.69,32.25,27.59, & - 23.60,20.19,17.27,14.77,12.64,10.81, & - 9.25,7.91,6.77,5.79,4.95,4.24, & - 3.63,3.10,2.65,2.27,1.94,1.66, & - 1.42,1.22,1.04,0.89,0.76,0.65, & - 0.56,0.48,0.41,0.35,0.30,0.26, & - 0.22,0.19,0.16,0.14,0.12,0.10/ - DATA TPROF /286.96,281.07,275.16,268.11,260.56,253.02, & - 245.62,238.41,231.57,225.91,221.72,217.79, & - 215.06,212.74,210.25,210.16,210.69,212.14, & - 213.74,215.37,216.82,217.94,219.03,220.18, & - 221.37,222.64,224.16,225.88,227.63,229.51, & - 231.50,233.73,236.18,238.78,241.60,244.44, & - 247.35,250.33,253.32,256.30,259.22,262.12, & - 264.80,266.50,267.59,268.44,268.69,267.76, & - 266.13,263.96,261.54,258.93,256.15,253.23, & - 249.89,246.67,243.48,240.25,236.66,233.86/ -!------------------------------------------------------------------ -#ifdef WRF_CHEM - IF ( aer_ra_feedback == 1) then - IF ( .NOT. & - ( PRESENT(tauaerlw1) .AND. & - PRESENT(tauaerlw2) .AND. & - PRESENT(tauaerlw3) .AND. 
& - PRESENT(tauaerlw4) .AND. & - PRESENT(tauaerlw5) .AND. & - PRESENT(tauaerlw6) .AND. & - PRESENT(tauaerlw7) .AND. & - PRESENT(tauaerlw8) .AND. & - PRESENT(tauaerlw9) .AND. & - PRESENT(tauaerlw10) .AND. & - PRESENT(tauaerlw11) .AND. & - PRESENT(tauaerlw12) .AND. & - PRESENT(tauaerlw13) .AND. & - PRESENT(tauaerlw14) .AND. & - PRESENT(tauaerlw15) .AND. & - PRESENT(tauaerlw16) ) ) THEN - CALL wrf_error_fatal & - ('Warning: missing fields required for aerosol radiation' ) - ENDIF - ENDIF -#endif + !--- initialize the local arrays containing the different cloud water and ice condenstates: + if(icloud .gt. 0) then + do k = kts,kte + if(present(qc3d)) qc1d(k) = amax1(qc3d(i,k,j),0.) + if(present(qr3d)) qr1d(k) = amax1(qr3d(i,k,j),0.) + if(present(qi3d)) qi1d(k) = amax1(qi3d(i,k,j),0.) + if(present(qs3d)) qs1d(k) = amax1(qs3d(i,k,j),0.) + if((present(qc3d) .or. present(qi3d) .or. present(qs3d)) .and. present(cldfra3d)) & + cldfra1d(k) = cldfra3d(i,k,j) + enddo + endif -!-----CALCULATE LONG WAVE RADIATION -! -! All fields are ordered vertically from bottom to top -! Pressures are in mb - -! latitude loop - j_loop: do j = jts,jte - -! longitude loop - i_loop: do i = its,ite - - do k=kts,kte+1 - Pw1D(K) = p8w(I,K,J)/100. - Tw1D(K) = t8w(I,K,J) - enddo - - DO K=kts,kte - QV1D(K)=0. - QC1D(K)=0. - QR1D(K)=0. - QI1D(K)=0. - QS1D(K)=0. - CLDFRA1D(k)=0. - ENDDO - - DO K=kts,kte - QV1D(K)=QV3D(I,K,J) - QV1D(K)=max(0.,QV1D(K)) - IF ( PRESENT( O33D ) ) THEN - O31D(K)=O33D(I,K,J) - ENDIF - ENDDO - - DO K=kts,kte - TTEN1D(K)=0. - T1D(K)=T3D(I,K,J) - P1D(K)=P3D(I,K,J)/100. - DZ1D(K)=dz8w(I,K,J) - ENDDO - -! moist variables - - IF (ICLOUD .ne. 0) THEN - IF ( PRESENT( CLDFRA3D ) ) THEN - DO K=kts,kte - CLDFRA1D(k)=CLDFRA3D(I,K,J) - ENDDO - ENDIF - - IF (PRESENT(F_QC) .AND. PRESENT(QC3D)) THEN - IF ( F_QC) THEN - DO K=kts,kte - QC1D(K)=QC3D(I,K,J) - QC1D(K)=max(0.,QC1D(K)) - ENDDO - ENDIF - ENDIF - - IF (PRESENT(F_QR) .AND. 
PRESENT(QR3D)) THEN - IF ( F_QR) THEN - DO K=kts,kte - QR1D(K)=QR3D(I,K,J) - QR1D(K)=max(0.,QR1D(K)) - ENDDO - ENDIF - ENDIF - - IF ( PRESENT(F_QNDROP).AND.PRESENT(QNDROP3D)) THEN - IF (F_QNDROP) THEN - DO K=kts,kte - qndrop1d(K)=qndrop3d(I,K,J) - ENDDO - ENDIF - ENDIF - -! This logic is tortured because cannot test F_QI unless -! it is present, and order of evaluation of expressions -! is not specified in Fortran - - IF ( PRESENT ( F_QI ) ) THEN - predicate = F_QI - ELSE - predicate = .FALSE. - ENDIF - -! For MP option 3 - IF (.NOT. predicate .and. .not. warm_rain) THEN - DO K=kts,kte - IF (T1D(K) .lt. 273.15) THEN - QI1D(K)=QC1D(K) - QS1D(K)=QR1D(K) - QC1D(K)=0. - QR1D(K)=0. - ENDIF - ENDDO - ENDIF - - IF (PRESENT(F_QI) .AND. PRESENT(QI3D)) THEN - IF (F_QI) THEN - DO K=kts,kte - QI1D(K)=QI3D(I,K,J) - QI1D(K)=max(0.,QI1D(K)) - ENDDO - ENDIF - ENDIF - - IF (PRESENT(F_QS) .AND. PRESENT(QS3D)) THEN - IF (F_QS) THEN - DO K=kts,kte - QS1D(K)=QS3D(I,K,J) - QS1D(K)=max(0.,QS1D(K)) - ENDDO - ENDIF - ENDIF - - IF (PRESENT(F_QG) .AND. PRESENT(QG3D)) THEN - IF (F_QG) THEN - DO K=kts,kte - QG1D(K)=QG3D(I,K,J) - QG1D(K)=max(0.,QG1D(K)) - ENDDO - ENDIF - ENDIF - -! mji - For MP option 5 - IF ( PRESENT(F_QI) .and. PRESENT(F_QC) .and. PRESENT(F_QS) .and. PRESENT(F_ICE_PHY) ) THEN - IF ( F_QC .and. .not. F_QI .and. F_QS ) THEN - DO K=kts,kte - qi1d(k) = qs3d(i,k,j) - qc1d(k) = qc3d(i,k,j) - qi1d(k) = max(0.,qi1d(k)) - qc1d(k) = max(0.,qc1d(k)) - ENDDO - ENDIF - ENDIF - - ENDIF - -! EMISS0=EMISS(I,J) -! GLW0=0. -! OLR0=0. -! TSFC=TSK(I,J) - DO K=kts,kte - QV1D(K)=AMAX1(QV1D(K),1.E-12) - ENDDO - -! Set up input for longwave - ncol = 1 -! Add extra layer from top of model to top of atmosphere -! nlay = (kte - kts + 1) + 1 -! Edited for top of model adjustment (nlayers = kte + 1). + !--- initialize the local radiative heating rate: + do k = kts, kte + tten1d(k) = 0. + enddo -#if defined(mpas) -!MPAS specific (Laura D. 
Fowler - 2013-04-11): -!In contrast to WRF, MPAS columns have different model top presssures because MPAS uses a height -!coordinate system. Therefore, we define nlayers for each individual column: - nlayers = kte + nint(pw1d(kte+1)/deltap) - mpas_nlay(i,j) = nlayers-kte -! write(0,101) j,i,kme,kte,nlayers,mpas_nlay(i,j),pw1d(kte+1) -! 101 format(6i9,1x,f9.4) - if(.not.allocated(o3mmr) ) allocate(o3mmr(kts:nlayers) ) - if(.not.allocated(varint) ) allocate(varint(kts:nlayers+1) ) - if(.not.allocated(plev) ) allocate(plev(1,kts:nlayers+1) ) - if(.not.allocated(tlev) ) allocate(tlev(1,kts:nlayers+1) ) - if(.not.allocated(play) ) allocate(play(1,kts:nlayers) ) - if(.not.allocated(tlay) ) allocate(tlay(1,kts:nlayers) ) - if(.not.allocated(h2ovmr) ) allocate(h2ovmr(1,kts:nlayers) ) - if(.not.allocated(o3vmr) ) allocate(o3vmr(1,kts:nlayers) ) - if(.not.allocated(co2vmr) ) allocate(co2vmr(1,kts:nlayers) ) - if(.not.allocated(o2vmr) ) allocate(o2vmr(1,kts:nlayers) ) - if(.not.allocated(ch4vmr) ) allocate(ch4vmr(1,kts:nlayers) ) - if(.not.allocated(n2ovmr) ) allocate(n2ovmr(1,kts:nlayers) ) - if(.not.allocated(cfc11vmr)) allocate(cfc11vmr(1,kts:nlayers)) - if(.not.allocated(cfc12vmr)) allocate(cfc12vmr(1,kts:nlayers)) - if(.not.allocated(cfc22vmr)) allocate(cfc22vmr(1,kts:nlayers)) - if(.not.allocated(ccl4vmr) ) allocate(ccl4vmr(1,kts:nlayers) ) - if(.not.allocated(clwpth) ) allocate(clwpth(1,kts:nlayers) ) - if(.not.allocated(ciwpth) ) allocate(ciwpth(1,kts:nlayers) ) - if(.not.allocated(rel) ) allocate(rel(1,kts:nlayers) ) - if(.not.allocated(rei) ) allocate(rei(1,kts:nlayers) ) - if(.not.allocated(cldfrac) ) allocate(cldfrac(1,kts:nlayers) ) - if(.not.allocated(relqmcl) ) allocate(relqmcl(1,kts:nlayers) ) - if(.not.allocated(reicmcl) ) allocate(reicmcl(1,kts:nlayers) ) - if(.not.allocated(uflx) ) allocate(uflx(1,kts:nlayers+1) ) - if(.not.allocated(dflx) ) allocate(dflx(1,kts:nlayers+1) ) - if(.not.allocated(uflxc) ) allocate(uflxc(1,kts:nlayers+1) ) - 
if(.not.allocated(dflxc) ) allocate(dflxc(1,kts:nlayers+1) ) - if(.not.allocated(hr) ) allocate(hr(1,kts:nlayers) ) - if(.not.allocated(hrc) ) allocate(hrc(1,kts:nlayers) ) - if(.not.allocated(taucld) ) allocate(taucld(nbndlw,1,kts:nlayers) ) - if(.not.allocated(cldfmcl) ) allocate(cldfmcl(ngptlw,1,kts:nlayers)) - if(.not.allocated(clwpmcl) ) allocate(clwpmcl(ngptlw,1,kts:nlayers)) - if(.not.allocated(ciwpmcl) ) allocate(ciwpmcl(ngptlw,1,kts:nlayers)) - if(.not.allocated(taucmcl) ) allocate(taucmcl(ngptlw,1,kts:nlayers)) - if(.not.allocated(tauaer) ) allocate(tauaer(1,kts:nlayers,nbndlw) ) -#endif + !--- add extra layers to include absorption and transmission between the top of the model and the top of + ! the atmosphere: in contrast to WRF, MPAS columns have different model-top pressures since MPAS uses + ! a height coordinate system. Therefore, we define nlayers for each individual column: + nlayers = kte + max(nint(pw1d(kte+1)/deltap), 1) + mpas_nlay(i,j) = nlayers-kte +! write(0,101) j,i,kme,kte,nlayers,mpas_nlay(i,j),pw1d(kte+1),pw1d(kte+1)-mpas_nlay(i,j)*deltap +! 
101 format(6i9,3(1x,f9.4)) + if(.not.allocated(o3mmr) ) allocate(o3mmr(kts:nlayers) ) + if(.not.allocated(varint) ) allocate(varint(kts:nlayers+1) ) + if(.not.allocated(plev) ) allocate(plev(1,kts:nlayers+1) ) + if(.not.allocated(tlev) ) allocate(tlev(1,kts:nlayers+1) ) + if(.not.allocated(play) ) allocate(play(1,kts:nlayers) ) + if(.not.allocated(tlay) ) allocate(tlay(1,kts:nlayers) ) + if(.not.allocated(h2ovmr) ) allocate(h2ovmr(1,kts:nlayers) ) + if(.not.allocated(o3vmr) ) allocate(o3vmr(1,kts:nlayers) ) + if(.not.allocated(co2vmr) ) allocate(co2vmr(1,kts:nlayers) ) + if(.not.allocated(o2vmr) ) allocate(o2vmr(1,kts:nlayers) ) + if(.not.allocated(ch4vmr) ) allocate(ch4vmr(1,kts:nlayers) ) + if(.not.allocated(n2ovmr) ) allocate(n2ovmr(1,kts:nlayers) ) + if(.not.allocated(cfc11vmr)) allocate(cfc11vmr(1,kts:nlayers)) + if(.not.allocated(cfc12vmr)) allocate(cfc12vmr(1,kts:nlayers)) + if(.not.allocated(cfc22vmr)) allocate(cfc22vmr(1,kts:nlayers)) + if(.not.allocated(ccl4vmr) ) allocate(ccl4vmr(1,kts:nlayers) ) + if(.not.allocated(clwpth) ) allocate(clwpth(1,kts:nlayers) ) + if(.not.allocated(ciwpth) ) allocate(ciwpth(1,kts:nlayers) ) + if(.not.allocated(cswpth) ) allocate(cswpth(1,kts:nlayers) ) + if(.not.allocated(rel) ) allocate(rel(1,kts:nlayers) ) + if(.not.allocated(rei) ) allocate(rei(1,kts:nlayers) ) + if(.not.allocated(res) ) allocate(res(1,kts:nlayers) ) + if(.not.allocated(cldfrac) ) allocate(cldfrac(1,kts:nlayers) ) + if(.not.allocated(relqmcl) ) allocate(relqmcl(1,kts:nlayers) ) + if(.not.allocated(reicmcl) ) allocate(reicmcl(1,kts:nlayers) ) + if(.not.allocated(resnmcl) ) allocate(resnmcl(1,kts:nlayers) ) + if(.not.allocated(uflx) ) allocate(uflx(1,kts:nlayers+1) ) + if(.not.allocated(dflx) ) allocate(dflx(1,kts:nlayers+1) ) + if(.not.allocated(uflxc) ) allocate(uflxc(1,kts:nlayers+1) ) + if(.not.allocated(dflxc) ) allocate(dflxc(1,kts:nlayers+1) ) + if(.not.allocated(hr) ) allocate(hr(1,kts:nlayers) ) + if(.not.allocated(hrc) ) 
allocate(hrc(1,kts:nlayers) ) + if(.not.allocated(taucld) ) allocate(taucld(nbndlw,1,kts:nlayers) ) + if(.not.allocated(cldfmcl) ) allocate(cldfmcl(ngptlw,1,kts:nlayers)) + if(.not.allocated(clwpmcl) ) allocate(clwpmcl(ngptlw,1,kts:nlayers)) + if(.not.allocated(ciwpmcl) ) allocate(ciwpmcl(ngptlw,1,kts:nlayers)) + if(.not.allocated(cswpmcl) ) allocate(cswpmcl(ngptlw,1,kts:nlayers)) + if(.not.allocated(taucmcl) ) allocate(taucmcl(ngptlw,1,kts:nlayers)) + if(.not.allocated(tauaer) ) allocate(tauaer(1,kts:nlayers,nbndlw) ) + + nlay = nlayers ! keep these indices the same. + + !--- initialize local arrays called in the calculation of the cloud optical properties and radiative + ! fluxes: + do n = 1, ncol + do k = kts, kte + reliq(ncol,k) = 10. + reice(ncol,k) = 10. + resnow(ncol,k) = 10. + cliqwp(ncol,k) = 0. + cicewp(ncol,k) = 0. + csnowp(ncol,k) = 0. + enddo -! Steven Cavallo, December 2010 - nlay = nlayers ! Keep these indices the same - - -! Select cloud liquid and ice optics parameterization options -! For passing in cloud optical properties directly: -! icld = 2 -! inflglw = 0 -! iceflglw = 0 -! liqflglw = 0 -! For passing in cloud physical properties; cloud optics parameterized in RRTMG: - icld = 2 - inflglw = 2 - iceflglw = 3 - liqflglw = 1 - -! Layer indexing goes bottom to top here for all fields. -! Water vapor and ozone are converted from mmr to vmr. -! Pressures are in units of mb here. - plev(ncol,1) = pw1d(1) - tlev(ncol,1) = tw1d(1) - tsfc(ncol) = tsk(i,j) - do k = kts, kte - play(ncol,k) = p1d(k) - plev(ncol,k+1) = pw1d(k+1) - pdel(ncol,k) = plev(ncol,k) - plev(ncol,k+1) - tlay(ncol,k) = t1d(k) - tlev(ncol,k+1) = tw1d(k+1) - h2ovmr(ncol,k) = qv1d(k) * amdw - co2vmr(ncol,k) = co2 - o2vmr(ncol,k) = o2 - ch4vmr(ncol,k) = ch4 - n2ovmr(ncol,k) = n2o - cfc11vmr(ncol,k) = cfc11 - cfc12vmr(ncol,k) = cfc12 - cfc22vmr(ncol,k) = cfc22 - ccl4vmr(ncol,k) = ccl4 - enddo - -! This section is replaced with a new method to deal with model top - if ( 1 == 0 ) then - -! 
Define profile values for extra layer from model top to top of atmosphere. -! The top layer temperature for all gridpoints is set to the top layer-1 -! temperature plus a constant (0 K) that represents an isothermal layer -! above ptop. Top layer interface temperatures are linearly interpolated -! from the layer temperatures. - - play(ncol,kte+1) = 0.5 * plev(ncol,kte+1) - tlay(ncol,kte+1) = tlev(ncol,kte+1) + 0.0 - plev(ncol,kte+2) = 1.0e-5 - tlev(ncol,kte+2) = tlev(ncol,kte+1) + 0.0 - h2ovmr(ncol,kte+1) = h2ovmr(ncol,kte) - co2vmr(ncol,kte+1) = co2vmr(ncol,kte) - o2vmr(ncol,kte+1) = o2vmr(ncol,kte) - ch4vmr(ncol,kte+1) = ch4vmr(ncol,kte) - n2ovmr(ncol,kte+1) = n2ovmr(ncol,kte) - cfc11vmr(ncol,kte+1) = cfc11vmr(ncol,kte) - cfc12vmr(ncol,kte+1) = cfc12vmr(ncol,kte) - cfc22vmr(ncol,kte+1) = cfc22vmr(ncol,kte) - ccl4vmr(ncol,kte+1) = ccl4vmr(ncol,kte) + do k = 1, nlayers + clwpth(n,k) = 0. + ciwpth(n,k) = 0. + cswpth(n,k) = 0. + rel(n,k) = 0. + rei(n,k) = 0. + res(n,k) = 0. + cldfrac(n,k) = 0. + relqmcl(n,k) = 0. + reicmcl(n,k) = 0. + resnmcl(n,k) = 0. + uflx(n,k) = 0. + uflxc(n,k) = 0. + dflx(n,k) = 0. + dflxc(n,k) = 0. + hr(n,k) = 0. + hrc(n,k) = 0. + taucld(1:nbndlw,n,k) = 0. + tauaer(n,k,1:nbndlw) = 0. + cldfmcl(1:ngptlw,n,k) = 0. + clwpmcl(1:ngptlw,n,k) = 0. + ciwpmcl(1:ngptlw,n,k) = 0. + cswpmcl(1:ngptlw,n,k) = 0. + taucmcl(1:ngptlw,n,k) = 0. + enddo + uflx(n,nlayers+1) = 0. + uflxc(n,nlayers+1) = 0. + dflx(n,nlayers+1) = 0. + dflxc(n,nlayers+1) = 0. + enddo - endif + !--- fill local arrays with input sounding. 
convert water vapor mass mixing ratio to volume mixing ratio: + plev(ncol,1) = pw1d(1) + tlev(ncol,1) = tw1d(1) + tsfc(ncol) = tsk(i,j) + do k = kts, kte + plev(ncol,k+1) = pw1d(k+1) + tlev(ncol,k+1) = tw1d(k+1) + play(ncol,k) = p1d(k) + pdel(ncol,k) = plev(ncol,k) - plev(ncol,k+1) + tlay(ncol,k) = t1d(k) + h2ovmr(ncol,k) = qv1d(k) * amdw + co2vmr(ncol,k) = co2 + o2vmr(ncol,k) = o2 + ch4vmr(ncol,k) = ch4 + n2ovmr(ncol,k) = n2o + cfc11vmr(ncol,k) = cfc11 + cfc12vmr(ncol,k) = cfc12 + cfc22vmr(ncol,k) = cfc22 + ccl4vmr(ncol,k) = ccl4 + enddo -! Set up values for extra layers to the top of the atmosphere. -! Temperature is calculated based on an average temperature profile given -! here in a table. The input table data is linearly interpolated to the -! column pressure. Mixing ratios are held constant except for ozone. -! Caution should be used if model top pressure is less than 5 hPa. -! Steven Cavallo, NCAR/MMM, December 2010 - ! Calculate the column pressure buffer levels above the - ! model top - do L=kte+1,nlayers,1 - plev(ncol,L+1) = plev(ncol,L) - deltap - play(ncol,L) = 0.5*(plev(ncol,L) + plev(ncol,L+1)) + !--- the sourcecode below follows Steven Cavallo's method to "fill" the atmospheric layers between the + ! top of the model and the top of the atmosphere. check if the pressure at the top of the atmosphere + ! is negative. if negative, set it to zero prior to the calculation of temperatures (tlev and tlay): + do k=kte+1,nlayers,1 + plev(ncol,k+1) = max(plev(ncol,k) - deltap, 0.00) + play(ncol,k) = 0.5*(plev(ncol,k) + plev(ncol,k+1)) enddo - ! Add zero as top level. This gets the temperature max at the - ! stratopause, reducing the downward flux errors in the top - ! levels. If zero happened to be the top level already, - ! this will add another level with zero, but will not affect - ! the radiative transfer calculation. + !--- add zero as top level. this gets the temperature max at the stratopause, reducing downward flux + ! errors in the top levels. 
If zero happened to be the top level already, this will add another + ! level with zero, but will not affect the radiative transfer calculation. plev(ncol,nlayers+1) = 0.00 play(ncol,nlayers) = 0.5*(plev(ncol,nlayers) + plev(ncol,nlayers+1)) - ! Interpolate the table temperatures to column pressure levels - do L=1,nlayers+1,1 - if ( PPROF(nproflevs) .lt. plev(ncol,L) ) then - do LL=2,nproflevs,1 - if ( PPROF(LL) .lt. plev(ncol,L) ) then - klev = LL - 1 + !--- interpolate the table temperatures to column pressure levels: + do k = 1, nlayers+1, 1 + if(pprof(nproflevs) .lt. plev(ncol,k)) then + do kk = 2, nproflevs, 1 + if(pprof(kk) .lt. plev(ncol,k)) then + klev = kk - 1 exit endif enddo - else klev = nproflevs endif - if (klev .ne. nproflevs ) then - vark = TPROF(klev) - vark1 = TPROF(klev+1) - wght=(plev(ncol,L)-PPROF(klev) )/( PPROF(klev+1)-PPROF(klev)) + if(klev .ne. nproflevs) then + vark = tprof(klev) + vark1 = tprof(klev+1) + wght = (plev(ncol,k)-pprof(klev) )/( pprof(klev+1)-pprof(klev)) else - vark = TPROF(klev) - vark1 = TPROF(klev) - wght = 0.0 + vark = tprof(klev) + vark1 = tprof(klev) + wght = 0.0 endif - varint(L) = wght*(vark1-vark)+vark - + varint(k) = wght*(vark1-vark)+vark enddo - ! Match the interpolated table temperature profile to WRF column - do L=kte+1,nlayers+1,1 - tlev(ncol,L) = varint(L) + (tlev(ncol,kte) - varint(kte)) - !if ( L .le. nlay ) then - tlay(ncol,L-1) = 0.5*(tlev(ncol,L) + tlev(ncol,L-1)) + !--- match the interpolated table temperature profile: + do k = kte+1, nlayers+1, 1 + tlev(ncol,k) = varint(k) + (tlev(ncol,kte) - varint(kte)) + !if(k .le. nlay) then + tlay(ncol,k-1) = 0.5*(tlev(ncol,k) + tlev(ncol,k-1)) !endif enddo - ! 
Now the chemical species (except for ozone) - do L=kte+1,nlayers,1 - h2ovmr(ncol,L) = h2ovmr(ncol,kte) - co2vmr(ncol,L) = co2vmr(ncol,kte) - o2vmr(ncol,L) = o2vmr(ncol,kte) - ch4vmr(ncol,L) = ch4vmr(ncol,kte) - n2ovmr(ncol,L) = n2ovmr(ncol,kte) - cfc11vmr(ncol,L) = cfc11vmr(ncol,kte) - cfc12vmr(ncol,L) = cfc12vmr(ncol,kte) - cfc22vmr(ncol,L) = cfc22vmr(ncol,kte) - ccl4vmr(ncol,L) = ccl4vmr(ncol,kte) + !--- fill water vapor and chemical species volume mixing ratios, except ozone: + do k = kte+1, nlayers, 1 + h2ovmr(ncol,k) = h2ovmr(ncol,kte) + co2vmr(ncol,k) = co2vmr(ncol,kte) + o2vmr(ncol,k) = o2vmr(ncol,kte) + ch4vmr(ncol,k) = ch4vmr(ncol,kte) + n2ovmr(ncol,k) = n2ovmr(ncol,kte) + cfc11vmr(ncol,k) = cfc11vmr(ncol,kte) + cfc12vmr(ncol,k) = cfc12vmr(ncol,kte) + cfc22vmr(ncol,k) = cfc22vmr(ncol,kte) + ccl4vmr(ncol,k) = ccl4vmr(ncol,kte) enddo -! End top of model buffer -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Get ozone profile including amount in extra layer above model top. -! Steven Cavallo: Must pass nlay-1 into subroutine to get nlayers -! dimension for o3mmr - call inirad (o3mmr,plev,kts,nlay-1) -#if defined(mpas) -! Laura D. Fowler (2013-04-16): Added the option to use the ozone climatology from the -! CAM radiation codes, instead of the annual mean ozone in subroutine o3dat. As output -! to the subroutine vinterp_ozn, the unit of o3mmr is actually "volume mixing ratio". + !--- initialize the ozone volume mixing ratio: + call inirad(o3mmr,plev,kts,nlayers-1) if(o3input .eq. 2) then do k = 1, noznlevels o3clim1d(k) = o3clim(i,k,j) @@ -12083,370 +11879,284 @@ SUBROUTINE RRTMG_LWRAD( & o3vmr(ncol,k) = o3mmr(k)*amdo enddo endif -#else -! Steven Cavallo: Changed to nlayers from kte+1 - do k = kts, nlayers - o3vmr(ncol,k) = o3mmr(k) * amdo - IF ( PRESENT( O33D ) ) THEN - if(o3input .eq. 2)then - if(k.le.kte)then - o3vmr(ncol,k) = o31d(k) - else -! 
apply shifted climatology profile above model top - o3vmr(ncol,k) = o31d(kte) - o3mmr(kte)*amdo + o3mmr(k)*amdo - if(o3vmr(ncol,k) .le. 0.)o3vmr(ncol,k) = o3mmr(k)*amdo - endif - endif - ENDIF - enddo -#endif -! Set surface emissivity in each RRTMG longwave band - do nb = 1, nbndlw - emis(ncol, nb) = emiss(i,j) - enddo - -! Define cloud optical properties for radiation (inflglw = 0) -! This is approach used with older RRTM_LW; -! Cloud and precipitation paths in g/m2 -! qi=0 if no ice phase -! qs=0 if no ice phase - if (inflglw .eq. 0) then - do k = kts,kte - ro = p1d(k) / (r * t1d(k))*100. - dz = dz1d(k) - clwp(k) = ro*qc1d(k)*dz*1000. - ciwp(k) = ro*qi1d(k)*dz*1000. - plwp(k) = (ro*qr1d(k))**0.75*dz*1000. - piwp(k) = (ro*qs1d(k))**0.75*dz*1000. - enddo -! Cloud fraction and cloud optical depth; old approach used with RRTM_LW - do k = kts, kte - cldfrac(ncol,k) = cldfra1d(k) - do nb = 1, nbndlw - taucld(nb,ncol,k) = abcw*clwp(k) + abice*ciwp(k) & - +abrn*plwp(k) + absn*piwp(k) - if (taucld(nb,ncol,k) .gt. 0.01) cldfrac(ncol,k) = 1. - enddo - enddo -! Zero out cloud physical property arrays; not used when passing optical properties -! into radiation - do k = kts, kte - clwpth(ncol,k) = 0.0 - ciwpth(ncol,k) = 0.0 - rel(ncol,k) = 10.0 - rei(ncol,k) = 10.0 - enddo - endif + !--- CALCULATE CLOUD OPTICAL PROPERTIES: + if(inflglw .gt. 0) then -! Define cloud physical properties for radiation (inflglw = 1 or 2) -! Cloud fraction -! Set cloud arrays if passing cloud physical properties into radiation - if (inflglw .gt. 0) then - do k = kts, kte - cldfrac(ncol,k) = cldfra1d(k) - enddo + do k = kts, kte + cldfrac(ncol,k) = cldfra1d(k) + enddo -! Compute cloud water/ice paths and particle sizes for input to radiation (CAM method) - pcols = ncol - pver = kte - kts + 1 - gravmks = g - landfrac(ncol) = 2.-XLAND(I,J) - landm(ncol) = landfrac(ncol) - snowh(ncol) = 0.001*SNOW(I,J) - icefrac(ncol) = XICE(I,J) - -! 
From module_ra_cam: Convert liquid and ice mixing ratios to water paths; -! pdel is in mb here; convert back to Pa (*100.) -! Water paths are in units of g/m2 -! snow added as ice cloud (JD 091022) - do k = kts, kte - gicewp = (qi1d(k)+qs1d(k)) * pdel(ncol,k)*100.0 / gravmks * 1000.0 ! Grid box ice water path. - gliqwp = qc1d(k) * pdel(ncol,k)*100.0 / gravmks * 1000.0 ! Grid box liquid water path. - cicewp(ncol,k) = gicewp / max(0.01,cldfrac(ncol,k)) ! In-cloud ice water path. - cliqwp(ncol,k) = gliqwp / max(0.01,cldfrac(ncol,k)) ! In-cloud liquid water path. - end do - -!link the aerosol feedback to cloud -czhao - if( PRESENT( progn ) ) then - if (progn == 1) then -!jdfcz if(prescribe==0) then - - pi = 4.*atan(1.0) - third=1./3. - rhoh2o=1.e3 - relconst=3/(4.*pi*rhoh2o) -! minimun liquid water path to calculate rel -! corresponds to optical depth of 1.e-3 for radius 4 microns. - lwpmin=3.e-5 - do k = kts, kte - reliq(ncol,k) = 10. - if( PRESENT( F_QNDROP ) ) then - if( F_QNDROP ) then - if ( qc1d(k)*pdel(ncol,k).gt.lwpmin.and. & - qndrop1d(k).gt.1000. ) then - reliq(ncol,k)=(relconst*qc1d(k)/qndrop1d(k))**third ! effective radius in m -! apply scaling from Martin et al., JAS 51, 1830. - reliq(ncol,k)=1.1*reliq(ncol,k) - reliq(ncol,k)=reliq(ncol,k)*1.e6 ! convert from m to microns - reliq(ncol,k)=max(reliq(ncol,k),4.) - reliq(ncol,k)=min(reliq(ncol,k),20.) - end if - end if - end if - end do -!jdfcz else ! prescribe -! following Kiehl - call relcalc(ncol, pcols, pver, tlay, landfrac, landm, icefrac, reliq, snowh) -! write(0,*) 'lw prescribe aerosol',maxval(qndrop3d) -!jdfcz endif - else ! progn - call relcalc(ncol, pcols, pver, tlay, landfrac, landm, icefrac, reliq, snowh) - endif - else !present(progn) - call relcalc(ncol, pcols, pver, tlay, landfrac, landm, icefrac, reliq, snowh) - endif - -! following Kristjansson and Mitchell - call reicalc(ncol, pcols, pver, tlay, reice) - -! Limit upper bound of reice for Fu ice parameterization and convert -! 
from effective radius to generalized effective size (*1.0315; Fu, 1996) - if (iceflglw .eq. 3) then - do k = kts, kte - reice(ncol,k) = reice(ncol,k) * 1.0315 - reice(ncol,k) = min(140.0,reice(ncol,k)) - end do - endif + !--- zero out cloud optical properties here (not used when passing physical properties to rrtmg_lw as + ! taucld is calculated in radiation: + do k = kts, kte + do nb = 1, nbndlw + taucld(nb,ncol,k) = 0.0 + enddo + enddo -! Set cloud physical property arrays - do k = kts, kte - clwpth(ncol,k) = cliqwp(ncol,k) - ciwpth(ncol,k) = cicewp(ncol,k) - rel(ncol,k) = reliq(ncol,k) - rei(ncol,k) = reice(ncol,k) - enddo + pcols = ncol + pver = kte - kts + 1 + gravmks = g + + if(has_reqc .ne. 0) then + !--- fill the effective radius for cloud water with that calculated in the Thompson cloud + ! microphysics parameterization: + inflglw = 3 + do k = kts, kte + reliq(ncol,k) = max(2.5,re_cloud(i,k,j)*1.e6) + if(reliq(ncol,k).le.2.5 .and. cldfrac(ncol,k).gt.0. .and. & + (xland(i,j)-1.5).gt.0.) then !--- ocean. + reliq(ncol,k) = 10.5 + elseif(reliq(ncol,k).le.2.5 .and. cldfrac(ncol,k).gt.0. .and. & + (xland(i,j)-1.5).lt.0.) then !--- land. + reliq(ncol,k) = 7.5 + endif + enddo -! Zero out cloud optical properties here; not used when passing physical properties -! to radiation and taucld is calculated in radiation - do k = kts, kte - do nb = 1, nbndlw - taucld(nb,ncol,k) = 0.0 - enddo - enddo - endif + else -! No clouds are allowed in the extra layer from model top to TOA - ! Steven Cavallo: Edited out for buffer adjustment below - if ( 1 == 0 ) then + !--- calculate the effective radius for cloud water in layers below the model top. we added the + ! dimensions pcols, pver so that the calls to subroutines relcalc and reicalc remain the same + ! 
as in WRF although the two dimensions are duplicate and not needed inside the subroutines: + landfrac(ncol) = 2.-xland(i,j) + landm(ncol) = landfrac(ncol) + snowh(ncol) = 0.001*snow(i,j) + icefrac(ncol) = xice(i,j) + call relcalc(ncol,pcols,pver,tlay,landfrac,landm,icefrac,reliq,snowh) + endif - clwpth(ncol,kte+1) = 0. - ciwpth(ncol,kte+1) = 0. - rel(ncol,kte+1) = 10. - rei(ncol,kte+1) = 10. - cldfrac(ncol,kte+1) = 0. - do nb = 1, nbndlw - taucld(nb,ncol,kte+1) = 0. - enddo + if(has_reqi .ne. 0) then + !--- fill the effective radius for cloud ice with that calculated in the Thompson cloud + ! microphysics parameterization: + inflglw = 4 + iceflglw = 4 + do k = kts, kte + reice(ncol,k) = max(5.,re_ice(i,k,j)*1.e6) + if(reice(ncol,k).le.5. .and. cldfrac(ncol,k).gt.0.) then + idx_rei = int(t3d(i,k,j)-179.) + idx_rei = min(max(idx_rei,1),75) + corr = t3d(i,k,j) - int(t3d(i,k,j)) + reice(ncol,k) = retab(idx_rei)*(1.-corr) + retab(idx_rei+1)*corr + reice(ncol,k) = max(reice(ncol,k),5.0) + endif + enddo - endif + else - ! Buffer adjustment. Steven Cavallo December 2010 - do k=kte+1,nlayers - clwpth(ncol,k) = 0. - ciwpth(ncol,k) = 0. - rel(ncol,k) = 10. - rei(ncol,k) = 10. - cldfrac(ncol,k) = 0. - do nb = 1,nbndlw - taucld(nb,ncol,k) = 0. - enddo - enddo - - iplon = 1 - irng = 0 - permuteseed = 150 - -! Sub-column generator for McICA - call mcica_subcol_lw(iplon, ncol, nlay, icld, permuteseed, irng, play, & - cldfrac, ciwpth, clwpth, rei, rel, taucld, cldfmcl, & - ciwpmcl, clwpmcl, reicmcl, relqmcl, taucmcl) - -!-------------------------------------------------------------------------- -! Aerosol optical depth, single scattering albedo and asymmetry parameter -czhao 03/2010 -!-------------------------------------------------------------------------- -! Aerosol optical depth by layer for each RRTMG longwave band -! No aerosols in layer above model top (kte+1) -! Steven Cavallo: Upper bound of loop changed to nlayers from kte+1 -! do nb = 1, nbndlw -! do k = kts, kte+1 -! 
tauaer(ncol,k,nb) = 0. -! enddo -! enddo - -! ... Aerosol effects. Added aerosol feedbacks from Chem , 03/2010 -czhao -! - do nb = 1, nbndlw - do k = kts,nlayers - tauaer(ncol,k,nb) = 0. - end do - end do + !--- calculate the effective radius for cloud ice in layers below the model top, following + ! Kristjansson and Mitchell. limit upper bound of reice for Fu ice parameterization and + ! convert effective radius to generalized effective size (*1.0315; Fu, 1996): -#ifdef WRF_CHEM - IF ( AER_RA_FEEDBACK == 1) then -! do nb = 1, nbndlw - do k = kts,kte !wig - if(tauaerlw1(i,k,j).gt.thresh .and. tauaerlw16(i,k,j).gt.thresh) then - tauaer(ncol,k,1)=tauaerlw1(i,k,j) - tauaer(ncol,k,2)=tauaerlw2(i,k,j) - tauaer(ncol,k,3)=tauaerlw3(i,k,j) - tauaer(ncol,k,4)=tauaerlw4(i,k,j) - tauaer(ncol,k,5)=tauaerlw5(i,k,j) - tauaer(ncol,k,6)=tauaerlw6(i,k,j) - tauaer(ncol,k,7)=tauaerlw7(i,k,j) - tauaer(ncol,k,8)=tauaerlw8(i,k,j) - tauaer(ncol,k,9)=tauaerlw9(i,k,j) - tauaer(ncol,k,10)=tauaerlw10(i,k,j) - tauaer(ncol,k,11)=tauaerlw11(i,k,j) - tauaer(ncol,k,12)=tauaerlw12(i,k,j) - tauaer(ncol,k,13)=tauaerlw13(i,k,j) - tauaer(ncol,k,14)=tauaerlw14(i,k,j) - tauaer(ncol,k,15)=tauaerlw15(i,k,j) - tauaer(ncol,k,16)=tauaerlw16(i,k,j) - endif - enddo ! k -! end do ! nb - -!wig beg - do nb = 1, nbndlw - slope = 0. !use slope as a sum holder - do k = kts,kte - slope = slope + tauaer(ncol,k,nb) - end do - if( slope < 0. ) then - write(msg,'("ERROR: Negative total lw optical depth of ",f8.2," at point i,j,nb=",3i5)') slope,i,j,nb - call wrf_error_fatal(msg) - else if( slope > 5. ) then - call wrf_message("-------------------------") - write(msg,'("WARNING: Large total lw optical depth of ",f8.2," at point i,j,nb=",3i5)') slope,i,j,nb - call wrf_message(msg) - - call wrf_message("Diagnostics 1: k, tauaerlw1, tauaerlw16") - do k=kts,kte - write(msg,'(i4,2f8.2)') k, tauaerlw1(i,k,j), tauaerlw16(i,k,j) - call wrf_message(msg) - end do - call wrf_message("-------------------------") - endif - enddo ! 
nb - endif ! aer_ra_feedback -#endif + call reicalc(ncol,pcols,pver,tlay,reice) -! Call RRTMG longwave radiation model - call rrtmg_lw & - (ncol ,nlay ,icld , & - play ,plev ,tlay ,tlev ,tsfc , & - h2ovmr ,o3vmr ,co2vmr ,ch4vmr ,n2ovmr ,o2vmr , & - cfc11vmr,cfc12vmr,cfc22vmr,ccl4vmr ,emis , & - inflglw ,iceflglw,liqflglw,cldfmcl , & - taucmcl ,ciwpmcl ,clwpmcl ,reicmcl ,relqmcl , & - tauaer , & - uflx ,dflx ,hr ,uflxc ,dflxc, hrc) + if(iceflglw .eq. 3) then + do k = kts, kte + reice(ncol,k) = reice(ncol,k) * 1.0315 + reice(ncol,k) = min(140.0,reice(ncol,k)) + enddo + endif + endif -! Output downard surface flux, and outgoing longwave flux and cloud forcing -! at the top of atmosphere (W/m2) - glw(i,j) = dflx(1,1) -! olr(i,j) = uflx(1,kte+2) -! lwcf(i,j) = uflxc(1,kte+2) - uflx(1,kte+2) -! Steven Cavallo: Changed OLR to be valid at the top of atmosphere instead -! of top of model. Dec 2010. - olr(i,j) = uflx(1,nlayers+1) - lwcf(i,j) = uflxc(1,nlayers+1) - uflx(1,nlayers+1) - - if (present(lwupt)) then -! Output up and down toa fluxes for total and clear sky - lwupt(i,j) = uflx(1,kte+2) - lwuptc(i,j) = uflxc(1,kte+2) - lwdnt(i,j) = dflx(1,kte+2) - lwdntc(i,j) = dflxc(1,kte+2) -! Output up and down surface fluxes for total and clear sky - lwupb(i,j) = uflx(1,1) - lwupbc(i,j) = uflxc(1,1) - lwdnb(i,j) = dflx(1,1) - lwdnbc(i,j) = dflxc(1,1) - endif + if(has_reqs .ne. 0) then + !--- fill the effective radius for snow with that calculated in the Thompson cloud + ! microphysics parameterization: + inflglw = 5 + iceflglw = 5 + do k = kts, kte + resnow(ncol,k) = max(10.,re_snow(i,k,j)*1.e6) + enddo + else + do k = kts, kte + resnow(ncol,k) = 10. + enddo + endif -! Output up and down layer fluxes for total and clear sky. -! Vertical ordering is from bottom to top in units of W m-2. 
- if ( present (lwupflx) ) then - do k=kts,kte+2 - lwupflx(i,k,j) = uflx(1,k) - lwupflxc(i,k,j) = uflxc(1,k) - lwdnflx(i,k,j) = dflx(1,k) - lwdnflxc(i,k,j) = dflxc(1,k) - enddo - endif + !--- calculate the cloud liquid water path in units of g/m2 below the model top: + do k = kts, kte + gliqwp = qc1d(k) * pdel(ncol,k)*100.0 / gravmks * 1000.0 !grid box liquid water path. + cliqwp(ncol,k) = gliqwp / max(0.01,cldfrac(ncol,k)) !in-cloud liquid water path. + enddo -! Output heating rate tendency; convert heating rate from K/d to K/s -! Heating rate arrays are ordered vertically from bottom to top here. - do k=kts,kte - tten1d(k) = hr(ncol,k)/86400. - rthratenlw(i,k,j) = tten1d(k)/pi3d(i,k,j) - enddo - -! -#if defined(mpas) || defined(hydrostatic_core) -!MPAS specific (Laura D. Fowler - 2013-04-11): deallocate all local arrays. -!coordinate system. Therefore, we define nlayers for each individual column: - if(allocated(o3mmr) ) deallocate(o3mmr ) - if(allocated(varint) ) deallocate(varint ) - if(allocated(plev) ) deallocate(plev ) - if(allocated(tlev) ) deallocate(tlev ) - if(allocated(play) ) deallocate(play ) - if(allocated(tlay) ) deallocate(tlay ) - if(allocated(h2ovmr) ) deallocate(h2ovmr ) - if(allocated(o3vmr) ) deallocate(o3vmr ) - if(allocated(co2vmr) ) deallocate(co2vmr ) - if(allocated(o2vmr) ) deallocate(o2vmr ) - if(allocated(ch4vmr) ) deallocate(ch4vmr ) - if(allocated(n2ovmr) ) deallocate(n2ovmr ) - if(allocated(cfc11vmr)) deallocate(cfc11vmr) - if(allocated(cfc12vmr)) deallocate(cfc12vmr) - if(allocated(cfc22vmr)) deallocate(cfc22vmr) - if(allocated(ccl4vmr) ) deallocate(ccl4vmr ) - if(allocated(clwpth) ) deallocate(clwpth ) - if(allocated(ciwpth) ) deallocate(ciwpth ) - if(allocated(rel) ) deallocate(rel ) - if(allocated(rei) ) deallocate(rei ) - if(allocated(cldfrac) ) deallocate(cldfrac ) - if(allocated(relqmcl) ) deallocate(relqmcl ) - if(allocated(reicmcl) ) deallocate(reicmcl ) - if(allocated(uflx) ) deallocate(uflx ) - if(allocated(dflx) ) 
deallocate(dflx ) - if(allocated(uflxc) ) deallocate(uflxc ) - if(allocated(dflxc) ) deallocate(dflxc ) - if(allocated(hr) ) deallocate(hr ) - if(allocated(hrc) ) deallocate(hrc ) - if(allocated(taucld) ) deallocate(taucld ) - if(allocated(cldfmcl) ) deallocate(cldfmcl ) - if(allocated(clwpmcl) ) deallocate(clwpmcl ) - if(allocated(ciwpmcl) ) deallocate(ciwpmcl ) - if(allocated(taucmcl) ) deallocate(taucmcl ) - if(allocated(tauaer) ) deallocate(tauaer ) -#endif + !--- calculate the cloud ice path in units of g/m2 below the model top: + if(iceflglw .eq. 3)then + do k = kts, kte + gicewp = (qi1d(k)+qs1d(k)) * pdel(ncol,k)*100.0 / gravmks * 1000.0 !grid box ice water path. + cicewp(ncol,k) = gicewp / max(0.01,cldfrac(ncol,k)) !in-cloud ice water path. + enddo + elseif(iceflglw .ge. 4) then + do k = kts, kte + gicewp = qi1d(k) * pdel(ncol,k)*100.0 / gravmks * 1000.0 !grid box ice water path. + cicewp(ncol,k) = gicewp / max(0.01,cldfrac(ncol,k)) !in-cloud ice water path. + enddo + endif - end do i_loop - end do j_loop + !--- calculate the snow path in units of g/m2 below the model top: + if(iceflglw.eq.5)then + do k = kts, kte + snow_mass_factor = 1.0 + if(resnow(ncol,k) .gt. 130.)then + snow_mass_factor = (130.0/resnow(ncol,k))*(130.0/resnow(ncol,k)) + resnow(ncol,k) = 130.0 + endif + gsnowp = qs1d(k) * pdel(ncol,k)*100.0 / gravmks * 1000.0 ! grid box snow path. + csnowp(ncol,k) = snow_mass_factor * gsnowp / max(0.01,cldfrac(ncol,k)) + enddo + endif + + !--- set the cloud physical property between the model top and the top of the atmosphere. do not allow + ! clouds between the model top and the top of the atmosphere: + do k = kts, kte + clwpth(ncol,k) = cliqwp(ncol,k) + ciwpth(ncol,k) = cicewp(ncol,k) + cswpth(ncol,k) = csnowp(ncol,k) + rel(ncol,k) = reliq(ncol,k) + rei(ncol,k) = reice(ncol,k) + res(ncol,k) = resnow(ncol,k) + enddo + do k = kte+1, nlayers + clwpth(ncol,k) = 0. + ciwpth(ncol,k) = 0. + cswpth(ncol,k) = 0. + rel(ncol,k) = 10. + rei(ncol,k) = 10. 
+ res(ncol,k) = 10. + cldfrac(ncol,k) = 0. + do nb = 1, nbndlw + taucld(nb,ncol,k) = 0. + enddo + enddo -!------------------------------------------------------------------- + endif - END SUBROUTINE RRTMG_LWRAD + !--- sub-column generator for McICA: + iplon = 1 + irng = 0 + permuteseed = 150 + + call mcica_subcol_lw & + (iplon , ncol , nlay , icld , permuteseed , irng , play , & + cldfrac , ciwpth , clwpth , cswpth , rei , rel , res , & + taucld , cldfmcl , ciwpmcl , clwpmcl , cswpmcl , reicmcl , relqmcl , & + resnmcl , taucmcl) + + !--- initialization of aerosol optical depths: + do nb = 1, nbndlw + do k = kts, nlayers + tauaer(ncol,k,nb) = 0. + enddo + enddo - -!------------------------------------------------------------------------- + + + !--- CALL TO THE RRTMG LONG WAVE RADIATION MODEL: + call rrtmg_lw & + (ncol , nlay , icld , play , plev , tlay , & + tlev , tsfc , h2ovmr , o3vmr , co2vmr , ch4vmr , & + n2ovmr , o2vmr , cfc11vmr , cfc12vmr , cfc22vmr , ccl4vmr , & + emis , inflglw , iceflglw , liqflglw , cldfmcl , taucmcl , & + ciwpmcl , clwpmcl , cswpmcl , reicmcl , relqmcl , resnmcl , & + tauaer , uflx , dflx , hr , uflxc , dflxc , & + hrc) + + + + !--- OUTPUTS: + glw(i,j) = dflx(1,1) + olr(i,j) = uflx(1,nlayers+1) + lwcf(i,j) = uflxc(1,nlayers+1) - uflx(1,nlayers+1) + + if(present(lwupt)) then + !output up and down toa fluxes for total and clear sky: + lwupt(i,j) = uflx(1,nlayers+1) + lwuptc(i,j) = uflxc(1,nlayers+1) + lwdnt(i,j) = dflx(1,nlayers+1) + lwdntc(i,j) = dflxc(1,nlayers+1) + !output up and down surface fluxes for total and clear sky: + lwupb(i,j) = uflx(1,1) + lwupbc(i,j) = uflxc(1,1) + lwdnb(i,j) = dflx(1,1) + lwdnbc(i,j) = dflxc(1,1) + endif + + if(present(lwupflx)) then + !output up and down fluxes: + do k=kts,nlayers+1 + lwupflx(i,k,j) = uflx(1,k) + lwupflxc(i,k,j) = uflxc(1,k) + lwdnflx(i,k,j) = dflx(1,k) + lwdnflxc(i,k,j) = dflxc(1,k) + enddo + endif + + !--- output heating rate tendency. 
convert heating rate from K per day to K per second: + do k = kts, kte + tten1d(k) = hr(ncol,k)/86400. + rthratenlw(i,k,j) = tten1d(k)/pi3d(i,k,j) + enddo + + !--- output the effective radii for cloud water, cloud ice, and snow: + if(icloud.gt.0 .and. present(rre_cloud) .and. present(rre_ice) .and. present(rre_snow)) then + do k = kts, kte + if(cldfra3d(i,k,j) .gt. 0.) then + rre_cloud(i,k,j) = rel(ncol,k) + rre_ice(i,k,j) = rei(ncol,k) + rre_snow(i,k,j) = res(ncol,k) + endif + enddo + endif + + !--- deallocate local column arrays: + if(allocated(o3mmr) ) deallocate(o3mmr ) + if(allocated(varint) ) deallocate(varint ) + if(allocated(plev) ) deallocate(plev ) + if(allocated(tlev) ) deallocate(tlev ) + if(allocated(play) ) deallocate(play ) + if(allocated(tlay) ) deallocate(tlay ) + if(allocated(h2ovmr) ) deallocate(h2ovmr ) + if(allocated(o3vmr) ) deallocate(o3vmr ) + if(allocated(co2vmr) ) deallocate(co2vmr ) + if(allocated(o2vmr) ) deallocate(o2vmr ) + if(allocated(ch4vmr) ) deallocate(ch4vmr ) + if(allocated(n2ovmr) ) deallocate(n2ovmr ) + if(allocated(cfc11vmr)) deallocate(cfc11vmr) + if(allocated(cfc12vmr)) deallocate(cfc12vmr) + if(allocated(cfc22vmr)) deallocate(cfc22vmr) + if(allocated(ccl4vmr) ) deallocate(ccl4vmr ) + if(allocated(clwpth) ) deallocate(clwpth ) + if(allocated(ciwpth) ) deallocate(ciwpth ) + if(allocated(cswpth) ) deallocate(cswpth ) + if(allocated(rel) ) deallocate(rel ) + if(allocated(rei) ) deallocate(rei ) + if(allocated(res) ) deallocate(res ) + if(allocated(cldfrac) ) deallocate(cldfrac ) + if(allocated(relqmcl) ) deallocate(relqmcl ) + if(allocated(reicmcl) ) deallocate(reicmcl ) + if(allocated(resnmcl) ) deallocate(resnmcl ) + if(allocated(uflx) ) deallocate(uflx ) + if(allocated(dflx) ) deallocate(dflx ) + if(allocated(uflxc) ) deallocate(uflxc ) + if(allocated(dflxc) ) deallocate(dflxc ) + if(allocated(hr) ) deallocate(hr ) + if(allocated(hrc) ) deallocate(hrc ) + if(allocated(taucld) ) deallocate(taucld ) + if(allocated(cldfmcl) ) 
deallocate(cldfmcl ) + if(allocated(clwpmcl) ) deallocate(clwpmcl ) + if(allocated(ciwpmcl) ) deallocate(ciwpmcl ) + if(allocated(cswpmcl) ) deallocate(cswpmcl ) + if(allocated(taucmcl) ) deallocate(taucmcl ) + if(allocated(tauaer) ) deallocate(tauaer ) + + end do i_loop !end longitude loop. + + end do j_loop !end latitude loop. + + end subroutine rrtmg_lwrad + +!================================================================================================================= SUBROUTINE INIRAD (O3PROF,Plev, kts, kte) !------------------------------------------------------------------------- IMPLICIT NONE diff --git a/src/core_atmosphere/physics/physics_wrf/module_ra_rrtmg_sw.F b/src/core_atmosphere/physics/physics_wrf/module_ra_rrtmg_sw.F index aab9cafab3..414fc6b41a 100644 --- a/src/core_atmosphere/physics/physics_wrf/module_ra_rrtmg_sw.F +++ b/src/core_atmosphere/physics/physics_wrf/module_ra_rrtmg_sw.F @@ -1398,8 +1398,8 @@ module mcica_subcol_gen_sw !------------------------------------------------------------------ subroutine mcica_subcol_sw(iplon, ncol, nlay, icld, permuteseed, irng, play, & - cldfrac, ciwp, clwp, rei, rel, tauc, ssac, asmc, fsfc, & - cldfmcl, ciwpmcl, clwpmcl, reicmcl, relqmcl, & + cldfrac, ciwp, clwp, cswp, rei, rel, res, tauc, ssac, asmc, fsfc, & + cldfmcl, ciwpmcl, clwpmcl, cswpmcl, reicmcl, relqmcl, resnmcl, & taucmcl, ssacmcl, asmcmcl, fsfcmcl) ! ----- Input ----- @@ -1435,10 +1435,14 @@ subroutine mcica_subcol_sw(iplon, ncol, nlay, icld, permuteseed, irng, play, & ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: clwp(:,:) ! in-cloud liquid water path ! Dimensions: (ncol,nlay) + real(kind=rb), intent(in) :: cswp(:,:) ! in-cloud snow water path + ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: rei(:,:) ! cloud ice particle size ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: rel(:,:) ! cloud liquid particle size ! Dimensions: (ncol,nlay) + real(kind=rb), intent(in) :: res(:,:) ! cloud snow particle size + ! 
Dimensions: (ncol,nlay) ! ----- Output ----- ! Atmosphere/clouds - cldprmc [mcica] @@ -1448,10 +1452,14 @@ subroutine mcica_subcol_sw(iplon, ncol, nlay, icld, permuteseed, irng, play, & ! Dimensions: (ngptsw,ncol,nlay) real(kind=rb), intent(out) :: clwpmcl(:,:,:) ! in-cloud liquid water path [mcica] ! Dimensions: (ngptsw,ncol,nlay) + real(kind=rb), intent(out) :: cswpmcl(:,:,:) ! in-cloud snow water path [mcica] + ! Dimensions: (ngptsw,ncol,nlay) real(kind=rb), intent(out) :: relqmcl(:,:) ! liquid particle size (microns) ! Dimensions: (ncol,nlay) real(kind=rb), intent(out) :: reicmcl(:,:) ! ice partcle size (microns) ! Dimensions: (ncol,nlay) + real(kind=rb), intent(out) :: resnmcl(:,:) ! snow partcle size (microns) + ! Dimensions: (ncol,nlay) real(kind=rb), intent(out) :: taucmcl(:,:,:) ! in-cloud optical depth [mcica] ! Dimensions: (ngptsw,ncol,nlay) real(kind=rb), intent(out) :: ssacmcl(:,:,:) ! in-cloud single scattering albedo [mcica] @@ -1487,6 +1495,7 @@ subroutine mcica_subcol_sw(iplon, ncol, nlay, icld, permuteseed, irng, play, & reicmcl(:ncol,:nlay) = rei(:ncol,:nlay) relqmcl(:ncol,:nlay) = rel(:ncol,:nlay) + resnmcl(:ncol,:nlay) = res(:ncol,:nlay) pmid(:ncol,:nlay) = play(:ncol,:nlay)*1.e2_rb ! Convert input ice and liquid cloud water paths to specific humidity ice and liquid components @@ -1505,16 +1514,16 @@ subroutine mcica_subcol_sw(iplon, ncol, nlay, icld, permuteseed, irng, play, & ! enddo ! 
Generate the stochastic subcolumns of cloud optical properties for the shortwave; - call generate_stochastic_clouds_sw (ncol, nlay, nsubcsw, icld, irng, pmid, cldfrac, clwp, ciwp, & - tauc, ssac, asmc, fsfc, cldfmcl, clwpmcl, ciwpmcl, & + call generate_stochastic_clouds_sw (ncol, nlay, nsubcsw, icld, irng, pmid, cldfrac, clwp, ciwp, cswp, & + tauc, ssac, asmc, fsfc, cldfmcl, clwpmcl, ciwpmcl, cswpmcl, & taucmcl, ssacmcl, asmcmcl, fsfcmcl, permuteseed) end subroutine mcica_subcol_sw !------------------------------------------------------------------------------------------------- - subroutine generate_stochastic_clouds_sw(ncol, nlay, nsubcol, icld, irng, pmid, cld, clwp, ciwp, & - tauc, ssac, asmc, fsfc, cld_stoch, clwp_stoch, ciwp_stoch, & + subroutine generate_stochastic_clouds_sw(ncol, nlay, nsubcol, icld, irng, pmid, cld, clwp, ciwp, cswp, & + tauc, ssac, asmc, fsfc, cld_stoch, clwp_stoch, ciwp_stoch, cswp_stoch, & tauc_stoch, ssac_stoch, asmc_stoch, fsfc_stoch, changeSeed) !------------------------------------------------------------------------------------------------- @@ -1599,6 +1608,8 @@ subroutine generate_stochastic_clouds_sw(ncol, nlay, nsubcol, icld, irng, pmid, ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: ciwp(:,:) ! in-cloud ice water path (g/m2) ! Dimensions: (ncol,nlay) + real(kind=rb), intent(in) :: cswp(:,:) ! in-cloud snow water path (g/m2) + ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: tauc(:,:,:) ! in-cloud optical depth (non-delta scaled) ! Dimensions: (nbndsw,ncol,nlay) real(kind=rb), intent(in) :: ssac(:,:,:) ! in-cloud single scattering albedo (non-delta scaled) @@ -1614,6 +1625,8 @@ subroutine generate_stochastic_clouds_sw(ncol, nlay, nsubcol, icld, irng, pmid, ! Dimensions: (ngptsw,ncol,nlay) real(kind=rb), intent(out) :: ciwp_stoch(:,:,:) ! subcolumn in-cloud ice water path ! Dimensions: (ngptsw,ncol,nlay) + real(kind=rb), intent(out) :: cswp_stoch(:,:,:) ! subcolumn in-cloud snow water path + ! 
Dimensions: (ngptsw,ncol,nlay) real(kind=rb), intent(out) :: tauc_stoch(:,:,:) ! subcolumn in-cloud optical depth ! Dimensions: (ngptsw,ncol,nlay) real(kind=rb), intent(out) :: ssac_stoch(:,:,:) ! subcolumn in-cloud single scattering albedo @@ -1843,6 +1856,7 @@ subroutine generate_stochastic_clouds_sw(ncol, nlay, nsubcol, icld, irng, pmid, cld_stoch(isubcol,i,ilev) = 1._rb clwp_stoch(isubcol,i,ilev) = clwp(i,ilev) ciwp_stoch(isubcol,i,ilev) = ciwp(i,ilev) + cswp_stoch(isubcol,i,ilev) = cswp(i,ilev) n = ngb(isubcol) - ngbm tauc_stoch(isubcol,i,ilev) = tauc(n,i,ilev) ssac_stoch(isubcol,i,ilev) = ssac(n,i,ilev) @@ -1852,6 +1866,7 @@ subroutine generate_stochastic_clouds_sw(ncol, nlay, nsubcol, icld, irng, pmid, cld_stoch(isubcol,i,ilev) = 0._rb clwp_stoch(isubcol,i,ilev) = 0._rb ciwp_stoch(isubcol,i,ilev) = 0._rb + cswp_stoch(isubcol,i,ilev) = 0._rb tauc_stoch(isubcol,i,ilev) = 0._rb ssac_stoch(isubcol,i,ilev) = 1._rb asmc_stoch(isubcol,i,ilev) = 0._rb @@ -1960,7 +1975,7 @@ module rrtmg_sw_cldprmc ! ---------------------------------------------------------------------------- subroutine cldprmc_sw(nlayers, inflag, iceflag, liqflag, cldfmc, & - ciwpmc, clwpmc, reicmc, relqmc, & + ciwpmc, clwpmc, cswpmc, reicmc, relqmc, resnmc, & taormc, taucmc, ssacmc, asmcmc, fsfcmc) ! ---------------------------------------------------------------------------- @@ -1982,6 +1997,10 @@ subroutine cldprmc_sw(nlayers, inflag, iceflag, liqflag, cldfmc, & ! Dimensions: (ngptsw,nlayers) real(kind=rb), intent(in) :: clwpmc(:,:) ! cloud liquid water path [mcica] ! Dimensions: (ngptsw,nlayers) + real(kind=rb), intent(in) :: cswpmc(:,:) ! cloud snow water path [mcica] + ! Dimensions: (ngptsw,nlayers) + real(kind=rb), intent(in) :: resnmc(:) ! cloud snow particle effective radius (microns) + ! Dimensions: (nlayers) real(kind=rb), intent(in) :: relqmc(:) ! cloud liquid particle effective radius (microns) ! Dimensions: (nlayers) real(kind=rb), intent(in) :: reicmc(:) ! 
cloud ice particle effective radius (microns) @@ -2018,21 +2037,27 @@ subroutine cldprmc_sw(nlayers, inflag, iceflag, liqflag, cldfmc, & real(kind=rb) :: cwp ! total cloud water path real(kind=rb) :: radliq ! cloud liquid droplet radius (microns) real(kind=rb) :: radice ! cloud ice effective size (microns) + real(kind=rb) :: radsno ! cloud snow effective size (microns) real(kind=rb) :: factor real(kind=rb) :: fint real(kind=rb) :: taucldorig_a, taucloud_a, ssacloud_a, ffp, ffp1, ffpssa real(kind=rb) :: tauiceorig, scatice, ssaice, tauice, tauliqorig, scatliq, ssaliq, tauliq + real(kind=rb) :: tausnoorig, scatsno, ssasno, tausno real(kind=rb) :: fdelta(ngptsw) real(kind=rb) :: extcoice(ngptsw), gice(ngptsw) real(kind=rb) :: ssacoice(ngptsw), forwice(ngptsw) real(kind=rb) :: extcoliq(ngptsw), gliq(ngptsw) real(kind=rb) :: ssacoliq(ngptsw), forwliq(ngptsw) + real(kind=rb) :: extcosno(ngptsw), gsno(ngptsw) + real(kind=rb) :: ssacosno(ngptsw), forwsno(ngptsw) + + CHARACTER*80 errmess ! Initialize - hvrclc = '$Revision: 1.3 $' +!jm not thread safe hvrclc = '$Revision: 1.3 $' ! Some of these initializations are done elsewhere do lay = 1, nlayers @@ -2049,7 +2074,7 @@ subroutine cldprmc_sw(nlayers, inflag, iceflag, liqflag, cldfmc, & ! Main g-point interval loop do ig = 1, ngptsw - cwp = ciwpmc(ig,lay) + clwpmc(ig,lay) + cwp = ciwpmc(ig,lay) + clwpmc(ig,lay) + cswpmc(ig,lay) if (cldfmc(ig,lay) .ge. cldmin .and. & (cwp .ge. cldmin .or. taucmc(ig,lay) .ge. cldmin)) then @@ -2074,16 +2099,21 @@ subroutine cldprmc_sw(nlayers, inflag, iceflag, liqflag, cldfmc, & stop 'INFLAG = 1 OPTION NOT AVAILABLE WITH MCICA' ! (inflag=2): Separate treatement of ice clouds and water clouds. - elseif (inflag .eq. 2) then + elseif (inflag .ge. 2) then radice = reicmc(lay) ! Calculation of absorption coefficients due to ice clouds. - if (ciwpmc(ig,lay) .eq. 0.0_rb) then + if ((ciwpmc(ig,lay)+cswpmc(ig,lay)) .eq. 
0.0_rb) then extcoice(ig) = 0.0_rb ssacoice(ig) = 0.0_rb gice(ig) = 0.0_rb forwice(ig) = 0.0_rb + extcosno(ig) = 0.0_rb + ssacosno(ig) = 0.0_rb + gsno(ig) = 0.0_rb + forwsno(ig) = 0.0_rb + ! (iceflag = 1): ! Note: This option uses Ebert and Curry approach for all particle sizes similar to ! CAM3 implementation, though this is somewhat unjustified for large ice particles @@ -2138,8 +2168,13 @@ subroutine cldprmc_sw(nlayers, inflag, iceflag, liqflag, cldfmc, & ! For iceflag=3 option, ice particle generalized effective size is limited to 5.0 to 140.0 microns - elseif (iceflag .eq. 3) then - if (radice .lt. 5.0_rb .or. radice .gt. 140.0_rb) stop 'ICE GENERALIZED EFFECTIVE SIZE OUT OF BOUNDS' + elseif (iceflag .ge. 3) then + if (radice .lt. 5.0_rb .or. radice .gt. 140.0_rb) then + write(errmess,'(A,i5,i5,f8.2,f8.2)' ) & + 'ERROR: ICE GENERALIZED EFFECTIVE SIZE OUT OF BOUNDS' & + ,ig, lay, ciwpmc(ig,lay), radice + call wrf_error_fatal(errmess) + end if factor = (radice - 2._rb)/3._rb index = int(factor) if (index .eq. 46) index = 45 @@ -2153,8 +2188,14 @@ subroutine cldprmc_sw(nlayers, inflag, iceflag, liqflag, cldfmc, & (asyice3(index+1,ib) - asyice3(index,ib)) fdelta(ig) = fdlice3(index,ib) + fint * & (fdlice3(index+1,ib) - fdlice3(index,ib)) - if (fdelta(ig) .lt. 0.0_rb) stop 'FDELTA LESS THAN 0.0' - if (fdelta(ig) .gt. 1.0_rb) stop 'FDELTA GT THAN 1.0' + if (fdelta(ig) .lt. 0.0_rb) then + write(errmess, *) 'FDELTA LESS THAN 0.0' + call wrf_error_fatal(errmess) + end if + if (fdelta(ig) .gt. 1.0_rb) then + write(errmess, *) 'FDELTA GT THAN 1.0' + call wrf_error_fatal(errmess) + end if forwice(ig) = fdelta(ig) + 0.5_rb / ssacoice(ig) ! See Fu 1996 p. 2067 if (forwice(ig) .gt. gice(ig)) forwice(ig) = gice(ig) @@ -2167,6 +2208,75 @@ subroutine cldprmc_sw(nlayers, inflag, iceflag, liqflag, cldfmc, & endif +!!!!!!!!!!!!!!!!!! Mukul !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +!!!! INSERT THE EQUIVALENT SNOW VARIABLE CODE HERE +!!!! 
Although far from perfect, the snow will utilize the +!!!! same lookup table constants as cloud ice. Changes +!!!! to those constants for larger particle snow would be +!!!! an improvement. +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + if (cswpmc(ig,lay).gt.0.0_rb .and. iceflag .eq. 5) then + radsno = resnmc(lay) + if (radsno .lt. 5.0_rb .or. radsno .gt. 140.0_rb) then + write(errmess,'(A,i5,i5,f8.2,f8.2)' ) & + 'ERROR: SNOW GENERALIZED EFFECTIVE SIZE OUT OF BOUNDS' & + ,ig, lay, cswpmc(ig,lay), radsno + call wrf_error_fatal(errmess) + end if + factor = (radsno - 2._rb)/3._rb + index = int(factor) + if (index .eq. 46) index = 45 + fint = factor - float(index) + ib = ngb(ig) + extcosno(ig) = extice3(index,ib) + fint * & + (extice3(index+1,ib) - extice3(index,ib)) + ssacosno(ig) = ssaice3(index,ib) + fint * & + (ssaice3(index+1,ib) - ssaice3(index,ib)) + gsno(ig) = asyice3(index,ib) + fint * & + (asyice3(index+1,ib) - asyice3(index,ib)) + fdelta(ig) = fdlice3(index,ib) + fint * & + (fdlice3(index+1,ib) - fdlice3(index,ib)) + if (fdelta(ig) .lt. 0.0_rb) then + write(errmess, *) 'FDELTA LESS THAN 0.0' + call wrf_error_fatal(errmess) + end if + if (fdelta(ig) .gt. 1.0_rb) then + write(errmess, *) 'FDELTA GT THAN 1.0' + call wrf_error_fatal(errmess) + end if + forwsno(ig) = fdelta(ig) + 0.5_rb / ssacosno(ig) +! See Fu 1996 p. 2067 + if (forwsno(ig) .gt. gsno(ig)) forwsno(ig) = gsno(ig) +! Check to ensure all calculated quantities are within physical limits. + if (extcosno(ig) .lt. 0.0_rb) then + write(errmess, *) 'SNOW EXTINCTION LESS THAN 0.0' + call wrf_error_fatal(errmess) + end if + if (ssacosno(ig) .gt. 1.0_rb) then + write(errmess, *) 'SNOW SSA GRTR THAN 1.0' + call wrf_error_fatal(errmess) + end if + if (ssacosno(ig) .lt. 0.0_rb) then + write(errmess, *) 'SNOW SSA LESS THAN 0.0' + call wrf_error_fatal(errmess) + end if + if (gsno(ig) .gt. 
1.0_rb) then + write(errmess, *) 'SNOW ASYM GRTR THAN 1.0' + call wrf_error_fatal(errmess) + end if + if (gsno(ig) .lt. 0.0_rb) then + write(errmess, *) 'SNOW ASYM LESS THAN 0.0' + call wrf_error_fatal(errmess) + end if + else + extcosno(ig) = 0.0_rb + ssacosno(ig) = 0.0_rb + gsno(ig) = 0.0_rb + forwsno(ig) = 0.0_rb + endif + + ! Calculation of absorption coefficients due to water clouds. if (clwpmc(ig,lay) .eq. 0.0_rb) then extcoliq(ig) = 0.0_rb @@ -2200,28 +2310,55 @@ subroutine cldprmc_sw(nlayers, inflag, iceflag, liqflag, cldfmc, & if (gliq(ig) .lt. 0.0_rb) stop 'LIQUID ASYM LESS THAN 0.0' endif - tauliqorig = clwpmc(ig,lay) * extcoliq(ig) - tauiceorig = ciwpmc(ig,lay) * extcoice(ig) - taormc(ig,lay) = tauliqorig + tauiceorig - - ssaliq = ssacoliq(ig) * (1._rb - forwliq(ig)) / & - (1._rb - forwliq(ig) * ssacoliq(ig)) - tauliq = (1._rb - forwliq(ig) * ssacoliq(ig)) * tauliqorig - ssaice = ssacoice(ig) * (1._rb - forwice(ig)) / & - (1._rb - forwice(ig) * ssacoice(ig)) - tauice = (1._rb - forwice(ig) * ssacoice(ig)) * tauiceorig - scatliq = ssaliq * tauliq - scatice = ssaice * tauice - taucmc(ig,lay) = tauliq + tauice + if (iceflag .lt. 
5) then + tauliqorig = clwpmc(ig,lay) * extcoliq(ig) + tauiceorig = ciwpmc(ig,lay) * extcoice(ig) + taormc(ig,lay) = tauliqorig + tauiceorig + + ssaliq = ssacoliq(ig) * (1._rb - forwliq(ig)) / & + (1._rb - forwliq(ig) * ssacoliq(ig)) + tauliq = (1._rb - forwliq(ig) * ssacoliq(ig)) * tauliqorig + ssaice = ssacoice(ig) * (1._rb - forwice(ig)) / & + (1._rb - forwice(ig) * ssacoice(ig)) + tauice = (1._rb - forwice(ig) * ssacoice(ig)) * tauiceorig + scatliq = ssaliq * tauliq + scatice = ssaice * tauice + scatsno = 0.0_rb + taucmc(ig,lay) = tauliq + tauice + else + tauliqorig = clwpmc(ig,lay) * extcoliq(ig) + tauiceorig = ciwpmc(ig,lay) * extcoice(ig) + tausnoorig = cswpmc(ig,lay) * extcosno(ig) + taormc(ig,lay) = tauliqorig + tauiceorig + tausnoorig + + ssaliq = ssacoliq(ig) * (1._rb - forwliq(ig)) / & + (1._rb - forwliq(ig) * ssacoliq(ig)) + tauliq = (1._rb - forwliq(ig) * ssacoliq(ig)) * tauliqorig + ssaice = ssacoice(ig) * (1._rb - forwice(ig)) / & + (1._rb - forwice(ig) * ssacoice(ig)) + tauice = (1._rb - forwice(ig) * ssacoice(ig)) * tauiceorig + ssasno = ssacosno(ig) * (1._rb - forwsno(ig)) / & + (1._rb - forwsno(ig) * ssacosno(ig)) + tausno = (1._rb - forwsno(ig) * ssacosno(ig)) * tausnoorig + scatliq = ssaliq * tauliq + scatice = ssaice * tauice + scatsno = ssasno * tausno + taucmc(ig,lay) = tauliq + tauice + tausno + endif ! Ensure non-zero taucmc and scatice if(taucmc(ig,lay).eq.0.) taucmc(ig,lay) = cldmin if(scatice.eq.0.) scatice = cldmin + if(scatsno.eq.0.) scatsno = cldmin - ssacmc(ig,lay) = (scatliq + scatice) / taucmc(ig,lay) + if (iceflag .lt. 5) then + ssacmc(ig,lay) = (scatliq + scatice) / taucmc(ig,lay) + else + ssacmc(ig,lay) = (scatliq + scatice + scatsno) / taucmc(ig,lay) + endif - if (iceflag .eq. 3) then + if (iceflag .eq. 3 .or. iceflag.eq.4) then ! In accordance with the 1996 Fu paper, equation A.3, ! the moments for ice were calculated depending on whether using spheres ! or hexagonal ice crystals. 
@@ -2231,6 +2368,12 @@ subroutine cldprmc_sw(nlayers, inflag, iceflag, liqflag, cldfmc, & (scatliq*(gliq(ig)**istr - forwliq(ig)) / & (1.0_rb - forwliq(ig)) + scatice * ((gice(ig)-forwice(ig))/ & (1.0_rb - forwice(ig)))**istr) + elseif (iceflag .eq. 5) then + istr = 1 + asmcmc(ig,lay) = (1.0_rb/(scatliq+scatice+scatsno)) & + * (scatliq*(gliq(ig)**istr - forwliq(ig))/(1.0_rb - forwliq(ig)) & + + scatice * ((gice(ig)-forwice(ig))/(1.0_rb - forwice(ig))) & + + scatsno * ((gsno(ig)-forwsno(ig))/(1.0_rb - forwsno(ig)))**istr) else ! This code is the standard method for delta-m scaling. @@ -2382,7 +2525,7 @@ subroutine reftra_sw(nlayers, lrtchk, pgg, prmuz, ptau, pw, & ! Initialize - hvrrft = '$Revision: 1.3 $' +!jm not thread safe hvrrft = '$Revision: 1.3 $' zsr3=sqrt(3._rb) zwcrit=0.9999995_rb @@ -3131,7 +3274,7 @@ subroutine taumol_sw(nlayers, & ! real(kind=rb), intent(out) :: ssa(:,:) ! single scattering albedo (inactive) ! Dimensions: (nlayers,ngptsw) - hvrtau = '$Revision: 1.3 $' +!jm not thread safe hvrtau = '$Revision: 1.3 $' ! Calculate gaseous optical depth and planck fractions for each spectral band. @@ -4472,7 +4615,7 @@ subroutine rrtmg_sw_ini(cpdair) ! BPADE Inverse of the Pade approximation constant ! - hvrini = '$Revision: 1.3 $' +!jm not thread safe hvrini = '$Revision: 1.3 $' ! Initialize model data call swdatinit(cpdair) @@ -4570,7 +4713,7 @@ subroutine swdatinit(cpdair) use rrsw_con, only: heatfac, grav, planck, boltz, & clight, avogad, alosmt, gascon, radcn1, radcn2, & - sbcnst, secdy + sbcnst, secdy, oneminus, pi use rrsw_vsn save @@ -4616,6 +4759,11 @@ subroutine swdatinit(cpdair) ! (W cm-2 K-4) secdy = 8.6400e4_rb ! Number of seconds per day ! (s d-1) + +!jm 20141107 moved here for thread safety + oneminus = 1.0_rb - 1.e-06_rb ! zepsec + pi = 2._rb * asin(1._rb) + ! ! units are generally cgs ! 
@@ -8594,14 +8742,15 @@ subroutine rrtmg_sw & coszen ,adjes ,dyofyr ,scon , & inflgsw ,iceflgsw,liqflgsw,cldfmcl , & taucmcl ,ssacmcl ,asmcmcl ,fsfcmcl , & - ciwpmcl ,clwpmcl ,reicmcl ,relqmcl , & + ciwpmcl ,clwpmcl ,cswpmcl ,reicmcl ,relqmcl ,resnmcl, & tauaer ,ssaaer ,asmaer ,ecaer , & - swuflx ,swdflx ,swhr ,swuflxc ,swdflxc ,swhrc, & + swuflx ,swdflx ,swhr ,swuflxc ,swdflxc ,swhrc, aer_opt, & ! --------- Add the following four compenants for ssib shortwave down radiation ---! ! ------------------- by Zhenxin 2011-06-20 --------------------------------! - sibvisdir, sibvisdif, sibnirdir, sibnirdif & - ) + sibvisdir, sibvisdif, sibnirdir, sibnirdif, & ! ---------------------- End, Zhenxin 2011-06-20 --------------------------------! + swdkdir,swdkdif & ! jararias, 2013/08/10 + ) ! ------- Description ------- @@ -8768,6 +8917,8 @@ subroutine rrtmg_sw & ! Dimensions: (ngptsw,ncol,nlay) real(kind=rb), intent(in) :: clwpmcl(:,:,:) ! In-cloud liquid water path (g/m2) ! Dimensions: (ngptsw,ncol,nlay) + real(kind=rb), intent(in) :: cswpmcl(:,:,:) ! In-cloud snow water path (g/m2) + ! Dimensions: (ngptsw,ncol,nlay) real(kind=rb), intent(in) :: reicmcl(:,:) ! Cloud ice effective radius (microns) ! Dimensions: (ncol,nlay) ! specific definition of reicmcl depends on setting of iceflglw: @@ -8782,6 +8933,8 @@ subroutine rrtmg_sw & ! [dge = 1.0315 * r_ec] real(kind=rb), intent(in) :: relqmcl(:,:) ! Cloud water drop effective radius (microns) ! Dimensions: (ncol,nlay) + real(kind=rb), intent(in) :: resnmcl(:,:) ! Cloud snow effective radius (microns) + ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: tauaer(:,:,:) ! Aerosol optical depth (iaer=10 only) ! Dimensions: (ncol,nlay,nbndsw) ! (non-delta scaled) @@ -8818,6 +8971,15 @@ subroutine rrtmg_sw & real(kind=rb), intent(out) :: swhrc(:,:) ! Clear sky shortwave radiative heating rate (K/d) ! Dimensions: (ncol,nlay) + integer, intent(in) :: aer_opt + real(kind=rb), intent(out) :: & + swdkdir(:,:), & ! 
Total shortwave downward direct flux (W/m2), Dimensions: (ncol,nlay) jararias, 2013/08/10 + swdkdif(:,:) ! Total shortwave downward diffuse flux (W/m2), Dimensions: (ncol,nlay) jararias, 2013/08/10 + + + + + ! ----- Local ----- ! Control @@ -8915,8 +9077,10 @@ subroutine rrtmg_sw & real(kind=rb) :: cldfmc(ngptsw,nlay+1) ! cloud fraction [mcica] real(kind=rb) :: ciwpmc(ngptsw,nlay+1) ! in-cloud ice water path [mcica] real(kind=rb) :: clwpmc(ngptsw,nlay+1) ! in-cloud liquid water path [mcica] + real(kind=rb) :: cswpmc(ngptsw,nlay+1) ! in-cloud snow water path [mcica] real(kind=rb) :: relqmc(nlay+1) ! liquid particle effective radius (microns) real(kind=rb) :: reicmc(nlay+1) ! ice particle effective size (microns) + real(kind=rb) :: resnmc(nlay+1) ! snow particle effective size (microns) real(kind=rb) :: taucmc(ngptsw,nlay+1) ! in-cloud optical depth [mcica] real(kind=rb) :: taormc(ngptsw,nlay+1) ! unscaled in-cloud optical depth [mcica] real(kind=rb) :: ssacmc(ngptsw,nlay+1) ! in-cloud single scattering albedo [mcica] @@ -8986,8 +9150,8 @@ subroutine rrtmg_sw & iout = 0 !BSINGH(PNNL) initializing iout to zero(Might be wrong!) as this variable is never initialized but used in spcvmc_sw zepsec = 1.e-06_rb zepzen = 1.e-10_rb - oneminus = 1.0_rb - zepsec - pi = 2._rb * asin(1._rb) +!jm not thread safe oneminus = 1.0_rb - zepsec +!jm not thread safe pi = 2._rb * asin(1._rb) istart = jpb1 iend = jpb2 @@ -9017,7 +9181,11 @@ subroutine rrtmg_sw & ! input aerosol optical depth at 0.55 microns for each aerosol type (ecaer) ! iaer = 10, input total aerosol optical depth, single scattering albedo ! and asymmetry parameter (tauaer, ssaaer, asmaer) directly + if ( aer_opt.eq.0 .or. aer_opt.eq.2 .or. aer_opt.eq.3) then iaer = 10 + else if ( aer_opt .eq. 1 ) then + iaer = 6 + endif ! Call model and data initialization, compute lookup tables, perform ! 
reduction of g-points from 224 to 112 for input absorption @@ -9039,11 +9207,11 @@ subroutine rrtmg_sw & play, plev, tlay, tlev, tsfc, h2ovmr, & o3vmr, co2vmr, ch4vmr, n2ovmr, o2vmr, & adjes, dyofyr, scon, inflgsw, iceflgsw, liqflgsw, & - cldfmcl, taucmcl, ssacmcl, asmcmcl, fsfcmcl, ciwpmcl, clwpmcl, & - reicmcl, relqmcl, tauaer, ssaaer, asmaer, & + cldfmcl, taucmcl, ssacmcl, asmcmcl, fsfcmcl, ciwpmcl, clwpmcl, cswpmcl, & + reicmcl, relqmcl, resnmcl, tauaer, ssaaer, asmaer, & nlayers, pavel, pz, pdp, tavel, tz, tbound, coldry, wkl, & adjflux, solvar, inflag, iceflag, liqflag, cldfmc, taucmc, & - ssacmc, asmcmc, fsfcmc, ciwpmc, clwpmc, reicmc, relqmc, & + ssacmc, asmcmc, fsfcmc, ciwpmc, clwpmc, cswpmc, reicmc, relqmc, resnmc, & taua, ssaa, asma) ! For cloudy atmosphere, use cldprop to set cloud optical properties based on @@ -9053,7 +9221,7 @@ subroutine rrtmg_sw & ! optical properties are transferred to rrtmg_sw arrays in cldprop. call cldprmc_sw(nlayers, inflag, iceflag, liqflag, cldfmc, & - ciwpmc, clwpmc, reicmc, relqmc, & + ciwpmc, clwpmc, cswpmc, reicmc, relqmc, resnmc, & taormc, taucmc, ssacmc, asmcmc, fsfcmc) icpr = 1 @@ -9213,6 +9381,9 @@ subroutine rrtmg_sw & ! Direct/diffuse fluxes dirdflux(i) = zbbfddir(i) difdflux(i) = swdflx(iplon,i) - dirdflux(i) + swdkdir(iplon,i) = dirdflux(i) ! all-sky direct flux jararias, 2013/08/10 + swdkdif(iplon,i) = difdflux(i) ! all-sky diffuse flux jararias, 2013/08/10 + ! 
UV/visible direct/diffuse fluxes dirdnuv(i) = zuvfddir(i) difdnuv(i) = zuvfd(i) - dirdnuv(i) @@ -9281,12 +9452,12 @@ subroutine inatm_sw (iplon, nlay, icld, iaer, & play, plev, tlay, tlev, tsfc, h2ovmr, & o3vmr, co2vmr, ch4vmr, n2ovmr, o2vmr, & adjes, dyofyr, scon, inflgsw, iceflgsw, liqflgsw, & - cldfmcl, taucmcl, ssacmcl, asmcmcl, fsfcmcl, ciwpmcl, clwpmcl, & - reicmcl, relqmcl, tauaer, ssaaer, asmaer, & + cldfmcl, taucmcl, ssacmcl, asmcmcl, fsfcmcl, ciwpmcl, clwpmcl, cswpmcl, & + reicmcl, relqmcl, resnmcl, tauaer, ssaaer, asmaer, & nlayers, pavel, pz, pdp, tavel, tz, tbound, coldry, wkl, & adjflux, solvar, inflag, iceflag, liqflag, cldfmc, taucmc, & - ssacmc, asmcmc, fsfcmc, ciwpmc, clwpmc, reicmc, relqmc, & - taua, ssaa, asma) + ssacmc, asmcmc, fsfcmc, ciwpmc, clwpmc, cswpmc, reicmc, relqmc, resnmc, & + taua, ssaa, asma) !*************************************************************************** ! ! Input atmospheric profile from GCM, and prepare it for use in RRTMG_SW. @@ -9355,10 +9526,14 @@ subroutine inatm_sw (iplon, nlay, icld, iaer, & ! Dimensions: (ngptsw,ncol,nlay) real(kind=rb), intent(in) :: clwpmcl(:,:,:) ! In-cloud liquid water path (g/m2) ! Dimensions: (ngptsw,ncol,nlay) + real(kind=rb), intent(in) :: cswpmcl(:,:,:) ! In-cloud snow water path (g/m2) + ! Dimensions: (ngptsw,ncol,nlay) real(kind=rb), intent(in) :: reicmcl(:,:) ! Cloud ice effective size (microns) ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: relqmcl(:,:) ! Cloud water drop effective radius (microns) ! Dimensions: (ncol,nlay) + real(kind=rb), intent(in) :: resnmcl(:,:) ! Cloud snow effective radius (microns) + ! Dimensions: (ncol,nlay) real(kind=rb), intent(in) :: tauaer(:,:,:) ! Aerosol optical depth ! Dimensions: (ncol,nlay,nbndsw) @@ -9417,10 +9592,14 @@ subroutine inatm_sw (iplon, nlay, icld, iaer, & ! Dimensions: (ngptsw,nlay) real(kind=rb), intent(out) :: clwpmc(:,:) ! in-cloud liquid water path ! Dimensions: (ngptsw,nlay) + real(kind=rb), intent(out) :: cswpmc(:,:) ! 
in-cloud snow path + ! Dimensions: (ngptsw,nlay) real(kind=rb), intent(out) :: relqmc(:) ! liquid particle effective radius (microns) ! Dimensions: (nlay) real(kind=rb), intent(out) :: reicmc(:) ! ice particle effective size (microns) ! Dimensions: (nlay) + real(kind=rb), intent(out) :: resnmc(:) ! snow particle effective size (microns) + ! Dimensions: (nlay) ! ----- Local ----- real(kind=rb), parameter :: amd = 28.9660_rb ! Effective molecular weight of dry air (g/mol) @@ -9460,8 +9639,10 @@ subroutine inatm_sw (iplon, nlay, icld, iaer, & fsfcmc(:,:) = 0.0_rb ciwpmc(:,:) = 0.0_rb clwpmc(:,:) = 0.0_rb + cswpmc(:,:) = 0.0_rb reicmc(:) = 0.0_rb relqmc(:) = 0.0_rb + resnmc(:) = 0.0_rb taua(:,:) = 0.0_rb ssaa(:,:) = 1.0_rb asma(:,:) = 0.0_rb @@ -9595,9 +9776,15 @@ subroutine inatm_sw (iplon, nlay, icld, iaer, & fsfcmc(ig,l) = fsfcmcl(ig,iplon,l) ciwpmc(ig,l) = ciwpmcl(ig,iplon,l) clwpmc(ig,l) = clwpmcl(ig,iplon,l) + if (iceflag.eq.5) then + cswpmc(ig,l)=cswpmcl(ig,iplon,l) + endif enddo reicmc(l) = reicmcl(iplon,l) relqmc(l) = relqmcl(iplon,l) + if (iceflag.eq.5) then + resnmc(l) = resnmcl(iplon,l) + endif enddo ! If an extra layer is being used in RRTMG, set all cloud properties to zero in the extra layer. @@ -9623,7 +9810,7 @@ MODULE module_ra_rrtmg_sw #if defined(mpas) !MPAS specific (Laura D. Fowler): -use mpas_atmphys_constants,only: cp +use mpas_atmphys_constants,only: cp,g=>gravity use mpas_atmphys_o3climatology,only: vinterp_ozn !> add-ons and modifications to sourcecode: @@ -9632,11 +9819,29 @@ MODULE module_ra_rrtmg_sw !> of time-varying trace gases: added option to use the ozone climatology !> from the CAM radiation codes. !> Laura D. Fowler (birch.mmm.ucar.edu) / 2013-07-08. 
+!> * cleaned-up the subroutine rrtmg_swrad in preparation for the implementation of the calculation of the +!> cloud optical properties when the effective radii for cloud water, cloud ice, and snow are provided by +!> the cloud microphysics schemes (note that for now, only the Thompson cloud microphysics scheme has the +!> option to calculate cloud radii). With the -g option, results are exactly the same as the original +!> subroutine. +!> Laura D. Fowler (laura@ucar.edu) / 2016-07-05. +!> * updated module_ra_rrtmg_sw.F using module_ra_rrtmg_sw.F from WRF version 3.8, namely to update the +!> calculation of the cloud optical properties to include the radiative effect of snow. +!> Laura D. Fowler (laura@ucar.edu / 2016-07-05). +!> * added the effective radii for cloud water, cloud ice, and snow calculated in the Thompson cloud +!> microphysics scheme as inputs to the subroutine rrtmg_swrad. revised the initialization of arrays rel, +!> rei, and res, accordingly. +!> Laura D. Fowler (laura@ucar.edu) / 2016-07-07. !MPAS specfic end. #else -use module_model_constants, only : cp +use module_model_constants,only : cp USE module_wrf_error +#if (HWRF == 1) +USE module_state_description, ONLY : FER_MP_HIRES, FER_MP_HIRES_ADVECT, ETAMP_HWRF +#else +USE module_state_description, ONLY : FER_MP_HIRES, FER_MP_HIRES_ADVECT +#endif !USE module_dm #endif @@ -9645,1028 +9850,598 @@ MODULE module_ra_rrtmg_sw use rrtmg_sw_rad, only: rrtmg_sw use mcica_subcol_gen_sw, only: mcica_subcol_sw -use module_ra_rrtmg_lw, only : inirad, o3data, relcalc, reicalc +use module_ra_rrtmg_lw, only : inirad, o3data, relcalc, reicalc, retab ! mcica_random_numbers, randomNumberSequence, & ! new_RandomNumberSequence, getRandomReal CONTAINS -!------------------------------------------------------------------ - SUBROUTINE RRTMG_SWRAD( & - rthratensw, & - swupt, swuptc, swdnt, swdntc, & - swupb, swupbc, swdnb, swdnbc, & -! 
swupflx, swupflxc, swdnflx, swdnflxc, & - swcf, gsw, & - xtime, gmt, xlat, xlong, & - radt, degrad, declin, & - coszr, julday, solcon, & - albedo, t3d, t8w, tsk, & - p3d, p8w, pi3d, rho3d, & - dz8w, cldfra3d, r, g, & - icloud, warm_rain, & - f_ice_phy, f_rain_phy, & - xland, xice, snow, & - qv3d, qc3d, qr3d, & - qi3d, qs3d, qg3d, & - o3input, o33d, & - alswvisdir, alswvisdif, & !Zhenxin ssib alb comp (06/20/2011) - alswnirdir, alswnirdif, & !Zhenxin ssib alb comp (06/20/2011) - swvisdir, swvisdif, & !Zhenxin ssib swr comp (06/20/2011) - swnirdir, swnirdif, & !Zhenxin ssib swi comp (06/20/2011) - sf_surface_physics, & !Zhenxin - f_qv, f_qc, f_qr, f_qi, f_qs, f_qg, & - tauaer300,tauaer400,tauaer600,tauaer999, & ! czhao - gaer300,gaer400,gaer600,gaer999, & ! czhao - waer300,waer400,waer600,waer999, & ! czhao - aer_ra_feedback, & -!jdfcz progn,prescribe, & - progn, & - qndrop3d,f_qndrop, & !czhao - ids,ide, jds,jde, kds,kde, & - ims,ime, jms,jme, kms,kme, & - its,ite, jts,jte, kts,kte, & - swupflx, swupflxc, swdnflx, swdnflxc & -#if defined(mpas) - ,noznlevels,pin,o3clim & -#endif - ) -!------------------------------------------------------------------ - IMPLICIT NONE -!------------------------------------------------------------------ - LOGICAL, INTENT(IN ) :: warm_rain -! - INTEGER, INTENT(IN ) :: ids,ide, jds,jde, kds,kde, & - ims,ime, jms,jme, kms,kme, & - its,ite, jts,jte, kts,kte - - INTEGER, INTENT(IN ) :: ICLOUD -! - REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & - INTENT(IN ) :: dz8w, & - t3d, & - t8w, & - p3d, & - p8w, & - pi3d, & - rho3d - - REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & - INTENT(INOUT) :: RTHRATENSW - - REAL, DIMENSION( ims:ime, jms:jme ) , & - INTENT(INOUT) :: GSW, & - SWCF, & - COSZR - - INTEGER, INTENT(IN ) :: JULDAY - REAL, INTENT(IN ) :: RADT,DEGRAD, & - XTIME,DECLIN,SOLCON,GMT - - REAL, DIMENSION( ims:ime, jms:jme ) , & - INTENT(IN ) :: XLAT, & - XLONG, & - XLAND, & - XICE, & - SNOW, & - TSK, & - ALBEDO -! -!!! 
------------------- Zhenxin (2011-06/20) ------------------ - REAL, DIMENSION( ims:ime, jms:jme ) , & - OPTIONAL , & - INTENT(IN) :: ALSWVISDIR, & ! ssib albedo of sw and lw - ALSWVISDIF, & - ALSWNIRDIR, & - ALSWNIRDIF - - REAL, DIMENSION( ims:ime, jms:jme ) , & - OPTIONAL , & - INTENT(OUT) :: SWVISDIR, & - SWVISDIF, & - SWNIRDIR, & - SWNIRDIF ! ssib sw dir and diff rad - INTEGER, INTENT(IN) :: sf_surface_physics ! ssib para - -! ----------------------- end Zhenxin -------------------------- -! - REAL, INTENT(IN ) :: R,G -! -! Optional -! - REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & - OPTIONAL , & - INTENT(IN ) :: & - CLDFRA3D, & - QV3D, & - QC3D, & - QR3D, & - QI3D, & - QS3D, & - QG3D, & - QNDROP3D - - real pi,third,relconst,lwpmin,rhoh2o - - REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & - OPTIONAL , & - INTENT(IN ) :: & - F_ICE_PHY, & - F_RAIN_PHY - - LOGICAL, OPTIONAL, INTENT(IN) :: & - F_QV,F_QC,F_QR,F_QI,F_QS,F_QG,F_QNDROP - -! Optional - REAL, DIMENSION( ims:ime, kms:kme, jms:jme ), OPTIONAL , & - INTENT(IN ) :: tauaer300,tauaer400,tauaer600,tauaer999, & ! czhao - gaer300,gaer400,gaer600,gaer999, & ! czhao - waer300,waer400,waer600,waer999 ! czhao - - INTEGER, INTENT(IN ), OPTIONAL :: aer_ra_feedback -!jdfcz INTEGER, INTENT(IN ), OPTIONAL :: progn,prescribe - INTEGER, INTENT(IN ), OPTIONAL :: progn -! Ozone - REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & - OPTIONAL , & - INTENT(IN ) :: O33D - INTEGER, OPTIONAL, INTENT(IN ) :: o3input - -#if defined(mpas) -!MPAS specific (Laura D. Fowler. 2013-05-14): Added the CAM ozone climatology. 
-!input arguments: +!================================================================================================================= + subroutine rrtmg_swrad( & + p3d,p8w,pi3d,t3d,t8w,dz8w,qv3d,qc3d,qr3d, & + qi3d,qs3d,qg3d,cldfra3d,o33d,tsk,albedo, & + xland,xice,snow,coszr,xtime,gmt,julday,radt, & + degrad,declin,solcon,xlat,xlong,icloud,o3input, & + noznlevels,pin,o3clim,gsw,swcf,rthratensw, & + has_reqc,has_reqi,has_reqs,re_cloud, & + re_ice,re_snow, & + swupt, swuptc, swdnt, swdntc, & + swupb, swupbc, swdnb, swdnbc, & + swupflx, swupflxc, swdnflx, swdnflxc, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte & + ) + + implicit none + +!================================================================================================================= + +!--- input arguments: + integer,intent(in):: ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte + integer,intent(in):: icloud,has_reqc,has_reqi,has_reqs + integer,intent(in):: julday + integer,intent(in),optional:: o3input + + real,intent(in):: radt,degrad,xtime,declin,solcon,gmt + real,intent(in),dimension(ims:ime,jms:jme):: xlat,xlong + real,intent(in),dimension(ims:ime,jms:jme):: albedo,tsk,snow,xice,xland + real,intent(in),dimension(ims:ime,kms:kme,jms:jme):: t3d,p3d,pi3d + real,intent(in),dimension(ims:ime,kms:kme,jms:jme):: dz8w,p8w,t8w + + real,intent(in),dimension(ims:ime,kms:kme,jms:jme):: re_cloud,re_ice,re_snow + real,intent(in),dimension(ims:ime,kms:kme,jms:jme),optional:: & + cldfra3d,qv3d,qc3d,qr3d,qi3d,qs3d,qg3d,o33d + +!--- additional input arguments to use the CAM ozone climatology: integer,intent(in):: noznlevels real,intent(in),dimension(1:noznlevels),optional:: pin real,intent(in),dimension(ims:ime,1:noznlevels,jms:jme),optional:: o3clim -!local variables: - real,dimension(1:noznlevels):: o3clim1d -#endif - !wavelength corresponding to wavenum1 and wavenum2 (cm-1) - real, save :: wavemin(nbndsw) ! 
Min wavelength (um) of 14 intervals - data wavemin /3.077,2.500,2.150,1.942,1.626,1.299, & - 1.242,0.778,0.625,0.442,0.345,0.263,0.200,3.846/ - real, save :: wavemax(nbndsw) ! Max wavelength (um) of interval - data wavemax/3.846,3.077,2.500,2.150,1.942,1.626, & - 1.299,1.242,0.778,0.625,0.442,0.345,0.263,12.195/ - real wavemid(nbndsw) ! Mid wavelength (um) of interval - real, parameter :: thresh=1.e-9 - real ang,slope - character(len=200) :: msg - -! Top of atmosphere and surface shortwave fluxes (W m-2) - REAL, DIMENSION( ims:ime, jms:jme ), & - OPTIONAL, INTENT(INOUT) :: & - SWUPT,SWUPTC,SWDNT,SWDNTC, & - SWUPB,SWUPBC,SWDNB,SWDNBC - -! Layer shortwave fluxes (including extra layer above model top) -! Vertical ordering is from bottom to top (W m-2) - REAL, DIMENSION( ims:ime, kms:kme+2, jms:jme ), & - OPTIONAL, INTENT(OUT) :: & - SWUPFLX,SWUPFLXC,SWDNFLX,SWDNFLXC - -! LOCAL VARS - - REAL, DIMENSION( kts:kte+1 ) :: Pw1D, & - Tw1D - - REAL, DIMENSION( kts:kte ) :: TTEN1D, & - CLDFRA1D, & - DZ1D, & - P1D, & - T1D, & - QV1D, & - QC1D, & - QR1D, & - QI1D, & - QS1D, & - QG1D, & - O31D, & - qndrop1d - -! Added local arrays for RRTMG - integer :: ncol, & - nlay, & - icld, & - inflgsw, & - iceflgsw, & - liqflgsw -! Dimension with extra layer from model top to TOA - real, dimension( 1, kts:kte+2 ) :: plev, & - tlev - real, dimension( 1, kts:kte+1 ) :: play, & - tlay, & - h2ovmr, & - o3vmr, & - co2vmr, & - o2vmr, & - ch4vmr, & - n2ovmr - real, dimension( kts:kte+1 ) :: o3mmr -! Surface albedo (for UV/visible and near-IR spectral regions, -! and for direct and diffuse radiation) - real, dimension( 1 ) :: asdir, & - asdif, & - aldir, & - aldif -! Dimension with extra layer from model top to TOA, -! 
though no clouds are allowed in extra layer - real, dimension( 1, kts:kte+1 ) :: clwpth, & - ciwpth, & - rel, & - rei, & - cldfrac, & - relqmcl, & - reicmcl - real, dimension( nbndsw, 1, kts:kte+1 ) :: taucld, & - ssacld, & - asmcld, & - fsfcld - real, dimension( ngptsw, 1, kts:kte+1 ) :: cldfmcl, & - clwpmcl, & - ciwpmcl, & - taucmcl, & - ssacmcl, & - asmcmcl, & - fsfcmcl - real, dimension( 1, kts:kte+1, nbndsw ) :: tauaer, & - ssaaer, & - asmaer - real, dimension( 1, kts:kte+1, naerec ) :: ecaer - -! Output arrays contain extra layer from model top to TOA - real, dimension( 1, kts:kte+2 ) :: swuflx, & - swdflx, & - swuflxc, & - swdflxc, & - sibvisdir, & ! Zhenxin 2011-06-20 - sibvisdif, & - sibnirdir, & - sibnirdif ! Zhenxin 2011-06-20 - real, dimension( 1, kts:kte+1 ) :: swhr, & - swhrc - - real, dimension ( 1 ) :: tsfc, & - ps, & - coszen - real :: ro, & - dz, & - adjes, & - scon - integer :: dyofyr - -! Set trace gas volume mixing ratios, 2005 values, IPCC (2007) -! carbon dioxide (379 ppmv) - real :: co2 - data co2 / 379.e-6 / -! methane (1774 ppbv) - real :: ch4 - data ch4 / 1774.e-9 / -! nitrous oxide (319 ppbv) - real :: n2o - data n2o / 319.e-9 / -! Set oxygen volume mixing ratio (for o2mmr=0.23143) - real :: o2 - data o2 / 0.209488 / - - integer :: iplon, irng, permuteseed - integer :: nb - -! For old lw cloud property specification -! Cloud and precipitation absorption coefficients -! real :: abcw,abice,abrn,absn -! data abcw /0.144/ -! data abice /0.0735/ -! data abrn /0.330e-3/ -! data absn /2.34e-3/ - -! Molecular weights and ratios for converting mmr to vmr units -! real :: amd ! Effective molecular weight of dry air (g/mol) -! real :: amw ! Molecular weight of water vapor (g/mol) -! real :: amo ! Molecular weight of ozone (g/mol) -! real :: amo2 ! 
Molecular weight of oxygen (g/mol) +!--- inout arguments: + real,intent(inout),dimension(ims:ime,jms:jme):: coszr,gsw,swcf + real,intent(inout),dimension(ims:ime,jms:jme),optional:: & + swupt,swuptc,swdnt,swdntc,swupb,swupbc,swdnb,swdnbc + + real,intent(inout),dimension(ims:ime,kms:kme,jms:jme):: rthratensw + +!--- output arguments: + real,intent(out),dimension(ims:ime,kms:kme+2,jms:jme ),optional:: & + swupflx,swupflxc,swdnflx,swdnflxc + +!local variables and arrays: + logical:: dorrsw + + integer:: na,nb,ncol,nlay,icld,inflgsw,iceflgsw,liqflgsw + integer:: dyofyr + integer:: iplon,irng,permuteseed + integer:: pcols,pver + integer:: idx_rei + integer:: i,j,k,n + + real:: coszrs,xt24,tloctm,hrang,xxlat,adjes,scon + real:: ro,dz + real:: corr + real:: gliqwp,gicewp,gsnowp,gravmks + real:: snow_mass_factor + real,dimension(1):: tsfc,landfrac,landm,snowh,icefrac + real,dimension(1):: asdir,asdif,aldir,aldif,coszen + real,dimension(1,1:kte-kts+1):: pdel,cicewp,cliqwp,csnowp,reliq,reice,resnow + real,dimension(kts:kte):: & + tten1d,cldfra1d,dz1d,p1d,t1d,qv1d,qc1d,qr1d,qi1d,qs1d,qg1d,o31d + real,dimension(kts:kte+1):: pw1d,tw1d + real,dimension(kts:kte+1):: o3mmr + +!--- additional local variables and arrays needed to include additional layers between the model top +! 
and the top of the atmosphere: + real,dimension(1,kts:kte+1):: play,tlay,h2ovmr,o3vmr,co2vmr,o2vmr,ch4vmr,n2ovmr + real,dimension(1,kts:kte+1):: clwpth,ciwpth,cswpth,rel,rei,res,cldfrac,relqmcl,reicmcl,resnmcl + real,dimension(1,kts:kte+1):: swhr,swhrc + + real,dimension(1,kts:kte+2):: plev,tlev + real,dimension(1,kts:kte+2):: swuflx,swdflx,swuflxc,swdflxc + real,dimension(1,kts:kte+2):: sibvisdir,sibvisdif,sibnirdir,sibnirdif + real,dimension(1,kts:kte+2):: swdkdir,swdkdif + + real,dimension(1,kts:kte+1,nbndsw):: tauaer,ssaaer,asmaer + + real,dimension(nbndsw,1,kts:kte+1):: taucld,ssacld,asmcld,fsfcld + real,dimension(ngptsw,1,kts:kte+1):: cldfmcl,clwpmcl,ciwpmcl,cswpmcl,taucmcl,ssacmcl,asmcmcl,fsfcmcl + +!--- additional local variables and arrays needed for the CAM ozone climatology: + real,dimension(1:noznlevels):: o3clim1d + +!--- additional local variables related to the implementation of aerosols in rrtmg_swrad in WRF 3.8. +! In WRF 3.8, these variables are in the argument list of subroutine rrtmg_swrad, but are made +! local here: + integer:: aer_opt + real,dimension(1,kts:kte+1,naerec):: ecaer + +!--- set trace gas volume mixing ratios, 2005 values, IPCC (2007): +!carbon dioxide (379 ppmv) + real :: co2 + data co2 / 379.e-6 / +!methane (1774 ppbv) + real :: ch4 + data ch4 / 1774.e-9 / +!nitrous oxide (319 ppbv) + real :: n2o + data n2o / 319.e-9 / + +!--- set oxygen volume mixing ratio (for o2mmr=0.23143): + real :: o2 + data o2 / 0.209488 / + +!--- molecular weights and ratios for converting mmr to vmr units +! real :: amd ! Effective molecular weight of dry air (g/mol) +! real :: amw ! Molecular weight of water vapor (g/mol) +! real :: amo ! Molecular weight of ozone (g/mol) +! real :: amo2 ! Molecular weight of oxygen (g/mol) ! Atomic weights for conversion from mass to volume mixing ratios -! data amd / 28.9660 / -! data amw / 18.0160 / -! data amo / 47.9998 / -! data amo2 / 31.9999 / +! data amd / 28.9660 / +! data amw / 18.0160 / +!
data amo / 47.9998 / +! data amo2 / 31.9999 / - real :: amdw ! Molecular weight of dry air / water vapor - real :: amdo ! Molecular weight of dry air / ozone - real :: amdo2 ! Molecular weight of dry air / oxygen - data amdw / 1.607793 / - data amdo / 0.603461 / - data amdo2 / 0.905190 / - -!! - real, dimension(1, 1:kte-kts+1 ) :: pdel ! Layer pressure thickness (mb) - - real, dimension(1, 1:kte-kts+1) :: cicewp, & ! in-cloud cloud ice water path - cliqwp, & ! in-cloud cloud liquid water path - reliq, & ! effective drop radius (microns) - reice ! ice effective drop size (microns) - real :: gliqwp, gicewp, gravmks + real :: amdw ! Molecular weight of dry air / water vapor + real :: amdo ! Molecular weight of dry air / ozone + real :: amdo2 ! Molecular weight of dry air / oxygen + data amdw / 1.607793 / + data amdo / 0.603461 / + data amdo2 / 0.905190 / + +!----------------------------------------------------------------------------------------------------------------- + +!--- all fields are ordered vertically from bottom to top (pressures are in mb): + ncol = 1 + +!--- initialize option for the calculation of the cloud optical properties: + icld = 2 ! with clouds using maximum/random cloud overlap in subroutine mcica_subcol_lw. + inflgsw = 2 + iceflgsw = 3 + liqflgsw = 1 + +!--- latitude loop: + j_loop: do j = jts,jte + +!--- longitude loop: + i_loop: do i = its,ite + + !--- calculate the cosine of the solar zenith angle at the current time step to determine if the sun is + ! above or below the horizon (xt24 is the fractional part of simulation days plus half of radt in + ! units of minutes, julian is in days, and radt is in minutes). do not call rrtmg_sw is night-time: + dorrsw = .true. + xt24 = mod(xtime+radt*0.5,1440.) + tloctm = gmt + xt24/60. + xlong(i,j)/15. + hrang = 15. * (tloctm-12.) * degrad + xxlat = xlat(i,j) * degrad + coszrs = sin(xxlat) * sin(declin) + cos(xxlat) * cos(declin) * cos(hrang) + coszr(i,j) = coszrs + + if (coszrs.le.0.0) dorrsw = .false. 
+ + if(dorrsw) then -! -! REAL :: TSFC,GLW0,OLR0,EMISS0,FP - REAL :: FP + !--- INITIALIZE COLUMN SOUNDING (the call to the short wave radiation code is done one column + ! at a time): + do k = kts, kte+1 + pw1d(k) = p8w(i,k,j) / 100. + tw1d(k) = t8w(i,k,j) + enddo -! real, dimension(1:ite-its+1 ) :: clat ! latitude in radians for columns - real :: coszrs ! Cosine of solar zenith angle for present latitude - logical :: dorrsw ! Flag to allow shortwave calculation + do k = kts, kte + p1d(k) = p3d(i,k,j) / 100. + dz1d(k) = dz8w(i,k,j) + t1d(k) = t3d(i,k,j) + qv1d(k) = amax1(qv3d(i,k,j),1.e-12) + o31d(k) = 0. + qc1d(k) = 0. + qr1d(k) = 0. + qi1d(k) = 0. + qs1d(k) = 0. + qg1d(k) = 0. + cldfra1d(k) = 0. + if(present(o33d)) o31d(k) = o33d(i,k,j) + enddo - real, dimension (1) :: landfrac, landm, snowh, icefrac + !--- initialize the local arrays containing the different cloud water and ice condenstates: + if(icloud .gt. 0) then + do k = kts,kte + if(present(qc3d)) qc1d(k) = amax1(qc3d(i,k,j),0.) + if(present(qr3d)) qr1d(k) = amax1(qr3d(i,k,j),0.) + if(present(qi3d)) qi1d(k) = amax1(qi3d(i,k,j),0.) + if(present(qs3d)) qs1d(k) = amax1(qs3d(i,k,j),0.) + if((present(qc3d) .or. present(qi3d) .or. present(qs3d)) .and. present(cldfra3d)) & + cldfra1d(k) = cldfra3d(i,k,j) + enddo + endif - integer :: pcols, pver + !--- initialize the local radiative heating rate: + do k = kts, kte + tten1d(k) = 0. + enddo - REAL :: XT24, TLOCTM, HRANG, XXLAT + !--- add extra layer to include absorption between the top of the model and the top of the atmosphere: + nlay = (kte-kts+1) + 1 + + !--- initialize local arrays called in the calculation of the cloud optical properties and radiative + ! fluxes: + do n = 1, ncol + do k = kts, kte + reliq(ncol,k) = 10. + reice(ncol,k) = 10. + resnow(ncol,k) = 10. + cliqwp(ncol,k) = 0. + cicewp(ncol,k) = 0. + csnowp(ncol,k) = 0. + enddo - INTEGER :: i,j,K, na - LOGICAL :: predicate + do k = 1, nlay + clwpth(n,k) = 0. + ciwpth(n,k) = 0. + cswpth(n,k) = 0. 
+ rel(n,k) = 0. + rei(n,k) = 0. + res(n,k) = 0. + cldfrac(n,k) = 0. + relqmcl(n,k) = 0. + reicmcl(n,k) = 0. + resnmcl(n,k) = 0. + swuflx(n,k) = 0. + swuflxc(n,k) = 0. + swdflx(n,k) = 0. + swdflxc(n,k) = 0. + swhr(n,k) = 0. + swhrc(n,k) = 0. + taucld(1:nbndsw,n,k) = 0. + tauaer(n,k,1:nbndsw) = 0. + ssaaer(n,k,1:nbndsw) = 0. + asmaer(n,k,1:nbndsw) = 0. + cldfmcl(1:ngptsw,n,k) = 0. + clwpmcl(1:ngptsw,n,k) = 0. + ciwpmcl(1:ngptsw,n,k) = 0. + cswpmcl(1:ngptsw,n,k) = 0. + taucmcl(1:ngptsw,n,k) = 0. + enddo + do k = 1, nlay + 1 + sibvisdir(ncol,k) = 0. + sibvisdif(ncol,k) = 0. + sibnirdir(ncol,k) = 0. + sibnirdif(ncol,k) = 0. + swdkdir(n,k) = 0. + swdkdif(n,k) = 0. + enddo + swuflx(n,nlay+1) = 0. + swuflxc(n,nlay+1) = 0. + swdflx(n,nlay+1) = 0. + swdflxc(n,nlay+1) = 0. + enddo -!------------------------------------------------------------------ -#ifdef WRF_CHEM - IF ( aer_ra_feedback == 1) then - IF ( .NOT. & - ( PRESENT(tauaer300) .AND. & - PRESENT(tauaer400) .AND. & - PRESENT(tauaer600) .AND. & - PRESENT(tauaer999) .AND. & - PRESENT(gaer300) .AND. & - PRESENT(gaer400) .AND. & - PRESENT(gaer600) .AND. & - PRESENT(gaer999) .AND. & - PRESENT(waer300) .AND. & - PRESENT(waer400) .AND. & - PRESENT(waer600) .AND. & - PRESENT(waer999) ) ) THEN - CALL wrf_error_fatal & - ('Warning: missing fields required for aerosol radiation' ) - ENDIF - ENDIF -#endif + !--- initialization of aerosol optical properties: + aer_opt = 0 + do n = 1, ncol + do k = 1, nlay + do na = 1, naerec + ecaer(n,k,na) = 0. + enddo + enddo + enddo -!-----CALCULATE SHORT WAVE RADIATION -! -! All fields are ordered vertically from bottom to top -! Pressures are in mb + !--- fill local arrays with input sounding. convert water vapor mass mixing ratio to + ! 
volume mixing ratio: + plev(ncol,1) = pw1d(1) + tlev(ncol,1) = tw1d(1) + tsfc(ncol) = tsk(i,j) + do k = kts, kte + plev(ncol,k+1) = pw1d(k+1) + tlev(ncol,k+1) = tw1d(k+1) + play(ncol,k) = p1d(k) + pdel(ncol,k) = plev(ncol,k) - plev(ncol,k+1) + tlay(ncol,k) = t1d(k) + h2ovmr(ncol,k) = qv1d(k) * amdw + co2vmr(ncol,k) = co2 + o2vmr(ncol,k) = o2 + ch4vmr(ncol,k) = ch4 + n2ovmr(ncol,k) = n2o + enddo + plev(ncol,kte+2) = 1.0e-5 + tlev(ncol,kte+2) = tlev(ncol,kte+1) + 0.0 + play(ncol,kte+1) = 0.5 * plev(ncol,kte+1) + tlay(ncol,kte+1) = tlev(ncol,kte+1) + 0.0 + h2ovmr(ncol,kte+1) = h2ovmr(ncol,kte) + co2vmr(ncol,kte+1) = co2vmr(ncol,kte) + o2vmr(ncol,kte+1) = o2vmr(ncol,kte) + ch4vmr(ncol,kte+1) = ch4vmr(ncol,kte) + n2ovmr(ncol,kte+1) = n2ovmr(ncol,kte) + + !--- initialize the ozone voume mixing ratio: + call inirad(o3mmr,plev,kts,kte) + if(o3input .eq. 2) then + do k = 1, noznlevels + o3clim1d(k) = o3clim(i,k,j) + enddo + call vinterp_ozn(1,ncol,ncol,kte+1,play,pin,noznlevels,o3clim1d,o3mmr) + do k = kts,kte+1 + o3vmr(ncol,k) = o3mmr(k) + enddo + else + do k = kts,kte+1 + o3vmr(ncol,k) = o3mmr(k)*amdo + enddo + endif -! latitude loop - j_loop: do j = jts,jte + !--- initialize the surface albedo: + asdir(ncol) = albedo(i,j) + asdif(ncol) = albedo(i,j) + aldir(ncol) = albedo(i,j) + aldif(ncol) = albedo(i,j) -! longitude loop - i_loop: do i = its,ite + !--- set solar constant: + scon = solcon + !--- set cosine of solar zenith angle: + coszen(ncol) = coszrs + !--- in MPAS, the solar constant is already provided with eccentricity adjustment, so do not do + ! this here: + dyofyr = 0 + adjes = 1.0 -! -! Do shortwave by default, deactivate below if sun below horizon - dorrsw = .true. -! Cosine solar zenith angle for current time step -! -! xt24 is the fractional part of simulation days plus half of radt expressed in -! units of minutes -! julian is in days -! radt is in minutes - xt24 = mod(xtime+radt*0.5,1440.) - tloctm = gmt + xt24/60. + xlong(i,j)/15. - hrang = 15. 
* (tloctm-12.) * degrad - xxlat = xlat(i,j) * degrad -! clat(i) = xxlat - coszrs = sin(xxlat) * sin(declin) + cos(xxlat) * cos(declin) * cos(hrang) - coszr(i,j) = coszrs -! Set flag to prevent shortwave calculation when sun below horizon - if (coszrs.le.0.0) dorrsw = .false. -! Perform shortwave calculation if sun above horizon - if (dorrsw) then + !--- CALCULATE CLOUD OPTICAL PROPERTIES: + if(inflgsw .gt. 0) then - do k=kts,kte+1 - Pw1D(K) = p8w(I,K,J)/100. - Tw1D(K) = t8w(I,K,J) - enddo + do k = kts, kte + cldfrac(ncol,k) = cldfra1d(k) + enddo - DO K=kts,kte - QV1D(K)=0. - QC1D(K)=0. - QR1D(K)=0. - QI1D(K)=0. - QS1D(K)=0. - CLDFRA1D(k)=0. - QNDROP1D(k)=0. - ENDDO - - DO K=kts,kte - QV1D(K)=QV3D(I,K,J) - QV1D(K)=max(0.,QV1D(K)) - IF ( PRESENT( O33D ) ) THEN - O31D(K)=O33D(I,K,J) - ENDIF - ENDDO - - DO K=kts,kte - TTEN1D(K)=0. - T1D(K)=t3d(I,K,J) - P1D(K)=p3d(I,K,J)/100. - DZ1D(K)=dz8w(I,K,J) - ENDDO - -! moist variables - - IF (ICLOUD .ne. 0) THEN - IF ( PRESENT( CLDFRA3D ) ) THEN - DO K=kts,kte - CLDFRA1D(k)=CLDFRA3D(I,K,J) - ENDDO - ENDIF - - IF (PRESENT(F_QC) .AND. PRESENT(QC3D)) THEN - IF ( F_QC) THEN - DO K=kts,kte - QC1D(K)=QC3D(I,K,J) - QC1D(K)=max(0.,QC1D(K)) - ENDDO - ENDIF - ENDIF - - IF (PRESENT(F_QR) .AND. PRESENT(QR3D)) THEN - IF ( F_QR) THEN - DO K=kts,kte - QR1D(K)=QR3D(I,K,J) - QR1D(K)=max(0.,QR1D(K)) - ENDDO - ENDIF - ENDIF - - IF ( PRESENT(F_QNDROP).AND.PRESENT(QNDROP3D)) THEN - IF (F_QNDROP) THEN - DO K=kts,kte - qndrop1d(K)=qndrop3d(I,K,J) - ENDDO - ENDIF - ENDIF - -! This logic is tortured because cannot test F_QI unless -! it is present, and order of evaluation of expressions -! is not specified in Fortran - - IF ( PRESENT ( F_QI ) ) THEN - predicate = F_QI - ELSE - predicate = .FALSE. - ENDIF - -! For MP option 3 - IF (.NOT. predicate .and. .not. warm_rain) THEN - DO K=kts,kte - IF (T1D(K) .lt. 273.15) THEN - QI1D(K)=QC1D(K) - QS1D(K)=QR1D(K) - QC1D(K)=0. - QR1D(K)=0. - ENDIF - ENDDO - ENDIF - - IF (PRESENT(F_QI) .AND. 
PRESENT(QI3D)) THEN - IF (F_QI) THEN - DO K=kts,kte - QI1D(K)=QI3D(I,K,J) - QI1D(K)=max(0.,QI1D(K)) - ENDDO - ENDIF - ENDIF - - IF (PRESENT(F_QS) .AND. PRESENT(QS3D)) THEN - IF (F_QS) THEN - DO K=kts,kte - QS1D(K)=QS3D(I,K,J) - QS1D(K)=max(0.,QS1D(K)) - ENDDO - ENDIF - ENDIF - - IF (PRESENT(F_QG) .AND. PRESENT(QG3D)) THEN - IF (F_QG) THEN - DO K=kts,kte - QG1D(K)=QG3D(I,K,J) - QG1D(K)=max(0.,QG1D(K)) - ENDDO - ENDIF - ENDIF - -! mji - For MP option 5 - IF ( PRESENT(F_QI) .and. PRESENT(F_QC) .and. PRESENT(F_QS) .and. PRESENT(F_ICE_PHY) ) THEN - IF ( F_QC .and. .not. F_QI .and. F_QS ) THEN - DO K=kts,kte - qi1d(k) = qs3d(i,k,j) - qc1d(k) = qc3d(i,k,j) - qi1d(k) = max(0.,qi1d(k)) - qc1d(k) = max(0.,qc1d(k)) - ENDDO - ENDIF - ENDIF - - ENDIF - -! EMISS0=EMISS(I,J) -! GLW0=0. -! OLR0=0. -! TSFC=TSK(I,J) - DO K=kts,kte - QV1D(K)=AMAX1(QV1D(K),1.E-12) - ENDDO - -! Set up input for shortwave - ncol = 1 -! Add extra layer from top of model to top of atmosphere - nlay = (kte - kts + 1) + 1 - -! Select cloud liquid and ice optics parameterization options -! For passing in cloud optical properties directly: -! icld = 2 -! inflgsw = 0 -! iceflgsw = 0 -! liqflgsw = 0 -! For passing in cloud physical properties; cloud optics parameterized in RRTMG: - icld = 2 - inflgsw = 2 - iceflgsw = 3 - liqflgsw = 1 - -! Set cosine of solar zenith angle - coszen(ncol) = coszrs -! Set solar constant - scon = solcon -! For Earth/Sun distance adjustment in RRTMG -! dyofyr = julday -! adjes = 0.0 -! For WRF, solar constant is already provided with eccentricity adjustment, -! so do not do this in RRTMG - dyofyr = 0 - adjes = 1.0 - -! Layer indexing goes bottom to top here for all fields. -! Water vapor and ozone are converted from mmr to vmr. -! Pressures are in units of mb here. 
- plev(ncol,1) = pw1d(1) - tlev(ncol,1) = tw1d(1) - tsfc(ncol) = tsk(i,j) - do k = kts, kte - play(ncol,k) = p1d(k) - plev(ncol,k+1) = pw1d(k+1) - pdel(ncol,k) = plev(ncol,k) - plev(ncol,k+1) - tlay(ncol,k) = t1d(k) - tlev(ncol,k+1) = tw1d(k+1) - h2ovmr(ncol,k) = qv1d(k) * amdw - co2vmr(ncol,k) = co2 - o2vmr(ncol,k) = o2 - ch4vmr(ncol,k) = ch4 - n2ovmr(ncol,k) = n2o - enddo + !--- zero out cloud optical properties here (not used when passing physical properties to rrtmg_sw): + do k = kts, kte + do nb = 1, nbndsw + taucld(nb,ncol,k) = 0.0 + ssacld(nb,ncol,k) = 1.0 + asmcld(nb,ncol,k) = 0.0 + fsfcld(nb,ncol,k) = 0.0 + enddo + enddo -! Define profile values for extra layer from model top to top of atmosphere. -! The top layer temperature for all gridpoints is set to the top layer-1 -! temperature plus a constant (0 K) that represents an isothermal layer -! above ptop. Top layer interface temperatures are linearly interpolated -! from the layer temperatures. - - play(ncol,kte+1) = 0.5 * plev(ncol,kte+1) - tlay(ncol,kte+1) = tlev(ncol,kte+1) + 0.0 - plev(ncol,kte+2) = 1.0e-5 - tlev(ncol,kte+2) = tlev(ncol,kte+1) + 0.0 - tlev(ncol,kte+2) = tlev(ncol,kte+1) + 0.0 - h2ovmr(ncol,kte+1) = h2ovmr(ncol,kte) - co2vmr(ncol,kte+1) = co2vmr(ncol,kte) - o2vmr(ncol,kte+1) = o2vmr(ncol,kte) - ch4vmr(ncol,kte+1) = ch4vmr(ncol,kte) - n2ovmr(ncol,kte+1) = n2ovmr(ncol,kte) - -! Get ozone profile including amount in extra layer above model top - call inirad (o3mmr,plev,kts,kte) + pcols = ncol + pver = kte - kts + 1 + gravmks = g + + if(has_reqc .ne. 0) then + !--- fill the effective radius for cloud water with that calculated in the Thompson cloud + ! microphysics parameterization: + inflgsw = 3 + do k = kts, kte + reliq(ncol,k) = max(2.5,re_cloud(i,k,j)*1.e6) + if(reliq(ncol,k).le.2.5 .and. cldfrac(ncol,k).gt.0. .and. & + (xland(i,j)-1.5).gt.0.) then !--- ocean. + reliq(ncol,k) = 10.5 + elseif(reliq(ncol,k).le.2.5 .and. cldfrac(ncol,k).gt.0. .and. & + (xland(i,j)-1.5).lt.0.) 
then !--- land. + reliq(ncol,k) = 7.5 + endif + enddo + + else + + !--- calculate the effective radius for cloud water in layers below the model top. we added the + ! dimensions pcols, pver so that the calls to subroutines relcalc and reicalc remain the same + ! as in WRF although the two dimensions are duplicate and not needed inside the subroutines: + landfrac(ncol) = 2.-xland(i,j) + landm(ncol) = landfrac(ncol) + snowh(ncol) = 0.001*snow(i,j) + icefrac(ncol) = xice(i,j) + + call relcalc(ncol,pcols,pver,tlay,landfrac,landm,icefrac,reliq,snowh) + endif -#if defined(mpas) -! Laura D. Fowler (2013-07-08): Added the option to use the ozone climatology from the -! CAM radiation codes, instead of the annual mean ozone in subroutine o3dat. As output -! to the subroutine vinterp_ozn, the unit of o3mmr is actually "volume mixing ratio". - if(o3input .eq. 2) then - do k = 1, noznlevels - o3clim1d(k) = o3clim(i,k,j) - enddo - call vinterp_ozn(1,ncol,ncol,kte+1,play,pin,noznlevels,o3clim1d,o3mmr) - do k = kts,kte+1 - o3vmr(ncol,k) = o3mmr(k) - enddo - else - do k = kts,kte+1 - o3vmr(ncol,k) = o3mmr(k) * amdo - enddo - endif -#else - do k = kts, kte+1 - o3vmr(ncol,k) = o3mmr(k) * amdo - IF ( PRESENT( O33D ) ) THEN - if(o3input .eq. 2)then - if(k.le.kte)then - o3vmr(ncol,k) = o31d(k) - else -! apply shifted climatology profile above model top - o3vmr(ncol,k) = o31d(kte) - o3mmr(kte)*amdo + o3mmr(k)*amdo - if(o3vmr(ncol,k) .le. 0.)o3vmr(ncol,k) = o3mmr(k)*amdo + if(has_reqi .ne. 0) then + !--- fill the effective radius for cloud ice with that calculated in the Thompson cloud + ! microphysics parameterization: + inflgsw = 4 + iceflgsw = 4 + do k = kts, kte + reice(ncol,k) = max(5.,re_ice(i,k,j)*1.e6) + if(reice(ncol,k).le.5. .and. cldfrac(ncol,k).gt.0.) then + idx_rei = int(t3d(i,k,j)-179.) 
+ idx_rei = min(max(idx_rei,1),75) + corr = t3d(i,k,j) - int(t3d(i,k,j)) + reice(ncol,k) = retab(idx_rei)*(1.-corr) + retab(idx_rei+1)*corr + reice(ncol,k) = max(reice(ncol,k),5.0) + endif + enddo + + else + + !--- calculate the effective radius for cloud ice in layers below the model top, following + ! Kristjansson and Mitchell. limit upper bound of reice for Fu ice parameterization and + ! convert effective radius to generalized effective size (*1.0315; Fu, 1996): + + call reicalc(ncol,pcols,pver,tlay,reice) + + if(iceflgsw .eq. 3) then + do k = kts, kte + reice(ncol,k) = reice(ncol,k) * 1.0315 + reice(ncol,k) = min(140.0,reice(ncol,k)) + enddo endif endif - ENDIF - enddo -#endif - - -! Set surface albedo for direct and diffuse radiation in UV/visible and -! near-IR spectral regions -! -------------- Zhenxin 2011-06-20 ----------- ! - -! ------- 1. Commented by Zhenxin 2011-06-20 for SSiB coupling modified ---- ! -! asdir(ncol) = albedo(i,j) -! asdif(ncol) = albedo(i,j) -! aldir(ncol) = albedo(i,j) -! aldif(ncol) = albedo(i,j) -! ------- End of Comments ------ ! - -! ------- 2. New Addiation ------ ! - IF ( sf_surface_physics .eq. 8 .AND. XLAND(i,j) .LT. 1.5) THEN - asdir(ncol) = ALSWVISDIR(I,J) - asdif(ncol) = ALSWVISDIF(I,J) - aldir(ncol) = ALSWNIRDIR(I,J) - aldif(ncol) = ALSWNIRDIF(I,J) - ELSE - asdir(ncol) = albedo(i,j) - asdif(ncol) = albedo(i,j) - aldir(ncol) = albedo(i,j) - aldif(ncol) = albedo(i,j) - ENDIF - -! ---------- End of Addiation ------! -! ---------- End of fds_Zhenxin 2011-06-20 --------------! - -! Define cloud optical properties for radiation (inflgsw = 0) -! This option is not currently active -! Cloud and precipitation paths in g/m2 -! qi=0 if no ice phase -! qs=0 if no ice phase - if (inflgsw .eq. 0) then - -! 
Set cloud fraction and cloud optical properties here; not yet active - do k = kts, kte - cldfrac(ncol,k) = cldfra1d(k) - do nb = 1, nbndsw - taucld(nb,ncol,k) = 0.0 - ssacld(nb,ncol,k) = 1.0 - asmcld(nb,ncol,k) = 0.0 - fsfcld(nb,ncol,k) = 0.0 - enddo - enddo - -! Zero out cloud physical property arrays; not used when passing optical properties -! into radiation - do k = kts, kte - clwpth(ncol,k) = 0.0 - ciwpth(ncol,k) = 0.0 - rel(ncol,k) = 10.0 - rei(ncol,k) = 10. - enddo - endif - -! Define cloud physical properties for radiation (inflgsw = 1 or 2) -! Cloud fraction -! Set cloud arrays if passing cloud physical properties into radiation - if (inflgsw .gt. 0) then - do k = kts, kte - cldfrac(ncol,k) = cldfra1d(k) - enddo - -! Compute cloud water/ice paths and particle sizes for input to radiation (CAM method) - pcols = ncol - pver = kte - kts + 1 - gravmks = g - landfrac(ncol) = 2.-XLAND(I,J) - landm(ncol) = landfrac(ncol) - snowh(ncol) = 0.001*SNOW(I,J) - icefrac(ncol) = XICE(I,J) - -! From module_ra_cam: Convert liquid and ice mixing ratios to water paths; -! pdel is in mb here; convert back to Pa (*100.) -! Water paths are in units of g/m2 -! snow added as ice cloud (JD 091022) - do k = kts, kte - gicewp = (qi1d(k)+qs1d(k)) * pdel(ncol,k)*100.0 / gravmks * 1000.0 ! Grid box ice water path. - gliqwp = qc1d(k) * pdel(ncol,k)*100.0 / gravmks * 1000.0 ! Grid box liquid water path. - cicewp(ncol,k) = gicewp / max(0.01,cldfrac(ncol,k)) ! In-cloud ice water path. - cliqwp(ncol,k) = gliqwp / max(0.01,cldfrac(ncol,k)) ! In-cloud liquid water path. - end do - -!link the aerosol feedback to cloud -czhao - if( PRESENT( progn ) ) then - if (progn == 1) then -!jdfcz if(prescribe==0) then - - pi = 4.*atan(1.0) - third=1./3. - rhoh2o=1.e3 - relconst=3/(4.*pi*rhoh2o) -! minimun liquid water path to calculate rel -! corresponds to optical depth of 1.e-3 for radius 4 microns. - lwpmin=3.e-5 - do k = kts, kte - reliq(ncol,k) = 10. 
- if( PRESENT( F_QNDROP ) ) then - if( F_QNDROP ) then - if ( qc1d(k)*pdel(ncol,k).gt.lwpmin.and. & - qndrop1d(k).gt.1000. ) then - reliq(ncol,k)=(relconst*qc1d(k)/qndrop1d(k))**third ! effective radius in m -! apply scaling from Martin et al., JAS 51, 1830. - reliq(ncol,k)=1.1*reliq(ncol,k) - reliq(ncol,k)=reliq(ncol,k)*1.e6 ! convert from m to microns - reliq(ncol,k)=max(reliq(ncol,k),4.) - reliq(ncol,k)=min(reliq(ncol,k),20.) - end if - end if - end if - end do -!jdfcz else ! prescribe -! following Kiehl - call relcalc(ncol, pcols, pver, tlay, landfrac, landm, icefrac, reliq, snowh) -! write(0,*) 'sw prescribe aerosol',maxval(qndrop3d) -!jdfcz endif - else ! progn - call relcalc(ncol, pcols, pver, tlay, landfrac, landm, icefrac, reliq, snowh) - endif - else !progn - call relcalc(ncol, pcols, pver, tlay, landfrac, landm, icefrac, reliq, snowh) - endif - -! following Kristjansson and Mitchell - call reicalc(ncol, pcols, pver, tlay, reice) - -#if 0 - if (i==80.and.j==30) then -#if defined( DM_PARALLEL ) && ! defined( STUBMPI) - if( PRESENT( progn ) ) write(0,*) 'aerosol indirect',progn - write(0,*)'sw water eff radius',reliq(ncol,10),reliq(ncol,20),reliq(ncol,25) - write(0,*)'sw ice eff radius',reice(ncol,10),reice(ncol,20),reice(ncol,25) -#endif - endif -#endif - -! Limit upper bound of reice for Fu ice parameterization and convert -! from effective radius to generalized effective size (*1.0315; Fu, 1996) - if (iceflgsw .eq. 3) then - do k = kts, kte - reice(ncol,k) = reice(ncol,k) * 1.0315 - reice(ncol,k) = min(140.0,reice(ncol,k)) - end do - endif -! Set cloud physical property arrays - do k = kts, kte - clwpth(ncol,k) = cliqwp(ncol,k) - ciwpth(ncol,k) = cicewp(ncol,k) - rel(ncol,k) = reliq(ncol,k) - rei(ncol,k) = reice(ncol,k) - enddo + if(has_reqs .ne. 0) then + !--- fill the effective radius for snow with that calculated in the Thompson cloud + ! 
microphysics parameterization: + inflgsw = 5 + iceflgsw = 5 + do k = kts, kte + resnow(ncol,k) = max(10.,re_snow(i,k,j)*1.e6) + enddo + else + do k = kts, kte + resnow(ncol,k) = 10. + enddo + endif -! Zero out cloud optical properties here, calculated in radiation - do k = kts, kte - do nb = 1, nbndsw - taucld(nb,ncol,k) = 0.0 - ssacld(nb,ncol,k) = 1.0 - asmcld(nb,ncol,k) = 0.0 - fsfcld(nb,ncol,k) = 0.0 - enddo - enddo - endif + !--- calculate the cloud liquid water path in units of g/m2 below the model top: + do k = kts, kte + gliqwp = qc1d(k) * pdel(ncol,k)*100.0 / gravmks * 1000.0 !grid box liquid water path. + cliqwp(ncol,k) = gliqwp / max(0.01,cldfrac(ncol,k)) !in-cloud liquid water path. + enddo -! No clouds are allowed in the extra layer from model top to TOA - clwpth(ncol,kte+1) = 0. - ciwpth(ncol,kte+1) = 0. - rel(ncol,kte+1) = 10. - rei(ncol,kte+1) = 10. - cldfrac(ncol,kte+1) = 0. - do nb = 1, nbndsw - taucld(nb,ncol,kte+1) = 0. - ssacld(nb,ncol,kte+1) = 1. - asmcld(nb,ncol,kte+1) = 0. - fsfcld(nb,ncol,kte+1) = 0. - enddo + !--- calculate the cloud ice path in units of g/m2 below the model top: + if(iceflgsw .eq. 3)then + do k = kts, kte + gicewp = (qi1d(k)+qs1d(k)) * pdel(ncol,k)*100.0 / gravmks * 1000.0 !grid box ice water path. + cicewp(ncol,k) = gicewp / max(0.01,cldfrac(ncol,k)) !in-cloud ice water path. + enddo + elseif(iceflgsw .ge. 4) then + do k = kts, kte + gicewp = qi1d(k) * pdel(ncol,k)*100.0 / gravmks * 1000.0 !grid box ice water path. + cicewp(ncol,k) = gicewp / max(0.01,cldfrac(ncol,k)) !in-cloud ice water path. + enddo + endif - iplon = 1 - irng = 0 - permuteseed = 1 + !--- calculate the snow path in units of g/m2 below the model top: + if(iceflgsw.eq.5)then + do k = kts, kte + snow_mass_factor = 1.0 + if(resnow(ncol,k) .gt. 130.)then + snow_mass_factor = (130.0/resnow(ncol,k))*(130.0/resnow(ncol,k)) + resnow(ncol,k) = 130.0 + endif + gsnowp = qs1d(k) * pdel(ncol,k)*100.0 / gravmks * 1000.0 !grid box snow path. 
+ csnowp(ncol,k) = snow_mass_factor * gsnowp / max(0.01,cldfrac(ncol,k)) + enddo + endif -! Sub-column generator for McICA + !--- set the cloud physical property between the model top and the top of the atmosphere. + ! do not allow clouds between the model top and the top of the atmosphere: + do k = kts, kte + clwpth(ncol,k) = cliqwp(ncol,k) + ciwpth(ncol,k) = cicewp(ncol,k) + cswpth(ncol,k) = csnowp(ncol,k) + rel(ncol,k) = reliq(ncol,k) + rei(ncol,k) = reice(ncol,k) + res(ncol,k) = resnow(ncol,k) + enddo + !---- no clouds are allowed in the extra layer from model top to TOA: + clwpth(ncol,kte+1) = 0. + ciwpth(ncol,kte+1) = 0. + cswpth(ncol,kte+1) = 0. + rel(ncol,kte+1) = 10. + rei(ncol,kte+1) = 10. + res(ncol,kte+1) = 10. + cldfrac(ncol,kte+1) = 0. + do nb = 1, nbndsw + taucld(nb,ncol,kte+1) = 0. + ssacld(nb,ncol,kte+1) = 1. + asmcld(nb,ncol,kte+1) = 0. + fsfcld(nb,ncol,kte+1) = 0. + enddo - call mcica_subcol_sw(iplon, ncol, nlay, icld, permuteseed, irng, play, & - cldfrac, ciwpth, clwpth, rei, rel, taucld, ssacld, asmcld, fsfcld, & - cldfmcl, ciwpmcl, clwpmcl, reicmcl, relqmcl, & - taucmcl, ssacmcl, asmcmcl, fsfcmcl) + endif + + !--- sub-column generator for McICA: + iplon = 1 + irng = 0 + permuteseed = 1 + + call mcica_subcol_sw & + (iplon , ncol , nlay , icld , permuteseed , irng , play , & + cldfrac , ciwpth , clwpth , cswpth , rei , rel , res , & + taucld , ssacld , asmcld , fsfcld , cldfmcl , ciwpmcl , clwpmcl , & + cswpmcl , reicmcl , relqmcl , resnmcl , taucmcl , ssacmcl , asmcmcl , & + fsfcmcl) + + !--- initialization of aerosol optical properties: + do nb = 1, nbndsw + do k = kts, kte+1 + tauaer(ncol,k,nb) = 0. + ssaaer(ncol,k,nb) = 1. + asmaer(ncol,k,nb) = 0. + enddo + enddo -!-------------------------------------------------------------------------- -! Aerosol optical depth, single scattering albedo and asymmetry parameter -czhao 03/2010 -!-------------------------------------------------------------------------- -! 
by layer for each RRTMG shortwave band -! No aerosols in top layer above model top (kte+1). -!cz do nb = 1, nbndsw -!cz do k = kts, kte+1 -!cz tauaer(ncol,k,nb) = 0. -!cz ssaaer(ncol,k,nb) = 1. -!cz asmaer(ncol,k,nb) = 0. -!cz enddo -!cz enddo - -! ... Aerosol effects. Added aerosol feedbacks from Chem , 03/2010 -czhao -! - do nb = 1, nbndsw - do k = kts,kte+1 - tauaer(ncol,k,nb) = 0. - ssaaer(ncol,k,nb) = 1. - asmaer(ncol,k,nb) = 0. - end do - end do + do na = 1, naerec + do k = kts, kte+1 + ecaer(ncol,k,na) = 0. + enddo + enddo -#ifdef WRF_CHEM - IF ( AER_RA_FEEDBACK == 1) then - do nb = 1, nbndsw - wavemid(nb)=0.5*(wavemin(nb)+wavemax(nb)) ! um - do k = kts,kte !wig - -! convert optical properties at 300,400,600, and 999 to conform to the band wavelengths -! tauaer - use angstrom exponent - if(tauaer300(i,k,j).gt.thresh .and. tauaer999(i,k,j).gt.thresh) then - ang=alog(tauaer300(i,k,j)/tauaer999(i,k,j))/alog(999./300.) - tauaer(ncol,k,nb)=tauaer400(i,k,j)*(0.4/wavemid(nb))**ang - !tauaer(ncol,k,nb)=tauaer600(i,k,j)*(0.6/wavemid(nb))**ang - if (i==30.and.j==49.and.k==2.and.nb==12) then - write(0,*) 'TAU from 600 vs 400 in RRTMG',tauaer600(i,k,j),tauaer400(i,k,j) - print*, 'TAU from 600 vs 400 in RRTMG',tauaer600(i,k,j),tauaer400(i,k,j) - write(0,*) tauaer600(i,k,j)*(0.6/wavemid(nb))**ang,tauaer400(i,k,j)*(0.4/wavemid(nb))**ang - print*, tauaer600(i,k,j)*(0.6/wavemid(nb))**ang,tauaer400(i,k,j)*(0.4/wavemid(nb))**ang - endif -! ssa - linear interpolation; extrapolation - slope=(waer600(i,k,j)-waer400(i,k,j))/.2 - ssaaer(ncol,k,nb) = slope*(wavemid(nb)-.6)+waer600(i,k,j) - if(ssaaer(ncol,k,nb).lt.0.4) ssaaer(ncol,k,nb)=0.4 - if(ssaaer(ncol,k,nb).ge.1.0) ssaaer(ncol,k,nb)=1.0 -! g - linear interpolation;extrapolation - slope=(gaer600(i,k,j)-gaer400(i,k,j))/.2 - asmaer(ncol,k,nb) = slope*(wavemid(nb)-.6)+gaer600(i,k,j) ! 
notice reversed varaibles - if(asmaer(ncol,k,nb).lt.0.5) asmaer(ncol,k,nb)=0.5 - if(asmaer(ncol,k,nb).ge.1.0) asmaer(ncol,k,nb)=1.0 - endif - end do ! k - end do ! nb - -!wig beg - do nb = 1, nbndsw - slope = 0. !use slope as a sum holder - do k = kts,kte - slope = slope + tauaer(ncol,k,nb) - end do - if( slope < 0. ) then - write(msg,'("ERROR: Negative total optical depth of ",f8.2," at point i,j,nb=",3i5)') slope,i,j,nb - call wrf_error_fatal(msg) - else if( slope > 6. ) then - call wrf_message("-------------------------") - write(msg,'("WARNING: Large total sw optical depth of ",f8.2," at point i,j,nb=",3i5)') slope,i,j,nb - call wrf_message(msg) - - call wrf_message("Diagnostics 1: k, tauaer300, tauaer400, tauaer600, tauaer999, tauaer") - do k=kts,kte - write(msg,'(i4,5f8.2)') k, tauaer300(i,k,j), tauaer400(i,k,j), & - tauaer600(i,k,j), tauaer999(i,k,j),tauaer(ncol,k,nb) - call wrf_message(msg) - !czhao set an up-limit here to avoid segmentation fault - !from extreme AOD - tauaer(ncol,k,nb)=tauaer(ncol,k,nb)*6.0/slope - end do - - call wrf_message("Diagnostics 2: k, gaer300, gaer400, gaer600, gaer999") - do k=kts,kte - write(msg,'(i4,4f8.2)') k, gaer300(i,k,j), gaer400(i,k,j), & - gaer600(i,k,j), gaer999(i,k,j) - call wrf_message(msg) - end do - - call wrf_message("Diagnostics 3: k, waer300, waer400, waer600, waer999") - do k=kts,kte - write(msg,'(i4,4f8.2)') k, waer300(i,k,j), waer400(i,k,j), & - waer600(i,k,j), waer999(i,k,j) - call wrf_message(msg) - end do - - call wrf_message("Diagnostics 4: k, ssaal, asyal, taual") - do k=kts-1,kte - write(msg,'(i4,3f8.2)') k, ssaaer(i,k,nb), asmaer(i,k,nb), tauaer(i,k,nb) - call wrf_message(msg) - end do - call wrf_message("-------------------------") - endif - enddo ! nb - endif ! aer_ra_feedback -#endif -! Zero array for input of aerosol optical thickness for use with -! ECMWF aerosol types (not used) - do na = 1, naerec - do k = kts, kte+1 - ecaer(ncol,k,na) = 0. 
- enddo - enddo + !--- CALL TO THE RRTMG SHORT WAVE RADIATION MODEL: + call rrtmg_sw & + (ncol , nlay , icld , play , plev , tlay , & + tlev , tsfc , h2ovmr , o3vmr , co2vmr , ch4vmr , & + n2ovmr , o2vmr , asdir , asdif , aldir , aldif , & + coszen , adjes , dyofyr , scon , inflgsw , iceflgsw , & + liqflgsw , cldfmcl , taucmcl , ssacmcl , asmcmcl , fsfcmcl , & + ciwpmcl , clwpmcl , cswpmcl , reicmcl , relqmcl , resnmcl , & + tauaer , ssaaer , asmaer , ecaer , swuflx , swdflx , & + swhr , swuflxc , swdflxc , swhrc , & + aer_opt , & + sibvisdir , sibvisdif , sibnirdir , sibnirdif , & !added for ssib coupling. + swdkdir , swdkdif) + + !--- OUTPUTS: + gsw(i,j) = swdflx(1,1) - swuflx(1,1) + swcf(i,j) = (swdflx(1,kte+2)-swuflx(1,kte+2)) - (swdflxc(1,kte+2)-swuflxc(1,kte+2)) + + if(present(swupt)) then + !output up and down toa fluxes for total and clear sky: + swupt(i,j) = swuflx(1,kte+2) + swuptc(i,j) = swuflxc(1,kte+2) + swdnt(i,j) = swdflx(1,kte+2) + swdntc(i,j) = swdflxc(1,kte+2) + !output up and down surface fluxes for total and clear sky: + swupb(i,j) = swuflx(1,1) + swupbc(i,j) = swuflxc(1,1) + swdnb(i,j) = swdflx(1,1) + swdnbc(i,j) = swdflxc(1,1) + endif + + if(present (swupflx)) then + do k = kts, kte+2 + swupflx(i,k,j) = swuflx(1,k) + swupflxc(i,k,j) = swuflxc(1,k) + swdnflx(i,k,j) = swdflx(1,k) + swdnflxc(i,k,j) = swdflxc(1,k) + enddo + endif -! Call RRTMG shortwave radiation model + !--- output heating rate tendency; convert heating rate from K/d to K/s: + do k = kts, kte + tten1d(k) = swhr(ncol,k)/86400. + rthratensw(i,k,j) = tten1d(k)/pi3d(i,k,j) + enddo - call rrtmg_sw & - (ncol ,nlay ,icld , & - play ,plev ,tlay ,tlev ,tsfc , & - h2ovmr , o3vmr ,co2vmr ,ch4vmr ,n2ovmr ,o2vmr , & - asdir ,asdif ,aldir ,aldif , & - coszen ,adjes ,dyofyr ,scon , & - inflgsw ,iceflgsw,liqflgsw,cldfmcl , & - taucmcl ,ssacmcl ,asmcmcl ,fsfcmcl , & - ciwpmcl ,clwpmcl ,reicmcl ,relqmcl , & - tauaer ,ssaaer ,asmaer ,ecaer , & - swuflx ,swdflx ,swhr ,swuflxc ,swdflxc ,swhrc, & -! 
----- Zhenxin added for ssib coupiling 2011-06-20 --------! - sibvisdir, sibvisdif, sibnirdir, sibnirdif & - ) -! -------------------- End of addiation by Zhenxin 2011-06-20 ------! - -! Output net absorbed shortwave surface flux and shortwave cloud forcing -! at the top of atmosphere (W/m2) - gsw(i,j) = swdflx(1,1) - swuflx(1,1) - swcf(i,j) = (swdflx(1,kte+2) - swuflx(1,kte+2)) - (swdflxc(1,kte+2) - swuflxc(1,kte+2)) - - if (present(swupt)) then -! Output up and down toa fluxes for total and clear sky - swupt(i,j) = swuflx(1,kte+2) - swuptc(i,j) = swuflxc(1,kte+2) - swdnt(i,j) = swdflx(1,kte+2) - swdntc(i,j) = swdflxc(1,kte+2) -! Output up and down surface fluxes for total and clear sky - swupb(i,j) = swuflx(1,1) - swupbc(i,j) = swuflxc(1,1) - swdnb(i,j) = swdflx(1,1) -! Added by Zhenxin for 4 compenants of swdown radiation - swvisdir(i,j) = sibvisdir(1,1) - swvisdif(i,j) = sibvisdif(1,1) - swnirdir(i,j) = sibnirdir(1,1) - swnirdif(i,j) = sibnirdif(1,1) -! Ended, Zhenxin (2011/06/20) - swdnbc(i,j) = swdflxc(1,1) - endif + else -! Output up and down layer fluxes for total and clear sky. -! Vertical ordering is from bottom to top in units of W m-2. - if ( present (swupflx) ) then - do k=kts,kte+2 - swupflx(i,k,j) = swuflx(1,k) - swupflxc(i,k,j) = swuflxc(1,k) - swdnflx(i,k,j) = swdflx(1,k) - swdnflxc(i,k,j) = swdflxc(1,k) - enddo - endif + if(present(swupt)) then + !output up and down toa fluxes for total and clear sky: + swupt(i,j) = 0. + swuptc(i,j) = 0. + swdnt(i,j) = 0. + swdntc(i,j) = 0. + !output up and down surface fluxes for total and clear sky: + swupb(i,j) = 0. + swupbc(i,j) = 0. + swdnb(i,j) = 0. + swdnbc(i,j) = 0. + endif -! Output heating rate tendency; convert heating rate from K/d to K/s -! Heating rate arrays are ordered vertically from bottom to top here. - do k=kts,kte - tten1d(k) = swhr(ncol,k)/86400. - rthratensw(i,k,j) = tten1d(k)/pi3d(i,k,j) - enddo - else - if (present(swupt)) then -! 
Output up and down toa fluxes for total and clear sky - swupt(i,j) = 0. - swuptc(i,j) = 0. - swdnt(i,j) = 0. - swdntc(i,j) = 0. -! Output up and down surface fluxes for total and clear sky - swupb(i,j) = 0. - swupbc(i,j) = 0. - swdnb(i,j) = 0. - swdnbc(i,j) = 0. - swvisdir(i,j) = 0. ! Add by Zhenxin (2011/06/20) - swvisdif(i,j) = 0. - swnirdir(i,j) = 0. - swnirdif(i,j) = 0. ! Add by Zhenxin (2011/06/20) - endif + endif - endif -! - end do i_loop - end do j_loop + end do i_loop !end longitude loop. + end do j_loop !end latitude loop. -!------------------------------------------------------------------- + end subroutine rrtmg_swrad - END SUBROUTINE RRTMG_SWRAD +!----------------------------------------------------------------------------------------------------------------- !ldf (2013-03-11): This section of the module is moved to module_physics_rrtmg_swinit.F in !./../core_physics to accomodate differences in the mpi calls between WRF and MPAS.I thought diff --git a/src/core_atmosphere/physics/physics_wrf/module_sf_bem.F b/src/core_atmosphere/physics/physics_wrf/module_sf_bem.F index 3515bc3d89..2619567359 100644 --- a/src/core_atmosphere/physics/physics_wrf/module_sf_bem.F +++ b/src/core_atmosphere/physics/physics_wrf/module_sf_bem.F @@ -3,8 +3,8 @@ MODULE module_sf_bem ! Variables and constants used in the BEM module ! 
----------------------------------------------------------------------- +use mpas_abort, only : mpas_dmpar_global_abort #ifdef mpas -!USE mpas_dmpar, only : mpas_dmpar_global_abort #define FATAL_ERROR(M) call mpas_dmpar_global_abort( M ) #else #define FATAL_ERROR(M) write(0,*) M ; stop diff --git a/src/core_atmosphere/physics/physics_wrf/module_sf_bep.F b/src/core_atmosphere/physics/physics_wrf/module_sf_bep.F index 602ae15e3f..8bf465b461 100644 --- a/src/core_atmosphere/physics/physics_wrf/module_sf_bep.F +++ b/src/core_atmosphere/physics/physics_wrf/module_sf_bep.F @@ -1,7 +1,7 @@ MODULE module_sf_bep +use mpas_abort, only : mpas_dmpar_global_abort #ifdef mpas -!USE mpas_dmpar, only : mpas_dmpar_global_abort #define FATAL_ERROR(M) call mpas_dmpar_global_abort( M ) #else #define FATAL_ERROR(M) write(0,*) M ; stop diff --git a/src/core_atmosphere/physics/physics_wrf/module_sf_bep_bem.F b/src/core_atmosphere/physics/physics_wrf/module_sf_bep_bem.F index f6da2724c2..e5c4418373 100644 --- a/src/core_atmosphere/physics/physics_wrf/module_sf_bep_bem.F +++ b/src/core_atmosphere/physics/physics_wrf/module_sf_bep_bem.F @@ -1,7 +1,7 @@ MODULE module_sf_bep_bem +use mpas_abort, only : mpas_dmpar_global_abort #ifdef mpas -!USE mpas_dmpar, only : mpas_dmpar_global_abort #define FATAL_ERROR(M) call mpas_dmpar_global_abort( M ) #else #define FATAL_ERROR(M) write(0,*) M ; stop diff --git a/src/core_atmosphere/physics/physics_wrf/module_sf_mynn.F b/src/core_atmosphere/physics/physics_wrf/module_sf_mynn.F new file mode 100644 index 0000000000..554d1f05ec --- /dev/null +++ b/src/core_atmosphere/physics/physics_wrf/module_sf_mynn.F @@ -0,0 +1,1848 @@ +!================================================================================================================= +! copied for implementation in MPAS from WRF version 3.6.1. + +! modifications made to sourcecode: +! * used preprocessing option to replace module_model_constants with mpas_atmphys_constants. +! Laura D. 
Fowler (laura@ucar.edu / 2014-09-25). +! * used preprocessing option to include the actual mean distance between cell centers. +! Laura D. Fowler (laura@ucar.edu / 2015-01-06). +! * used "dummy" variables in the call to mym_condensation. +! Laura D. Fowler (laura@ucar.edu / 2016-10-28). + +!================================================================================================================= + +MODULE module_sf_mynn + +!------------------------------------------------------------------- +!Modifications implemented by Joseph Olson NOAA/GSD/AMB - CU/CIRES +!for WRFv3.4 and WRFv3.4.1: +! +! BOTH LAND AND WATER: +!1) Calculation of stability parameter (z/L) taken from Li et al. (2010 BLM) +! for first iteration of first time step; afterwards, exact calculation. +!2) Fixed isflux=0 option to turn off scalar fluxes, but keep momentum +! fluxes for idealized studies (credit: Anna Fitch). +!3) Kinematic viscosity now varies with temperature +!4) Uses Monin-Obukhov flux-profile relationships more consistent with +! those used in the MYNN PBL code. +!5) Allows negative QFX, similar to MYJ scheme +! +! LAND only: +!1) iz0tlnd option is now available with the following options: +! (default) =0: Zilitinkevich (1995) +! =1: Czil_new (modified according to Chen & Zhang 2008) +! =2: Modified Yang et al (2002, 2008) - generalized for all landuse +! =3: constant zt = z0/7.4 (original form; Garratt 1992) +! =4: Pan et al. (1994) with RUC mods for z_q, zili for z_t +!2) Relaxed u* minimum from 0.1 to 0.01 +! +! WATER only: +!1) isftcflx option is now available with the following options: +! (default) =0: z0, zt, and zq from COARE3.0 (Fairall et al 2003) +! =1: z0 from Davis et al (2008), zt & zq from COARE3.0 +! =2: z0 from Davis et al (2008), zt & zq from Garratt (1992) +! =3: z0 from Taylor and Yelland (2004), zt and zq from COARE3.0 +! =4: z0 from Zilitinkevich (2001), zt & zq from COARE3.0 +! +! 
SNOW/ICE only: +!1) Added Andreas (2002) snow/ice parameterization for thermal and +! moisture roughness to help reduce the cool/moist bias in the arctic +! region. +! +!NOTE: This code was primarily tested in combination with the RUC LSM. +! Performance with the Noah (or other) LSM is relatively unknown. +!------------------------------------------------------------------- + +#if defined(mpas) + use mpas_atmphys_constants,only: p1000mb => P0,cp,xlv,ep_2 + use module_bl_mynn,only: tv0,mym_condensation + use module_sf_sfclay,only: sfclayinit + + implicit none + private + public:: mynn_sf_init_driver, & + sfclay_mynn + +#else + USE module_model_constants, only: & + &p1000mb, cp, xlv, ep_2 + + USE module_sf_sfclay, ONLY: sfclayinit + USE module_bl_mynn, only: tv0, mym_condensation + USE module_wrf_error +!------------------------------------------------------------------- + IMPLICIT NONE +!------------------------------------------------------------------- +#endif + + REAL, PARAMETER :: xlvcp=xlv/cp, ep_3=1.-ep_2 + + REAL, PARAMETER :: wmin=0.1 ! Minimum wind speed + REAL, PARAMETER :: VCONVC=1.0 + REAL, PARAMETER :: SNOWZ0=0.012 + + REAL, DIMENSION(0:1000 ),SAVE :: PSIMTB,PSIHTB + +CONTAINS + +!------------------------------------------------------------------- + SUBROUTINE mynn_sf_init_driver(allowed_to_read) + + LOGICAL, INTENT(in) :: allowed_to_read + + !Fill the PSIM and PSIH tables. The subroutine "sfclayinit" + !can be found in module_sf_sfclay.F. This subroutine returns + !the forms from Dyer and Hicks (1974). 
+ + CALL sfclayinit(allowed_to_read) + + END SUBROUTINE mynn_sf_init_driver + +!------------------------------------------------------------------- + SUBROUTINE SFCLAY_mynn(U3D,V3D,T3D,QV3D,P3D,dz8w, & + CP,G,ROVCP,R,XLV,PSFCPA,CHS,CHS2,CQS2,CPM, & + ZNT,UST,PBLH,MAVAIL,ZOL,MOL,REGIME,PSIM,PSIH, & + XLAND,HFX,QFX,LH,TSK,FLHC,FLQC,QGH,QSFC,RMOL, & + U10,V10,TH2,T2,Q2,SNOWH, & + GZ1OZ0,WSPD,BR,ISFFLX,DX, & + SVP1,SVP2,SVP3,SVPT0,EP1,EP2, & + KARMAN,itimestep,ch,th3d,pi3d,qc3d,rho3d, & + tsq,qsq,cov,sh3d,el_pbl,qcg, & +!JOE-add output +! z0zt_ratio,BulkRi,wstar,qstar,resist,logres, & +!JOE-end + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte, & + ustm,ck,cka,cd,cda,isftcflx,iz0tlnd, & + bl_mynn_cloudpdf & +#if defined(mpas) + ,dxCell & +#endif + ) +!------------------------------------------------------------------- + IMPLICIT NONE +!------------------------------------------------------------------- +!-- U3D 3D u-velocity interpolated to theta points (m/s) +!-- V3D 3D v-velocity interpolated to theta points (m/s) +!-- T3D 3D temperature (K) +!-- QV3D 3D water vapor mixing ratio (Kg/Kg) +!-- P3D 3D pressure (Pa) +!-- RHO3D 3D density (kg/m3) +!-- dz8w 3D dz between full levels (m) +!-- CP heat capacity at constant pressure for dry air (J/kg/K) +!-- G acceleration due to gravity (m/s^2) +!-- ROVCP R/CP +!-- R gas constant for dry air (J/kg/K) +!-- XLV latent heat of vaporization for water (J/kg) +!-- PSFCPA surface pressure (Pa) +!-- ZNT roughness length (m) +!-- UST u* in similarity theory (m/s) +!-- USTM u* in similarity theory (m/s) w* added to WSPD. This is +! used to couple with TKE scheme but not in MYNN. +! 
(as of now, USTM = UST in this version) +!-- PBLH PBL height from previous time (m) +!-- MAVAIL surface moisture availability (between 0 and 1) +!-- ZOL z/L height over Monin-Obukhov length +!-- MOL T* (similarity theory) (K) +!-- RMOL Reciprocal of M-O length (/m) +!-- REGIME flag indicating PBL regime (stable, unstable, etc.) +!-- PSIM similarity stability function for momentum +!-- PSIH similarity stability function for heat +!-- XLAND land mask (1 for land, 2 for water) +!-- HFX upward heat flux at the surface (W/m^2) +!-- QFX upward moisture flux at the surface (kg/m^2/s) +!-- LH net upward latent heat flux at surface (W/m^2) +!-- TSK surface temperature (K) +!-- FLHC exchange coefficient for heat (W/m^2/K) +!-- FLQC exchange coefficient for moisture (kg/m^2/s) +!-- CHS heat/moisture exchange coefficient for LSM (m/s) +!-- QGH lowest-level saturated mixing ratio +!-- QSFC qv (specific humidity) at the surface +!-- QSFCMR qv (mixing ratio) at the surface +!-- U10 diagnostic 10m u wind +!-- V10 diagnostic 10m v wind +!-- TH2 diagnostic 2m theta (K) +!-- T2 diagnostic 2m temperature (K) +!-- Q2 diagnostic 2m mixing ratio (kg/kg) +!-- SNOWH Snow height (m) +!-- GZ1OZ0 log((z1+ZNT)/ZNT) where ZNT is roughness length +!-- WSPD wind speed at lowest model level (m/s) +!-- BR bulk Richardson number in surface layer +!-- ISFFLX isfflx=1 for surface heat and moisture fluxes +!-- DX horizontal grid size (m) +!-- SVP1 constant for saturation vapor pressure (=0.6112 kPa) +!-- SVP2 constant for saturation vapor pressure (=17.67 dimensionless) +!-- SVP3 constant for saturation vapor pressure (=29.65 K) +!-- SVPT0 constant for saturation vapor pressure (=273.15 K) +!-- EP1 constant for virtual temperature (Rv/Rd - 1) (dimensionless) +!-- EP2 constant for spec. hum. calc (Rd/Rv = 0.622) (dimensionless) +!-- EP3 constant for spec. hum. 
calc (1 - Rd/Rv = 0.378 ) (dimensionless) +!-- KARMAN Von Karman constant +!-- ck enthalpy exchange coeff at 10 meters +!-- cd momentum exchange coeff at 10 meters +!-- cka enthalpy exchange coeff at the lowest model level +!-- cda momentum exchange coeff at the lowest model level +!-- isftcflx =0: z0, zt, and zq from COARE3.0 (Fairall et al 2003) +! (water =1: z0 from Davis et al (2008), zt & zq from COARE3.0 +! only) =2: z0 from Davis et al (2008), zt & zq from Garratt (1992) +! =3: z0 from Taylor and Yelland (2004), zt and zq from COARE3.0 +! =4: z0 from Zilitinkevich (2001), zt & zq from COARE3.0 +!-- iz0tlnd =0: Zilitinkevich (1995) with Czil=0.14, +! (land =1: Czil_new (modified according to Chen & Zhang 2008) +! only) =2: Modified Yang et al (2002, 2008) - generalized for all landuse +! =3: constant zt = z0/7.4 (Garratt 1992) +! =4: Pan et al (1994) for zq; ZIlitintevich for zt +!-- bl_mynn_cloudpdf =0: Mellor & Yamada +! =1: Kuwano et al. +!-- el_pbl = mixing length from PBL scheme (meters) +!-- Sh3d = Stability finction for heat (unitless) +!-- cov = T'q' from PBL scheme +!-- tsq = T'T' from PBL scheme +!-- qsq = q'q' from PBL scheme +! +!-- ids start index for i in domain +!-- ide end index for i in domain +!-- jds start index for j in domain +!-- jde end index for j in domain +!-- kds start index for k in domain +!-- kde end index for k in domain +!-- ims start index for i in memory +!-- ime end index for i in memory +!-- jms start index for j in memory +!-- jme end index for j in memory +!-- kms start index for k in memory +!-- kme end index for k in memory +!-- its start index for i in tile +!-- ite end index for i in tile +!-- jts start index for j in tile +!-- jte end index for j in tile +!-- kts start index for k in tile +!-- kte end index for k in tile +!================================================================= +! 
SCALARS +!=================================== + INTEGER, INTENT(IN) :: ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte + INTEGER, INTENT(IN) :: itimestep + REAL, INTENT(IN) :: SVP1,SVP2,SVP3,SVPT0 + REAL, INTENT(IN) :: EP1,EP2,KARMAN + REAL, INTENT(IN) :: CP,G,ROVCP,R,XLV,DX +!NAMELIST OPTIONS: + INTEGER, INTENT(IN) :: ISFFLX + INTEGER, OPTIONAL, INTENT(IN) :: ISFTCFLX, IZ0TLND,& + bl_mynn_cloudpdf +!=================================== +! 3D VARIABLES +!=================================== + REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & + INTENT(IN ) :: dz8w, & + QV3D, & + P3D, & + T3D, & + QC3D, & + U3D,V3D, & + RHO3D,th3d,pi3d,tsq,qsq,cov,sh3d,el_pbl +!=================================== +! 2D VARIABLES +!=================================== + REAL, DIMENSION( ims:ime, jms:jme ) , & + INTENT(IN ) :: MAVAIL, & + PBLH, & + XLAND, & + TSK, & + QCG, & + PSFCPA , & + SNOWH + +#if defined(mpas) +!MPAS specific (Laura D. Fowler - 2014-12-02): + real,intent(in),dimension(ims:ime,jms:jme),optional:: dxCell +!MPAS specific end. +#endif + + REAL, DIMENSION( ims:ime, jms:jme ) , & + INTENT(OUT ) :: U10,V10, & + TH2,T2,Q2 + + REAL, OPTIONAL, DIMENSION( ims:ime, jms:jme ) , & + INTENT(OUT) :: ck,cka,cd,cda,ustm +! + REAL, DIMENSION( ims:ime, jms:jme ) , & + INTENT(INOUT) :: REGIME, & + HFX, & + QFX, & + LH, & + MOL,RMOL, & + QSFC, QGH, & + ZNT, & + ZOL, & + UST, & + CPM, & + CHS2, & + CQS2, & + CHS, & + CH, & + FLHC,FLQC, & + GZ1OZ0,WSPD,BR, & + PSIM,PSIH + +!ADDITIONAL OUTPUT +!JOE-begin + REAL, DIMENSION( ims:ime, jms:jme ) :: z0zt_ratio, & + BulkRi,wstar,qstar,resist,logres +!JOE-end +!=================================== +! 1D LOCAL ARRAYS +!=================================== + REAL, DIMENSION( its:ite ) :: U1D, & + V1D, & + QV1D, & + P1D, & + T1D,QC1D, & + RHO1D, & + dz8w1d + + ! 
VARIABLE FOR PASSING TO MYM_CONDENSATION + REAL, DIMENSION(kts:kts+1 ) :: dummy1,dummy2,dummy3,dummy4, & + dummy5,dummy6,dummy7,dummy8, & + dummy9,dummy10 + + REAL, DIMENSION( its:ite ) :: vt1,vq1 + REAL, DIMENSION(kts:kts+1) :: thl, qw, vt, vq + REAL :: ql + + INTEGER :: I,J,K,itf,jtf,ktf +!----------------------------------------------------------- + + itf=MIN0(ite,ide-1) + jtf=MIN0(jte,jde-1) + ktf=MIN0(kte,kde-1) + + DO J=jts,jte + DO i=its,ite + dz8w1d(I) = dz8w(i,kts,j) + U1D(i) =U3D(i,kts,j) + V1D(i) =V3D(i,kts,j) + QV1D(i)=QV3D(i,kts,j) + QC1D(i)=QC3D(i,kts,j) + P1D(i) =P3D(i,kts,j) + T1D(i) =T3D(i,kts,j) + RHO1D(i)=RHO3D(i,kts,j) + ENDDO + + IF (itimestep==1) THEN +! write(0,*) +! write(0,*) '--- sfc_mynn itimestep = ', itimestep +! write(0,*) '--- initialize vt1, vq1, ust, mol, qsfc, qstar' +! write(0,*) + DO i=its,ite + vt1(i)=0. + vq1(i)=0. + UST(i,j)=MAX(0.025*SQRT(U1D(i)*U1D(i) + V1D(i)*V1D(i)),0.001) + MOL(i,j)=0. ! Tstar + QSFC(i,j)=QV3D(i,kts,j)/(1.+QV3D(i,kts,j)) + qstar(i,j)=0.0 + ENDDO + ELSE +! write(0,*) +! write(0,*) '--- sfc_mynn itimestep = ', itimestep +! write(0,*) '--- call mym_condensation:' +! write(0,*) + DO i=its,ite + do k = kts,kts+1 + ql = qc3d(i,k,j)/(1.+qc3d(i,k,j)) + qw(k) = qv3d(i,k,j)/(1.+qv3d(i,k,j)) + ql + thl(k) = th3d(i,k,j)-xlvcp*ql/pi3d(i,k,j) + dummy1(k) = dz8w(i,k,j) + dummy2(k) = thl(k) + dummy3(k) = qw(k) + dummy4(k) = p3d(i,k,j) + dummy5(k) = pi3d(i,k,j) + dummy6(k) = tsq(i,k,j) + dummy7(k) = qsq(i,k,j) + dummy8(k) = cov(i,k,j) + dummy9(k) = Sh3d(i,k,j) + dummy10(k) = el_pbl(i,k,j) + end do + + ! NOTE: The last grid number is kts+1 instead of kte. 
+ CALL mym_condensation (kts,kts+1, & + & dummy1,dummy2,dummy3, & + & dummy4,dummy5,dummy6, & + & dummy7,dummy8,dummy9, & + & dummy10, & + & bl_mynn_cloudpdf, & + & vt(kts:kts+1), vq(kts:kts+1)) + vt1(i) = vt(kts) + vq1(i) = vq(kts) + ENDDO + ENDIF + + CALL SFCLAY1D_mynn(J,U1D,V1D,T1D,QV1D,P1D,dz8w1d,rho1d, & + CP,G,ROVCP,R,XLV,PSFCPA(ims,j),CHS(ims,j),CHS2(ims,j),& + CQS2(ims,j),CPM(ims,j),PBLH(ims,j), RMOL(ims,j), & + ZNT(ims,j),UST(ims,j),MAVAIL(ims,j),ZOL(ims,j), & + MOL(ims,j),REGIME(ims,j),PSIM(ims,j),PSIH(ims,j), & + XLAND(ims,j),HFX(ims,j),QFX(ims,j),TSK(ims,j), & + U10(ims,j),V10(ims,j),TH2(ims,j),T2(ims,j), & + Q2(ims,j),FLHC(ims,j),FLQC(ims,j),SNOWH(ims,j), & + QGH(ims,j),QSFC(ims,j),LH(ims,j), & + GZ1OZ0(ims,j),WSPD(ims,j),BR(ims,j),ISFFLX,DX, & + SVP1,SVP2,SVP3,SVPT0,EP1,EP2,KARMAN, & + ch(ims,j),vt1,vq1,qc1d,qcg(ims,j),itimestep, & +!JOE-begin additional output + z0zt_ratio(ims,j),BulkRi(ims,j),wstar(ims,j), & + qstar(ims,j),resist(ims,j),logres(ims,j), & +!JOE-end + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte & +#if defined(mpas) +!MPAS specific (Laura D. 
Fowler - 2014-12-02): + ,isftcflx,iz0tlnd, & + USTM(ims,j),CK(ims,j),CKA(ims,j), & + CD(ims,j),CDA(ims,j),dxCell(ims,j) & +#else + ,isftcflx,iz0tlnd, & + USTM(ims,j),CK(ims,j),CKA(ims,j), & + CD(ims,j),CDA(ims,j) & +#endif + ) + + ENDDO + + END SUBROUTINE SFCLAY_MYNN + +!------------------------------------------------------------------- + SUBROUTINE SFCLAY1D_mynn(J,U1D,V1D,T1D,QV1D,P1D,dz8w1d,rho1d, & + CP,G,ROVCP,R,XLV,PSFCPA,CHS,CHS2,CQS2,CPM, & + PBLH,RMOL,ZNT,UST,MAVAIL,ZOL,MOL,REGIME, & + PSIM,PSIH,XLAND,HFX,QFX,TSK, & + U10,V10,TH2,T2,Q2,FLHC,FLQC,SNOWH,QGH, & + QSFC,LH,GZ1OZ0,WSPD,BR,ISFFLX,DX, & + SVP1,SVP2,SVP3,SVPT0,EP1,EP2, & + KARMAN,ch,vt1,vq1,qc1d,qcg,itimestep, & +!JOE-additional output + zratio,BRi,wstar,qstar,resist,logres, & +!JOE-end + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte & + ,isftcflx, iz0tlnd, & +#if defined(mpas) +!MPAS specific (Laura D. Fowler - 2014-12-02): + ustm,ck,cka,cd,cda,dxCell & +#else + ustm,ck,cka,cd,cda & +#endif + ) + +!------------------------------------------------------------------- + IMPLICIT NONE +!------------------------------------------------------------------- +! SCALARS +!----------------------------- + INTEGER, INTENT(IN) :: ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte, & + J, itimestep + + REAL, PARAMETER :: XKA=2.4E-5 !molecular diffusivity + REAL, PARAMETER :: PRT=1. !prandlt number + REAL, INTENT(IN) :: SVP1,SVP2,SVP3,SVPT0,EP1,EP2 + REAL, INTENT(IN) :: KARMAN,CP,G,ROVCP,R,XLV,DX + +!----------------------------- +! NAMELIST OPTIONS +!----------------------------- + INTEGER, INTENT(IN) :: ISFFLX + INTEGER, OPTIONAL, INTENT(IN ) :: ISFTCFLX, IZ0TLND + +!----------------------------- +! 
1D ARRAYS +!----------------------------- + REAL, DIMENSION( ims:ime ), INTENT(IN) :: MAVAIL, & + PBLH, & + XLAND, & + TSK, & + PSFCPA, & + QCG, & + SNOWH + + REAL, DIMENSION( its:ite ), INTENT(IN) :: U1D,V1D, & + QV1D,P1D, & + T1D,QC1d, & + dz8w1d, & + RHO1D, & + vt1,vq1 + + REAL, DIMENSION( ims:ime ), INTENT(INOUT) :: REGIME, & + HFX,QFX,LH, & + MOL,RMOL, & + QGH,QSFC, & + ZNT, & + ZOL, & + UST, & + CPM, & + CHS2,CQS2, & + CHS,CH, & + FLHC,FLQC, & + GZ1OZ0, & + WSPD, & + BR, & + PSIM,PSIH + + ! DIAGNOSTIC OUTPUT + REAL, DIMENSION( ims:ime ), INTENT(OUT) :: U10,V10, & + TH2,T2,Q2 + + REAL, OPTIONAL, DIMENSION( ims:ime ) , & + INTENT(OUT) :: ck,cka,cd,cda,ustm +!-------------------------------------------- +!JOE-additinal output + REAL, DIMENSION( ims:ime ) :: zratio,BRi,wstar,qstar, & + resist,logres +!JOE-end +!---------------------------------------------------------------- +! LOCAL VARS +!---------------------------------------------------------------- + REAL :: thl1,sqv1,sqc1,exner1,sqvg,sqcg,vv,ww + + REAL, DIMENSION(its:ite) :: & + ZA, & !Height of lowest 1/2 sigma level(m) + THV1D, & !Theta-v at lowest 1/2 sigma (K) + TH1D, & !Theta at lowest 1/2 sigma (K) + TC1D, & !T at lowest 1/2 sigma (Celsius) + TV1D, & !Tv at lowest 1/2 sigma (K) + QVSH, & !qv at lowest 1/2 sigma (spec humidity) + PSIH2,PSIM2, & !M-O stability functions at z=2 m + PSIH10,PSIM10, & !M-O stability functions at z=10 m + WSPDI, & + z_t,z_q, & !thermal & moisture roughness lengths + GOVRTH, & !g/theta + THGB, & !theta at ground + THVGB, & !theta-v at ground + PSFC, & !press at surface (Pa/1000) + QSFCMR, & !qv at surface (mixing ratio, kg/kg) + GZ2OZ0, & !LOG((2.0+ZNT(I))/ZNT(I)) + GZ10OZ0, & !LOG((10.+ZNT(I))/ZNT(I)) + GZ2OZt, & !LOG((2.0+z_t(i))/z_t(i)) + GZ10OZt, & !LOG((10.+z_t(i))/z_t(i)) + GZ1OZt !LOG((ZA(I)+z_t(i))/z_t(i)) + + INTEGER :: N,I,K,L,NZOL,NK,NZOL2,NZOL10, ITER + INTEGER, PARAMETER :: ITMAX=5 + + REAL :: PL,THCON,TVCON,E1 + REAL :: 
DTHVDZ,DTHVM,VCONV,RZOL,RZOL2,RZOL10,ZOL2,ZOL10 + REAL :: DTG,PSIX,DTTHX,DTHDZ,PSIX10,PSIT,PSIT2,PSIT10, & + PSIQ,PSIQ2,PSIQ10 + REAL :: FLUXC,VSGD + REAL :: restar,VISC,DQG,OLDUST,OLDTST + REAL, PARAMETER :: psilim = -10. ! ONLY AFFECTS z/L > 2.0 + +#if defined(mpas) +!MPAS specific (Laura D. Fowler - 2014-12-02): + real,intent(in),dimension(ims:ime),optional:: dxCell +!MPAS specific end. +#endif + +!------------------------------------------------------------------- + + DO I=its,ite + ! CONVERT GROUND & LOWEST LAYER TEMPERATURE TO POTENTIAL TEMPERATURE: + ! PSFC cmb + PSFC(I)=PSFCPA(I)/1000. + THGB(I)=TSK(I)*(100./PSFC(I))**ROVCP !(K) + ! PL cmb + PL=P1D(I)/1000. + THCON=(100./PL)**ROVCP + TH1D(I)=T1D(I)*THCON !(Theta, K) + TC1D(I)=T1D(I)-273.15 !(T, Celsius) + + ! CONVERT TO VIRTUAL TEMPERATURE + QVSH(I)=QV1D(I)/(1.+QV1D(I)) !CONVERT TO SPEC HUM (kg/kg) + TVCON=(1.+EP1*QVSH(I)) + THV1D(I)=TH1D(I)*TVCON !(K) + TV1D(I)=T1D(I)*TVCON !(K) + + !RHO1D(I)=PSFCPA(I)/(R*TV1D(I)) !now using value calculated in sfc driver + ZA(I)=0.5*dz8w1d(I) !height of first half-sigma level + GOVRTH(I)=G/TH1D(I) + ENDDO + + DO I=its,ite + IF (TSK(I) .LT. 273.15) THEN + !SATURATION VAPOR PRESSURE WRT ICE (SVP1=.6112; 10*mb) + E1=SVP1*EXP(4648*(1./273.15 - 1./TSK(I)) - & + & 11.64*LOG(273.15/TSK(I)) + 0.02265*(273.15 - TSK(I))) + ELSE + !SATURATION VAPOR PRESSURE WRT WATER (Bolton 1980) + E1=SVP1*EXP(SVP2*(TSK(I)-SVPT0)/(TSK(I)-SVP3)) + ENDIF + !FOR LAND POINTS, QSFC can come from LSM, ONLY RECOMPUTE OVER WATER + IF (xland(i).gt.1.5 .or. QSFC(i).le.0.0) THEN !WATER + QSFC(I)=EP2*E1/(PSFC(I)-ep_3*E1) !specific humidity + QSFCMR(I)=EP2*E1/(PSFC(I)-E1) !mixing ratio + ELSE !LAND + QSFCMR(I)=QSFC(I)/(1.-QSFC(I)) + ENDIF + + ! QGH CHANGED TO USE LOWEST-LEVEL AIR TEMP CONSISTENT WITH MYJSFC CHANGE + ! Q2SAT = QGH IN LSM + IF (TSK(I) .LT. 
273.15) THEN + !SATURATION VAPOR PRESSURE WRT ICE + E1=SVP1*EXP(4648*(1./273.15 - 1./T1D(I)) - & + & 11.64*LOG(273.15/T1D(I)) + 0.02265*(273.15 - T1D(I))) + ELSE + !SATURATION VAPOR PRESSURE WRT WATER (Bolton 1980) + E1=SVP1*EXP(SVP2*(T1D(I)-SVPT0)/(T1D(I)-SVP3)) + ENDIF + PL=P1D(I)/1000. + !QGH(I)=EP2*E1/(PL-ep_3*E1) !specific humidity + QGH(I)=EP2*E1/(PL-E1) !mixing ratio + CPM(I)=CP*(1.+0.84*QV1D(I)) + ENDDO + + DO I=its,ite + WSPD(I)=SQRT(U1D(I)*U1D(I)+V1D(I)*V1D(I)) + + !account for partial condensation + exner1=(p1d(I)/p1000mb)**ROVCP + sqc1=qc1d(I)/(1.+qc1d(I)) !lowest mod level cloud water spec hum + sqv1=QVSH(I) !lowest mod level water vapor spec hum + thl1=TH1D(I)-xlvcp/exner1*sqc1 + sqvg=qsfc(I) !sfc water vapor spec hum + sqcg=qcg(I)/(1.+qcg(I)) !sfc cloud water spec hum + + vv = thl1-THGB(I) + !TGS:ww = mavail(I)*(sqv1-sqvg) + (sqc1-sqcg) + ww = (sqv1-sqvg) + (sqc1-sqcg) + + !TGS:THVGB(I)=THGB(I)*(1.+EP1*QSFC(I)*MAVAIL(I)) + THVGB(I)=THGB(I)*(1.+EP1*QSFC(I)) + + DTHDZ=(TH1D(I)-THGB(I)) + DTHVDZ=(THV1D(I)-THVGB(I)) + !DTHVDZ= (vt1(i) + 1.0)*vv + (vq1(i) + tv0)*ww + + !-------------------------------------------------------- + ! Calculate the convective velocity scale (WSTAR) and + ! subgrid-scale velocity (VSGD) following Beljaars (1995, QJRMS) + ! and Mahrt and Sun (1995, MWR), respectively + !------------------------------------------------------- + ! VCONV = 0.25*sqrt(g/THVGB(I)*pblh(i)*dthvm) + ! Use Beljaars over land, old MM5 (Wyngaard) formula over water + IF (xland(i).lt.1.5) then !LAND (xland == 1) + + fluxc = max(hfx(i)/RHO1D(i)/cp & + & + ep1*THVGB(I)*qfx(i)/RHO1D(i),0.) + WSTAR(I) = vconvc*(g/TSK(i)*pblh(i)*fluxc)**.33 + + ELSE !WATER (xland == 2) + + !JOE-the Wyngaard formula is ~3 times larger than the Beljaars + !formula, so switch to Beljaars for water, but use VCONVC = 1.25, + !as in the COARE3.0 bulk parameterizations. + !IF(-DTHVDZ.GE.0)THEN + ! DTHVM=-DTHVDZ + !ELSE + ! DTHVM=0. 
+ !ENDIF + !WSTAR(I) = 2.*SQRT(DTHVM) + fluxc = max(hfx(i)/RHO1D(i)/cp & + & + ep1*THVGB(I)*qfx(i)/RHO1D(i),0.) + WSTAR(I) = 1.25*(g/TSK(i)*pblh(i)*fluxc)**.33 + + ENDIF + + !-------------------------------------------------------- + ! Mahrt and Sun low-res correction + ! (for 13 km ~ 0.37 m/s; for 3 km == 0 m/s) + !-------------------------------------------------------- +!MPAS specific (Laura D. Fowler): We take into accound the actual size of individual +!grid-boxes: + if(present(dxCell)) then + VSGD = 0.32 * (max(dxCell(i)/5000.-1.,0.))**.33 + else + VSGD = 0.32 * (max(dx/5000.-1.,0.))**.33 + endif + WSPD(I)=SQRT(WSPD(I)*WSPD(I)+WSTAR(I)*WSTAR(I)+vsgd*vsgd) + WSPD(I)=MAX(WSPD(I),wmin) + + !-------------------------------------------------------- + ! CALCULATE THE BULK RICHARDSON NUMBER OF SURFACE LAYER, + ! ACCORDING TO AKB(1976), EQ(12). + !-------------------------------------------------------- + BR(I)=GOVRTH(I)*ZA(I)*DTHVDZ/(WSPD(I)*WSPD(I)) + !SET LIMITS ACCORDING TO Li et al. (2010) Boundary-Layer Meteorol (p.158) + !JOE: defying limits: BR(I)=MAX(BR(I),-2.0) + BR(I)=MAX(BR(I),-20.0) + BR(I)=MIN(BR(I),2.0) + BRi(I)=BR(I) !new variable for output - BR is not a "state" variable. + + ! IF PREVIOUSLY UNSTABLE, DO NOT LET INTO REGIMES 1 AND 2 (STABLE) + !if (itimestep .GT. 1) THEN + ! IF(MOL(I).LT.0.)BR(I)=MIN(BR(I),0.0) + !ENDIF + + !IF(I .eq. 2)THEN + ! write(*,1006)"BR:",BR(I)," fluxc:",fluxc," vt1:",vt1(i)," vq1:",vq1(i) + ! 
write(*,1007)"XLAND:",XLAND(I)," WSPD:",WSPD(I)," DTHVDZ:",DTHVDZ," WSTAR:",WSTAR(I) + !ENDIF + + ENDDO + + 1006 format(A,F7.3,A,f9.4,A,f9.5,A,f9.4) + 1007 format(A,F2.0,A,f6.2,A,f7.3,A,f7.2) + +!-------------------------------------------------------------------- +!-------------------------------------------------------------------- +!--- BEGIN ITERATION LOOP (ITMAX=5); USUALLY CONVERGES IN TWO PASSES +!-------------------------------------------------------------------- +!-------------------------------------------------------------------- + + DO I=its,ite + + ITER = 1 + DO WHILE (ITER .LE. ITMAX) + + !COMPUTE KINEMATIC VISCOSITY (m2/s) Andreas (1989) CRREL Rep. 89-11 + !valid between -173 and 277 degrees C. + VISC=1.326e-5*(1. + 6.542e-3*TC1D(I) + 8.301e-6*TC1D(I)*TC1D(I) & + - 4.84e-9*TC1D(I)*TC1D(I)*TC1D(I)) + + IF((XLAND(I)-1.5).GE.0)THEN + !-------------------------------------- + ! WATER + !-------------------------------------- + ! CALCULATE z0 (znt) + !-------------------------------------- + IF ( PRESENT(ISFTCFLX) ) THEN + IF ( ISFTCFLX .EQ. 0 ) THEN + !NAME OF SUBROUTINE IS MISLEADING - ACTUALLY VARIABLE CHARNOCK + !PARAMETER FROM COARE3.0: + CALL charnock_1955(ZNT(i),UST(i),WSPD(i),visc) + ELSEIF ( ISFTCFLX .EQ. 1 .OR. ISFTCFLX .EQ. 2 ) THEN + CALL davis_etal_2008(ZNT(i),UST(i)) + ELSEIF ( ISFTCFLX .EQ. 3 ) THEN + CALL Taylor_Yelland_2001(ZNT(i),UST(i),WSPD(i)) + ELSEIF ( ISFTCFLX .EQ. 4 ) THEN + CALL charnock_1955(ZNT(i),UST(i),WSPD(i),visc) + ENDIF + ELSE + !DEFAULT TO COARE 3.0 + CALL charnock_1955(ZNT(i),UST(i),WSPD(i),visc) + ENDIF + + !COMPUTE ROUGHNESS REYNOLDS NUMBER (restar) USING NEW ZNT + ! AHW: Garrattt formula: Calculate roughness Reynolds number + ! Kinematic viscosity of air (linear approx to + ! temp dependence at sea level) + restar=MAX(ust(i)*ZNT(i)/visc, 0.1) + + !-------------------------------------- + !CALCULATE z_t and z_q + !-------------------------------------- + IF ( PRESENT(ISFTCFLX) ) THEN + IF ( ISFTCFLX .EQ. 
0 ) THEN + CALL fairall_2001(z_t(i),z_q(i),restar,UST(i),visc) + ELSEIF ( ISFTCFLX .EQ. 1 ) THEN + CALL fairall_2001(z_t(i),z_q(i),restar,UST(i),visc) + ELSEIF ( ISFTCFLX .EQ. 2 ) THEN + CALL garratt_1992(z_t(i),z_q(i),ZNT(i),restar,XLAND(I)) + ELSEIF ( ISFTCFLX .EQ. 3 ) THEN + CALL fairall_2001(z_t(i),z_q(i),restar,UST(i),visc) + ELSEIF ( ISFTCFLX .EQ. 4 ) THEN + CALL zilitinkevich_1995(ZNT(i),z_t(i),z_q(i),restar,& + UST(I),KARMAN,XLAND(I),IZ0TLND) + ENDIF + ELSE + !DEFAULT TO COARE 3.0 + CALL fairall_2001(z_t(i),z_q(i),restar,UST(i),visc) + ENDIF + + ELSE + + !-------------------------------------- + ! LAND + !-------------------------------------- + !COMPUTE ROUGHNESS REYNOLDS NUMBER (restar) USING DEFAULT ZNT + restar=MAX(ust(i)*ZNT(i)/visc, 0.1) + + !-------------------------------------- + !GET z_t and z_q + !-------------------------------------- + !CHECK FOR SNOW/ICE POINTS OVER LAND + !IF ( ZNT(i) .LE. SNOWZ0 .AND. TSK(I) .LE. 273.15 ) THEN + IF ( SNOWH(i) .GE. 0.1) THEN + CALL Andreas_2002(ZNT(i),restar,z_t(i),z_q(i)) + ELSE + IF ( PRESENT(IZ0TLND) ) THEN + IF ( IZ0TLND .LE. 1 .OR. IZ0TLND .EQ. 4) THEN + !IF IZ0TLND==4, THEN PSIQ WILL BE RECALCULATED USING + !PAN ET AL (1994), but PSIT FROM ZILI WILL BE USED. + CALL zilitinkevich_1995(ZNT(i),z_t(i),z_q(i),restar,& + UST(I),KARMAN,XLAND(I),IZ0TLND) + ELSEIF ( IZ0TLND .EQ. 2 ) THEN + CALL Yang_2008(ZNT(i),z_t(i),z_q(i),UST(i),MOL(I),& + qstar(I),restar,visc,XLAND(I)) + ELSEIF ( IZ0TLND .EQ. 3 ) THEN + !Original MYNN in WRF-ARW used this form: + CALL garratt_1992(z_t(i),z_q(i),ZNT(i),restar,XLAND(I)) + ENDIF + ELSE + !DEFAULT TO ZILITINKEVICH + CALL zilitinkevich_1995(ZNT(i),z_t(i),z_q(i),restar,& + UST(I),KARMAN,XLAND(I),0) + ENDIF + ENDIF + + ENDIF + zratio(i)=znt(i)/z_t(i) + + !ADD RESISTANCE (SOMEWHAT FOLLOWING JIMENEZ ET AL. (2012)) TO PROTECT AGAINST + !EXCESSIVE FLUXES WHEN USING A LOW FIRST MODEL LEVEL (ZA < 10 m). 
+ !Formerly: GZ1OZ0(I)= LOG(ZA(I)/ZNT(I)) + GZ1OZ0(I)= LOG((ZA(I)+ZNT(I))/ZNT(I)) + GZ1OZt(I)= LOG((ZA(I)+z_t(i))/z_t(i)) + GZ2OZ0(I)= LOG((2.0+ZNT(I))/ZNT(I)) + GZ2OZt(I)= LOG((2.0+z_t(i))/z_t(i)) + GZ10OZ0(I)=LOG((10.+ZNT(I))/ZNT(I)) + GZ10OZt(I)=LOG((10.+z_t(i))/z_t(i)) + + !-------------------------------------------------------------------- + !--- DIAGNOSE BASIC PARAMETERS FOR THE APPROPRIATE STABILITY CLASS: + ! + ! THE STABILITY CLASSES ARE DETERMINED BY BR (BULK RICHARDSON NO.). + ! + ! CRITERIA FOR THE CLASSES ARE AS FOLLOWS: + ! + ! 1. BR .GE. 0.2; + ! REPRESENTS NIGHTTIME STABLE CONDITIONS (REGIME=1), + ! + ! 2. BR .LT. 0.2 .AND. BR .GT. 0.0; + ! REPRESENTS DAMPED MECHANICAL TURBULENT CONDITIONS + ! (REGIME=2), + ! + ! 3. BR .EQ. 0.0 + ! REPRESENTS FORCED CONVECTION CONDITIONS (REGIME=3), + ! + ! 4. BR .LT. 0.0 + ! REPRESENTS FREE CONVECTION CONDITIONS (REGIME=4). + ! + !-------------------------------------------------------------------- + IF (BR(I) .GT. 0.0) THEN + IF (BR(I) .GT. 0.2) THEN + !---CLASS 1; STABLE (NIGHTTIME) CONDITIONS: + REGIME(I)=1. + ELSE + !---CLASS 2; DAMPED MECHANICAL TURBULENCE: + REGIME(I)=2. + ENDIF + + !COMPUTE z/L + !CALL Li_etal_2010(ZOL(I),BR(I),ZA(I)/ZNT(I),zratio(I)) + IF (ITER .EQ. 1 .AND. itimestep .LE. 1) THEN + CALL Li_etal_2010(ZOL(I),BR(I),ZA(I)/ZNT(I),zratio(I)) + ELSE + ZOL(I)=ZA(I)*KARMAN*G*MOL(I)/(TH1D(I)*MAX(UST(I),0.001)**2) + ZOL(I)=MAX(ZOL(I),0.0) + ZOL(I)=MIN(ZOL(I),2.) + ENDIF + + !COMPUTE PSIM and PSIH + IF((XLAND(I)-1.5).GE.0)THEN + ! WATER + !CALL PSI_Suselj_Sood_2010(PSIM(I),PSIH(I),ZOL(I)) + !CALL PSI_Beljaars_Holtslag_1991(PSIM(I),PSIH(I),ZOL(I)) + !CALL PSI_Businger_1971(PSIM(I),PSIH(I),ZOL(I)) + CALL PSI_DyerHicks(PSIM(I),PSIH(I),ZOL(I),z_t(I),ZNT(I),ZA(I)) + ELSE + ! 
LAND + !CALL PSI_Beljaars_Holtslag_1991(PSIM(I),PSIH(I),ZOL(I)) + !CALL PSI_Businger_1971(PSIM(I),PSIH(I),ZOL(I)) + !CALL PSI_Zilitinkevich_Esau_2007(PSIM(I),PSIH(I),ZOL(I)) + CALL PSI_DyerHicks(PSIM(I),PSIH(I),ZOL(I),z_t(I),ZNT(I),ZA(I)) + ENDIF + + ! LOWER LIMIT ON PSI IN STABLE CONDITIONS + PSIM(I)=MAX(PSIM(I),psilim) + PSIH(I)=MAX(PSIH(I),psilim) + PSIM10(I)=MAX(10./ZA(I)*PSIM(I), psilim) + PSIH10(I)=MAX(10./ZA(I)*PSIH(I), psilim) + PSIM2(I)=MAX(2./ZA(I)*PSIM(I), psilim) + PSIH2(I)=MAX(2./ZA(I)*PSIH(I), psilim) + ! 1.0 over Monin-Obukhov length + RMOL(I)= ZOL(I)/ZA(I) + + ELSEIF(BR(I) .EQ. 0.) THEN + !========================================================= + !-----CLASS 3; FORCED CONVECTION/NEUTRAL: + !========================================================= + REGIME(I)=3. + + PSIM(I)=0.0 + PSIH(I)=PSIM(I) + PSIM10(I)=0. + PSIH10(I)=PSIM10(I) + PSIM2(I)=0. + PSIH2(I)=PSIM2(I) + + !ZOL(I)=0. + IF(UST(I) .LT. 0.01)THEN + ZOL(I)=BR(I)*GZ1OZ0(I) + ELSE + ZOL(I)=KARMAN*GOVRTH(I)*ZA(I)*MOL(I)/(UST(I)*UST(I)) + ENDIF + RMOL(I) = ZOL(I)/ZA(I) + + ELSEIF(BR(I) .LT. 0.)THEN + !========================================================== + !-----CLASS 4; FREE CONVECTION: + !========================================================== + REGIME(I)=4. + + !COMPUTE z/L + !CALL Li_etal_2010(ZOL(I),BR(I),ZA(I)/ZNT(I),zratio(I)) + IF (ITER .EQ. 1 .AND. itimestep .LE. 1) THEN + CALL Li_etal_2010(ZOL(I),BR(I),ZA(I)/ZNT(I),zratio(I)) + ELSE + ZOL(I)=ZA(I)*KARMAN*G*MOL(I)/(TH1D(I)*MAX(UST(I),0.001)**2) + ZOL(I)=MAX(ZOL(I),-9.999) + ZOL(I)=MIN(ZOL(I),0.0) + ENDIF + + ZOL10=10./ZA(I)*ZOL(I) + ZOL2=2./ZA(I)*ZOL(I) + ZOL(I)=MIN(ZOL(I),0.) + ZOL(I)=MAX(ZOL(I),-9.9999) + ZOL10=MIN(ZOL10,0.) + ZOL10=MAX(ZOL10,-9.9999) + ZOL2=MIN(ZOL2,0.) + ZOL2=MAX(ZOL2,-9.9999) + NZOL=INT(-ZOL(I)*100.) + RZOL=-ZOL(I)*100.-NZOL + NZOL10=INT(-ZOL10*100.) + RZOL10=-ZOL10*100.-NZOL10 + NZOL2=INT(-ZOL2*100.) + RZOL2=-ZOL2*100.-NZOL2 + + !COMPUTE PSIM and PSIH + IF((XLAND(I)-1.5).GE.0)THEN + ! 
WATER + !CALL PSI_Suselj_Sood_2010(PSIM(I),PSIH(I),ZOL(I)) + !CALL PSI_Hogstrom_1996(PSIM(I),PSIH(I),ZOL(I), z_t(I), ZNT(I), ZA(I)) + !CALL PSI_Businger_1971(PSIM(I),PSIH(I),ZOL(I)) + CALL PSI_DyerHicks(PSIM(I),PSIH(I),ZOL(I),z_t(I),ZNT(I),ZA(I)) + ELSE + ! LAND + !CALL PSI_Hogstrom_1996(PSIM(I),PSIH(I),ZOL(I), z_t(I), ZNT(I), ZA(I)) + !CALL PSI_Businger_1971(PSIM(I),PSIH(I),ZOL(I)) + CALL PSI_DyerHicks(PSIM(I),PSIH(I),ZOL(I),z_t(I),ZNT(I),ZA(I)) + ENDIF + +!!!!!JOE-test:avoid using psi tables in entirety +! PSIM10(I)=PSIMTB(NZOL10)+RZOL10*(PSIMTB(NZOL10+1)-PSIMTB(NZOL10)) +! PSIH10(I)=PSIHTB(NZOL10)+RZOL10*(PSIHTB(NZOL10+1)-PSIHTB(NZOL10)) +! PSIM2(I)=PSIMTB(NZOL2)+RZOL2*(PSIMTB(NZOL2+1)-PSIMTB(NZOL2)) +! PSIH2(I)=PSIHTB(NZOL2)+RZOL2*(PSIHTB(NZOL2+1)-PSIHTB(NZOL2)) + PSIM10(I)=10./ZA(I)*PSIM(I) + PSIH10(I)=10./ZA(I)*PSIH(I) + PSIM2(I)=2./ZA(I)*PSIM(I) + PSIH2(I)=2./ZA(I)*PSIH(I) + + !---LIMIT PSIH AND PSIM IN THE CASE OF THIN LAYERS AND + !---HIGH ROUGHNESS. THIS PREVENTS DENOMINATOR IN FLUXES + !---FROM GETTING TOO SMALL + !PSIH(I)=MIN(PSIH(I),0.9*GZ1OZt(I)) !JOE: less restricitive over forest/urban. + PSIH(I)=MIN(PSIH(I),0.9*GZ1OZ0(I)) + PSIM(I)=MIN(PSIM(I),0.9*GZ1OZ0(I)) + !PSIH2(I)=MIN(PSIH2(I),0.9*GZ2OZt(I)) !JOE: less restricitive over forest/urban. + PSIH2(I)=MIN(PSIH2(I),0.9*GZ2OZ0(I)) + PSIM2(I)=MIN(PSIM2(I),0.9*GZ2OZ0(I)) + PSIM10(I)=MIN(PSIM10(I),0.9*GZ10OZ0(I)) + PSIH10(I)=MIN(PSIH10(I),0.9*GZ10OZ0(I)) + + RMOL(I) = ZOL(I)/ZA(I) + + ENDIF + + !------------------------------------------------------------ + !-----COMPUTE THE FRICTIONAL VELOCITY: + !------------------------------------------------------------ + ! ZA(1982) EQS(2.60),(2.61). + GZ1OZ0(I) =LOG((ZA(I)+ZNT(I))/ZNT(I)) + GZ10OZ0(I)=LOG((10.+ZNT(I))/ZNT(I)) + PSIX=GZ1OZ0(I)-PSIM(I) + PSIX10=GZ10OZ0(I)-PSIM10(I) + ! TO PREVENT OSCILLATIONS AVERAGE WITH OLD VALUE + OLDUST = UST(I) + UST(I)=0.5*UST(I)+0.5*KARMAN*WSPD(I)/PSIX + !NON-AVERAGED: UST(I)=KARMAN*WSPD(I)/PSIX + + ! 
Compute u* without vconv for use in HFX calc when isftcflx > 0 + WSPDI(I)=MAX(SQRT(U1D(I)*U1D(I)+V1D(I)*V1D(I)), wmin) + IF ( PRESENT(USTM) ) THEN + USTM(I)=0.5*USTM(I)+0.5*KARMAN*WSPDI(I)/PSIX + ENDIF + + IF ((XLAND(I)-1.5).LT.0.) THEN !LAND + UST(I)=MAX(UST(I),0.01) !JOE:Relaxing this limit + !Keep ustm = ust over land. + IF ( PRESENT(USTM) ) USTM(I)=UST(I) + ENDIF + + !------------------------------------------------------------ + !-----COMPUTE THE THERMAL AND MOISTURE RESISTANCE (PSIQ AND PSIT): + !------------------------------------------------------------ + ! LOWER LIMIT ADDED TO PREVENT LARGE FLHC IN SOIL MODEL + ! ACTIVATES IN UNSTABLE CONDITIONS WITH THIN LAYERS OR HIGH Z0 + GZ1OZt(I)= LOG((ZA(I)+z_t(i))/z_t(i)) + GZ2OZt(I)= LOG((2.0+z_t(i))/z_t(i)) + + !PSIT=MAX(GZ1OZ0(I)-PSIH(I),2.) + PSIT=MAX(LOG((ZA(I)+z_t(i))/z_t(i))-PSIH(I) ,2.0) + PSIT2=MAX(LOG((2.0+z_t(i))/z_t(i))-PSIH2(I) ,2.0) + resist(I)=PSIT + logres(I)=GZ1OZt(I) + + PSIQ=MAX(LOG((za(i)+z_q(i))/z_q(I))-PSIH(I) ,2.0) + PSIQ2=MAX(LOG((2.0+z_q(i))/z_q(I))-PSIH2(I) ,2.0) + + IF((XLAND(I)-1.5).LT.0)THEN !Land only + IF ( IZ0TLND .EQ. 4 ) THEN + CALL Pan_etal_1994(PSIQ,PSIQ2,UST(I),PSIH(I),PSIH2(I),& + & KARMAN,ZA(I)) + ENDIF + ENDIF + + !---------------------------------------------------- + !COMPUTE THE TEMPERATURE SCALE (or FRICTION TEMPERATURE, T*) + !---------------------------------------------------- + DTG=TH1D(I)-THGB(I) + OLDTST=MOL(I) + MOL(I)=KARMAN*DTG/PSIT/PRT + !t_star(I) = -HFX(I)/(UST(I)*CPM(I)*RHO1D(I)) + !t_star(I) = MOL(I) + !---------------------------------------------------- + !COMPUTE THE MOISTURE SCALE (or q*) + DQG=(QVSH(i)-qsfc(i))*1000. !(kg/kg -> g/kg) + qstar(I)=KARMAN*DQG/PSIQ/PRT + + !----------------------------------------------------- + !COMPUTE DIAGNOSTICS + !----------------------------------------------------- + !COMPUTE 10 M WNDS + !----------------------------------------------------- + ! If the lowest model level is close to 10-m, use it + ! 
instead of the flux-based diagnostic formula. + if (ZA(i) .gt. 7.0 .and. ZA(i) .lt. 13.0) then + U10(I)=U1D(I) + V10(I)=V1D(I) + else + U10(I)=U1D(I)*PSIX10/PSIX + V10(I)=V1D(I)*PSIX10/PSIX + endif + + !----------------------------------------------------- + !COMPUTE 2m T, TH, AND Q + !THESE WILL BE OVERWRITTEN FOR LAND POINTS IN THE LSM + !----------------------------------------------------- + TH2(I)=THGB(I)+DTG*PSIT2/PSIT + !*** BE CERTAIN THAT THE 2-M THETA IS BRACKETED BY + !*** THE VALUES AT THE SURFACE AND LOWEST MODEL LEVEL. + IF ((TH1D(I)>THGB(I) .AND. (TH2(I)TH1D(I))) .OR. & + (TH1D(I)THGB(I) .OR. TH2(I)QSFCMR(I) .AND. (Q2(I)QV1D(I))) .OR. & + (QV1D(I)QSFCMR(I) .OR. Q2(I) 1200. .OR. HFX(I) < -500. .OR. & +! &LH(I) > 1200. .OR. LH(I) < -500. .OR. & +! &UST(I) < 0.0 .OR. UST(I) > 4.0 .OR. & +! &WSTAR(I)<0.0 .OR. WSTAR(I) > 6.0 .OR. & +! &RHO1D(I)<0.0 .OR. RHO1D(I) > 1.6 .OR. & +! &QSFC(I)*1000. <0.0 .OR. QSFC(I)*1000. >38. .OR. & +! &PBLH(I)>6000.) THEN +! print*,"SUSPICIOUS VALUES IN MYNN SFCLAYER",& +! ITER-ITMAX," ITERATIONS",I,J +! write(*,1000)"HFX: ",HFX(I)," LH:",LH(I)," CH:",CH(I),& +! " PBLH:",PBLH(I) +! write(*,1001)"REGIME:",REGIME(I)," z/L:",ZOL(I)," U*:",UST(I),& +! " Tstar:",MOL(I) +! write(*,1002)"PSIM:",PSIM(I)," PSIH:",PSIH(I)," W*:",WSTAR(I),& +! " DTHV:",THV1D(I)-THVGB(I) +! write(*,1003)"CPM:",CPM(I)," RHO1D:",RHO1D(I)," L:",& +! ZOL(I)/ZA(I)," DTH:",TH1D(I)-THGB(I) +! write(*,1004)"Z0/Zt:",zratio(I)," Z0:",ZNT(I)," Zt:",z_t(I),& +! " za:",za(I) +! write(*,1005)"Re:",restar," MAVAIL:",MAVAIL(I)," QSFC(I):",& +! QSFC(I)," QVSH(I):",QVSH(I) +! print*,"PSIX=",PSIX," Z0:",ZNT(I)," T1D(i):",T1D(i) +! write(*,*)"=============================================" +! ENDIF +! ENDIF + + ENDDO !end i-loop + +END SUBROUTINE SFCLAY1D_mynn +!------------------------------------------------------------------- + SUBROUTINE zilitinkevich_1995(Z_0,Zt,Zq,restar,ustar,KARMAN,& + & landsea,IZ0TLND2) + + ! 
This subroutine returns the thermal and moisture roughness lengths + ! from Zilitinkevich (1995) and Zilitinkevich et al. (2001) over + ! land and water, respectively. + ! + ! MODS: + ! 20120705 : added IZ0TLND option. Note: This option was designed + ! to work with the Noah LSM and may be specific for that + ! LSM only. Tests with RUC LSM showed no improvements. + + IMPLICIT NONE + REAL, INTENT(IN) :: Z_0,restar,ustar,KARMAN,landsea + INTEGER, OPTIONAL, INTENT(IN):: IZ0TLND2 + REAL, INTENT(OUT) :: Zt,Zq + REAL :: CZIL !=0.100 in Chen et al. (1997) + !=0.075 in Zilitinkevich (1995) + !=0.500 in Lemone et al. (2008) + + IF (landsea-1.5 .GT. 0) THEN !WATER + + !THIS IS BASED ON Zilitinkevich, Grachev, and Fairall (2001; + !Their equations 15 and 16). + IF (restar .LT. 0.1) THEN + Zt = Z_0*EXP(KARMAN*2.0) + Zt = MIN( Zt, 6.0e-5) + Zt = MAX( Zt, 2.0e-9) + Zq = Z_0*EXP(KARMAN*3.0) + Zq = MIN( Zq, 6.0e-5) + Zq = MAX( Zq, 2.0e-9) + ELSE + Zt = Z_0*EXP(-KARMAN*(4.0*SQRT(restar)-3.2)) + Zt = MIN( Zt, 6.0e-5) + Zt = MAX( Zt, 2.0e-9) + Zq = Z_0*EXP(-KARMAN*(4.0*SQRT(restar)-4.2)) + Zq = MIN( Zt, 6.0e-5) + Zq = MAX( Zt, 2.0e-9) + ENDIF + + ELSE !LAND + + !Option to modify CZIL according to Chen & Zhang, 2009 + IF ( IZ0TLND2 .EQ. 1 ) THEN + CZIL = 10.0 ** ( -0.40 * ( Z_0 / 0.07 ) ) + ELSE + CZIL = 0.10 + END IF + + Zt = Z_0*EXP(-KARMAN*CZIL*SQRT(restar)) + Zt = MIN( Zt, Z_0/2.) + + Zq = Z_0*EXP(-KARMAN*CZIL*SQRT(restar)) + Zq = MIN( Zq, Z_0/2.) + + !Zq = Zt + ENDIF + + return + + END SUBROUTINE zilitinkevich_1995 +!-------------------------------------------------------------------- + SUBROUTINE Pan_etal_1994(PSIQ,PSIQ2,ustar,psih,psih2,KARMAN,Z1) + + ! This subroutine returns the resistance (PSIQ) for moisture + ! exchange. This is a modified form originating from Pan et al. + ! (1994) but modified according to tests in both the RUC model + ! and WRF-ARW. Note that it is very similar to Carlson and + ! Boland (1978) model (include below in comments) but has an + ! 
extra molecular layer (a third layer) instead of two layers. + + IMPLICIT NONE + REAL, INTENT(IN) :: Z1,ustar,KARMAN,psih,psih2 + REAL, INTENT(OUT) :: psiq,psiq2 + REAL, PARAMETER :: Cpan=1.0 !was 20.8 in Pan et al 1994 + REAL, PARAMETER :: ZL=0.01 + REAL, PARAMETER :: ZMUs=0.2E-3 + REAL, PARAMETER :: XKA = 2.4E-5 + + !PAN et al. (1994): 3-layer model, as in paper: + !ZMU = Cpan*XKA/(KARMAN*UST(I)) + !PSIQ =MAX(KARMAN*ustar*ZMU/XKA + LOG((KARMAN*ustar*ZL + XKA)/XKA + & + ! & Z1/ZL) - PSIH,2.0) + !PSIQ2=MAX(KARMAN*ustar*ZMU/XKA + LOG((KARMAN*ustar*ZL + XKA)/XKA + & + ! & 2./ZL) - PSIH2,2.0) + !MODIFIED FORM: + PSIQ =MAX(KARMAN*ustar*ZMUs/XKA + LOG((KARMAN*ustar*Z1)/XKA + & + & Z1/ZL) - PSIH,2.0) + PSIQ2=MAX(KARMAN*ustar*ZMUs/XKA + LOG((KARMAN*ustar*2.0)/XKA + & + & 2./ZL) - PSIH2,2.0) + + !CARLSON AND BOLAND (1978): 2-layer model + !PSIQ =MAX(LOG(KARMAN*ustar*Z1/XKA + Z1/ZL)-PSIH ,2.0) + !PSIQ2=MAX(LOG(KARMAN*ustar*2./XKA + 2./ZL)-PSIH2 ,2.0) + + END SUBROUTINE Pan_etal_1994 +!-------------------------------------------------------------- + SUBROUTINE davis_etal_2008(Z_0,ustar) + + !This formulation for roughness length was designed to match + !the labratory experiments of Donelan et al. (2004). + !This is an update version from Davis et al. 2008, which + !corrects a small-bias in Z_0 (AHW real-time 2012). + + IMPLICIT NONE + REAL, INTENT(IN) :: ustar + REAL, INTENT(OUT) :: Z_0 + REAL :: ZW, ZN1, ZN2 + REAL, PARAMETER :: G=9.81, OZO=1.59E-5 + + !OLD FORM: Z_0 = 10.*EXP(-10./(ustar**(1./3.))) + !NEW FORM: + + ZW = MIN((ustar/1.06)**(0.3),1.0) + ZN1 = 0.011*ustar*ustar/G + OZO + ZN2 = 10.*exp(-9.5*ustar**(-.3333)) + & + 0.11*1.5E-5/AMAX1(ustar,0.01) + Z_0 = (1.0-ZW) * ZN1 + ZW * ZN2 + + Z_0 = MAX( Z_0, 1.27e-7) !These max/mins were suggested by + Z_0 = MIN( Z_0, 2.85e-3) !Davis et al. 
(2008) + + return + + END SUBROUTINE davis_etal_2008 +!-------------------------------------------------------------------- + SUBROUTINE Taylor_Yelland_2001(Z_0,ustar,wsp10) + + !This formulation for roughness length was designed account for + !wave steepness. + + IMPLICIT NONE + REAL, INTENT(IN) :: ustar,wsp10 + REAL, INTENT(OUT) :: Z_0 + REAL, parameter :: g=9.81, pi=3.14159265 + REAL :: hs, Tp, Lp + + !hs is the significant wave height + hs = 0.0248*(wsp10**2.) + !Tp dominant wave period + Tp = 0.729*MAX(wsp10,0.1) + !Lp is the wavelength of the dominant wave + Lp = g*Tp**2/(2*pi) + + Z_0 = 1200.*hs*(hs/Lp)**4.5 + Z_0 = MAX( Z_0, 1.27e-7) !These max/mins were suggested by + Z_0 = MIN( Z_0, 2.85e-3) !Davis et al. (2008) + + return + + END SUBROUTINE Taylor_Yelland_2001 +!-------------------------------------------------------------------- + SUBROUTINE charnock_1955(Z_0,ustar,wsp10,visc) + + !This version of Charnock's relation employs a varying + !Charnock parameter, similar to COARE3.0 [Fairall et al. (2003)]. + !The Charnock parameter CZC is varied from .011 to .018 + !between 10-m wsp = 10 and 18. + + IMPLICIT NONE + REAL, INTENT(IN) :: ustar, visc, wsp10 + REAL, INTENT(OUT) :: Z_0 + REAL, PARAMETER :: G=9.81, CZO2=0.011 + REAL :: CZC !variable charnock "constant" + + CZC = CZO2 + 0.007*MIN(MAX((wsp10-10.)/8., 0.), 1.0) + Z_0 = CZC*ustar*ustar/G + (0.11*visc/MAX(ustar,0.1)) + Z_0 = MAX( Z_0, 1.27e-7) !These max/mins were suggested by + Z_0 = MIN( Z_0, 2.85e-3) !Davis et al. (2008) + + return + + END SUBROUTINE charnock_1955 +!-------------------------------------------------------------------- + SUBROUTINE garratt_1992(Zt,Zq,Z_0,Ren,landsea) + + !This formulation for the thermal and moisture roughness lengths + !(Zt and Zq) relates them to Z0 via the roughness Reynolds number (Ren). + !This formula comes from Fairall et al. (2003). It is modified from + !the original Garratt-Brutsaert model to better fit the COARE/HEXMAX + !data. 
The formula for land uses a constant ratio (Z_0/7.4) taken + !from Garratt (1992). + + IMPLICIT NONE + REAL, INTENT(IN) :: Ren, Z_0,landsea + REAL, INTENT(OUT) :: Zt,Zq + REAL :: Rq + REAL, PARAMETER :: e=2.71828183 + + IF (landsea-1.5 .GT. 0) THEN !WATER + + Zt = Z_0*EXP(2.0 - (2.48*(Ren**0.25))) + Zq = Z_0*EXP(2.0 - (2.28*(Ren**0.25))) + + Zq = MIN( Zq, 5.5e-5) + Zq = MAX( Zq, 2.0e-9) + Zt = MIN( Zt, 5.5e-5) + Zt = MAX( Zt, 2.0e-9) !same lower limit as ECMWF + ELSE !LAND + Zq = Z_0/(e**2.) !taken from Garratt (1980,1992) + Zt = Zq + ENDIF + + return + + END SUBROUTINE garratt_1992 +!-------------------------------------------------------------------- + SUBROUTINE fairall_2001(Zt,Zq,Ren,ustar,visc) + + !This formulation for thermal and moisture roughness length (Zt and Zq) + !as a function of the roughness Reynolds number (Ren) comes from the + !COARE3.0 formulation, empirically derived from COARE and HEXMAX data + ![Fairall et al. (2003)]. Edson et al. (2004; JGR) suspected that this + !relationship overestimated roughness lengths for low Reynolds number + !flows, so a smooth flow relationship, taken from Garrattt (1992, p. 102), + !is used for flows with Ren < 2. + ! + !Note that this formulation should not be used with the Davis et al. + !(2008) formulation for Zo, because that formulation produces much + !smaller u* (Ren), resulting in a large Zt and Zq. It works best with + !the Charnock or the Taylor and Yelland relationships. + ! + !This is for use over water only. + + IMPLICIT NONE + REAL, INTENT(IN) :: Ren,ustar,visc + REAL, INTENT(OUT) :: Zt,Zq + + IF (Ren .le. 2.) 
then + + Zt = (5.5e-5)*(Ren**(-0.60)) + Zq = Zt + !FOR SMOOTH SEAS, USE GARRATT + !Zq = 0.2*visc/MAX(ustar,0.1) + !Zq = 0.3*visc/MAX(ustar,0.1) + + ELSE + + !FOR ROUGH SEAS, USE FAIRALL + Zt = (5.5e-5)*(Ren**(-0.60)) + Zq = Zt + + ENDIF + + Zt = MIN(Zt,1.0e-4) + Zt = MAX(Zt,2.0e-9) + + Zq = MIN(Zt,1.0e-4) + Zq = MAX(Zt,2.0e-9) + + return + + END SUBROUTINE fairall_2001 +!-------------------------------------------------------------------- + SUBROUTINE Yang_2008(Z_0,Zt,Zq,ustar,tstar,qst,Ren,visc,landsea) + + !This is a modified version of Yang et al (2002 QJRMS, 2008 JAMC) + !and Chen et al (2010, J of Hydromet). Although it was originally + !designed for arid regions with bare soil, it is modified + !here to perform over a broader spectrum of vegetation. + ! + !The original formulation relates the thermal roughness length (Zt) + !to u* and T*: + ! + ! Zt = ht * EXP(-beta*(ustar**0.5)*(ABS(tstar)**0.25)) + ! + !where ht = Renc*visc/ustar and the critical Reynolds number + !(Renc) = 70. Beta was originally = 10 (2002 paper) but was revised + !to 7.2 (in 2008 paper). Their form typically varies the + !ratio Z0/Zt by a few orders of magnitude (1-1E4). + ! + !This modified form uses beta = 0.5 and Renc = 350, so zt generally + !varies similarly to the Zilitinkevich form for small/moderate heat + !fluxes but can become ~O(1/2 Zilitinkevich) for very large negative T*. + !Also, the exponent (0.25) on tstar was changed to 1.0, since we found + !Zt was reduced too much for low-moderate positive heat fluxes. + ! + !This should only be used over land! 
+ + IMPLICIT NONE + REAL, INTENT(IN) :: Z_0, Ren, ustar, tstar, qst, visc, landsea + REAL :: ht, tstar2 + REAL, INTENT(OUT) :: Zt,Zq + REAL, PARAMETER :: Renc=350., beta=0.5, e=2.71828183 + + ht = Renc*visc/MAX(ustar,0.01) + tstar2 = MIN(tstar, 0.0) + + Zt = ht * EXP(-beta*(ustar**0.5)*(ABS(tstar2)**1.0)) + !Zq = ht * EXP(-beta*(ustar**0.5)*(ABS(qst)**1.0)) + Zq = Zt + + Zt = MIN(Zt, Z_0/2.0) !(e**2.)) !limit from Garratt (1980,1992) + Zq = MIN(Zq, Z_0/2.0) !(e**2.)) !limit from Garratt (1980,1992) + + return + + END SUBROUTINE Yang_2008 +!-------------------------------------------------------------------- + SUBROUTINE Andreas_2002(Z_0,Ren,Zt,Zq) + + !This is taken from Andreas (2002; J. of Hydromet). + ! + !This should only be used over snow/ice! + + IMPLICIT NONE + REAL, INTENT(IN) :: Z_0, Ren + REAL, INTENT(OUT) :: Zt, Zq + REAL :: Ren2 + + REAL, PARAMETER :: bt0_s=1.25, bt0_t=0.149, bt0_r=0.317, & + bt1_s=0.0, bt1_t=-0.55, bt1_r=-0.565, & + bt2_s=0.0, bt2_t=0.0, bt2_r=-0.183 + + REAL, PARAMETER :: bq0_s=1.61, bq0_t=0.351, bq0_r=0.396, & + bq1_s=0.0, bq1_t=-0.628, bq1_r=-0.512, & + bq2_s=0.0, bq2_t=0.0, bq2_r=-0.180 + + Ren2 = Ren + ! Make sure that Re is not outside of the range of validity + ! for using their equations + IF (Ren2 .gt. 1000.) Ren2 = 1000. + + IF (Ren2 .le. 0.135) then + + Zt = Z_0*EXP(bt0_s + bt1_s*LOG(Ren2) + bt2_s*LOG(Ren2)**2) + Zq = Z_0*EXP(bq0_s + bq1_s*LOG(Ren2) + bq2_s*LOG(Ren2)**2) + + ELSE IF (Ren2 .gt. 0.135 .AND. Ren2 .lt. 2.5) then + + Zt = Z_0*EXP(bt0_t + bt1_t*LOG(Ren2) + bt2_t*LOG(Ren2)**2) + Zq = Z_0*EXP(bq0_t + bq1_t*LOG(Ren2) + bq2_t*LOG(Ren2)**2) + + ELSE + + Zt = Z_0*EXP(bt0_r + bt1_r*LOG(Ren2) + bt2_r*LOG(Ren2)**2) + Zq = Z_0*EXP(bq0_r + bq1_r*LOG(Ren2) + bq2_r*LOG(Ren2)**2) + + ENDIF + + return + + END SUBROUTINE Andreas_2002 +!-------------------------------------------------------------------- + SUBROUTINE PSI_Hogstrom_1996(psi_m, psi_h, zL, Zt, Z_0, Za) + + ! 
This subroutine returns the stability functions based off + ! of Hogstrom (1996). + + IMPLICIT NONE + REAL, INTENT(IN) :: zL, Zt, Z_0, Za + REAL, INTENT(OUT) :: psi_m, psi_h + REAL :: x, x0, y, y0, zmL, zhL + + zmL = Z_0*zL/Za + zhL = Zt*zL/Za + + IF (zL .gt. 0.) THEN !STABLE (not well tested - seem large) + + psi_m = -5.3*(zL - zmL) + psi_h = -8.0*(zL - zhL) + + ELSE !UNSTABLE + + x = (1.-19.0*zL)**0.25 + x0= (1.-19.0*zmL)**0.25 + y = (1.-11.6*zL)**0.5 + y0= (1.-11.6*zhL)**0.5 + + psi_m = 2.*LOG((1.+x)/(1.+x0)) + & + &LOG((1.+x**2.)/(1.+x0**2.)) - & + &2.0*ATAN(x) + 2.0*ATAN(x0) + psi_h = 2.*LOG((1.+y)/(1.+y0)) + + ENDIF + + return + + END SUBROUTINE PSI_Hogstrom_1996 +!-------------------------------------------------------------------- + SUBROUTINE PSI_DyerHicks(psi_m, psi_h, zL, Zt, Z_0, Za) + + ! This subroutine returns the stability functions based off + ! of Hogstrom (1996), but with different constants compatible + ! with Dyer and Hicks (1970/74?). This formulation is used for + ! testing/development by Nakanishi (personal communication). + + IMPLICIT NONE + REAL, INTENT(IN) :: zL, Zt, Z_0, Za + REAL, INTENT(OUT) :: psi_m, psi_h + REAL :: x, x0, y, y0, zmL, zhL + + zmL = Z_0*zL/Za !Zo/L + zhL = Zt*zL/Za !Zt/L + + IF (zL .gt. 0.) THEN !STABLE + + psi_m = -5.0*(zL - zmL) + psi_h = -5.0*(zL - zhL) + + ELSE !UNSTABLE + + x = (1.-16.*zL)**0.25 + x0= (1.-16.*zmL)**0.25 + + y = (1.-16.*zL)**0.5 + y0= (1.-16.*zhL)**0.5 + + psi_m = 2.*LOG((1.+x)/(1.+x0)) + & + &LOG((1.+x**2.)/(1.+x0**2.)) - & + &2.0*ATAN(x) + 2.0*ATAN(x0) + psi_h = 2.*LOG((1.+y)/(1.+y0)) + + ENDIF + + return + + END SUBROUTINE PSI_DyerHicks +!-------------------------------------------------------------------- + SUBROUTINE PSI_Beljaars_Holtslag_1991(psi_m, psi_h, zL) + + ! This subroutine returns the stability functions based off + ! of Beljaar and Holtslag 1991, which is an extension of Holtslag + ! and Debruin 1989. 
+ + IMPLICIT NONE + REAL, INTENT(IN) :: zL + REAL, INTENT(OUT) :: psi_m, psi_h + REAL, PARAMETER :: a=1., b=0.666, c=5., d=0.35 + + IF (zL .lt. 0.) THEN !UNSTABLE + + WRITE(*,*)"WARNING: Universal stability functions from" + WRITE(*,*)" Beljaars and Holtslag (1991) should only" + WRITE(*,*)" be used in the stable regime!" + psi_m = 0. + psi_h = 0. + + ELSE !STABLE + + psi_m = -(a*zL + b*(zL -(c/d))*exp(-d*zL) + (b*c/d)) + psi_h = -((1.+.666*a*zL)**1.5 + & + b*(zL - (c/d))*exp(-d*zL) + (b*c/d) -1.) + + ENDIF + + return + + END SUBROUTINE PSI_Beljaars_Holtslag_1991 +!-------------------------------------------------------------------- + SUBROUTINE PSI_Zilitinkevich_Esau_2007(psi_m, psi_h, zL) + + ! This subroutine returns the stability functions come from + ! Zilitinkevich and Esau (2007, BM), which are formulatioed from the + ! "generalized similarity theory" and tuned to the LES DATABASE64 + ! to determine their dependence on z/L. + + IMPLICIT NONE + REAL, INTENT(IN) :: zL + REAL, INTENT(OUT) :: psi_m, psi_h + REAL, PARAMETER :: Cm=3.0, Ct=2.5 + + IF (zL .lt. 0.) THEN !UNSTABLE + + WRITE(*,*)"WARNING: Universal stability function from" + WRITE(*,*)" Zilitinkevich and Esau (2007) should only" + WRITE(*,*)" be used in the stable regime!" + psi_m = 0. + psi_h = 0. + + ELSE !STABLE + + psi_m = -Cm*(zL**(5./6.)) + psi_h = -Ct*(zL**(4./5.)) + + ENDIF + + return + + END SUBROUTINE PSI_Zilitinkevich_Esau_2007 +!-------------------------------------------------------------------- + SUBROUTINE PSI_Businger_1971(psi_m, psi_h, zL) + + ! This subroutine returns the flux-profile relationships + ! of Businger el al. 1971. + + IMPLICIT NONE + REAL, INTENT(IN) :: zL + REAL, INTENT(OUT) :: psi_m, psi_h + REAL :: x, y + REAL, PARAMETER :: Pi180 = 3.14159265/180. + + IF (zL .lt. 0.) THEN !UNSTABLE + + x = (1. - 15.0*zL)**0.25 + y = (1. - 9.0*zL)**0.5 + + psi_m = LOG(((1.+x)/2.)**2.) + & + &LOG((1.+x**2.)/2.) - & + &2.0*ATAN(x) + Pi180*90. + psi_h = 2.*LOG((1.+y)/2.) 
+ + ELSE !STABLE + + psi_m = -4.7*zL + psi_h = -(4.7/0.74)*zL + + ENDIF + + return + + END SUBROUTINE PSI_Businger_1971 +!-------------------------------------------------------------------- + SUBROUTINE PSI_Suselj_Sood_2010(psi_m, psi_h, zL) + + !This subroutine returns flux-profile relatioships based off + !of Lobocki (1993), which is derived from the MY-level 2 model. + !Suselj and Sood (2010) applied the surface layer length scales + !from Nakanishi (2001) to get this new relationship. These functions + !are more agressive (larger magnitude) than most formulations. They + !showed improvement over water, but untested over land. + + IMPLICIT NONE + REAL, INTENT(IN) :: zL + REAL, INTENT(OUT) :: psi_m, psi_h + REAL, PARAMETER :: Rfc=0.19, Ric=0.183, PHIT=0.8 + + IF (zL .gt. 0.) THEN !STABLE + + psi_m = -(zL/Rfc + 1.1223*EXP(1.-1.6666/zL)) + !psi_h = -zL*Ric/((Rfc**2.)*PHIT) + 8.209*(zL**1.1091) + !THEIR EQ FOR PSI_H CRASHES THE MODEL AND DOES NOT MATCH + !THEIR FIG 1. THIS EQ (BELOW) MATCHES THEIR FIG 1 BETTER: + psi_h = -(zL*Ric/((Rfc**2.)*5.) + 7.09*(zL**1.1091)) + + ELSE !UNSTABLE + + psi_m = 0.9904*LOG(1. - 14.264*zL) + psi_h = 1.0103*LOG(1. - 16.3066*zL) + + ENDIF + + return + + END SUBROUTINE PSI_Suselj_Sood_2010 +!-------------------------------------------------------------------- + SUBROUTINE Li_etal_2010(zL, Rib, zaz0, z0zt) + + !This subroutine returns a more robust z/L that best matches + !the z/L from Hogstrom (1996) for unstable conditions and Beljaars + !and Holtslag (1991) for stable conditions. 
+ + IMPLICIT NONE + REAL, INTENT(OUT) :: zL + REAL, INTENT(IN) :: Rib, zaz0, z0zt + REAL :: alfa, beta, zaz02, z0zt2 + REAL, PARAMETER :: au11=0.045, bu11=0.003, bu12=0.0059, & + &bu21=-0.0828, bu22=0.8845, bu31=0.1739, & + &bu32=-0.9213, bu33=-0.1057 + REAL, PARAMETER :: aw11=0.5738, aw12=-0.4399, aw21=-4.901,& + &aw22=52.50, bw11=-0.0539, bw12=1.540, & + &bw21=-0.669, bw22=-3.282 + REAL, PARAMETER :: as11=0.7529, as21=14.94, bs11=0.1569,& + &bs21=-0.3091, bs22=-1.303 + + !set limits according to Li et al (2010), p 157. + zaz02=zaz0 + IF (zaz0 .lt. 100.0) zaz02=100. + IF (zaz0 .gt. 100000.0) zaz02=100000. + + !set more limits according to Li et al (2010) + z0zt2=z0zt + IF (z0zt .lt. 0.5) z0zt2=0.5 + IF (z0zt .gt. 100.0) z0zt2=100. + + alfa = LOG(zaz02) + beta = LOG(z0zt2) + + IF (Rib .le. 0.0) THEN + zL = au11*alfa*Rib**2 + ( & + & (bu11*beta + bu12)*alfa**2 + & + & (bu21*beta + bu22)*alfa + & + & (bu31*beta**2 + bu32*beta + bu33))*Rib + !if(zL .LT. -15 .OR. zl .GT. 0.)print*,"VIOLATION Rib<0:",zL + zL = MAX(zL,-15.) !LIMITS SET ACCORDING TO Li et al (2010) + zL = MIN(zL,0.) !Figure 1. + ELSEIF (Rib .gt. 0.0 .AND. Rib .le. 0.2) THEN + zL = ((aw11*beta + aw12)*alfa + & + & (aw21*beta + aw22))*Rib**2 + & + & ((bw11*beta + bw12)*alfa + & + & (bw21*beta + bw22))*Rib + !if(zL .LT. 0 .OR. zl .GT. 4)print*,"VIOLATION 00.2:",zL + zL = MIN(zL,20.) !LIMITS ACCORDING TO Li et al (2010), THIER + !FIGUE 1C. + zL = MAX(zL,1.) + ENDIF + + return + + END SUBROUTINE Li_etal_2010 +!-------------------------------------------------------------------- + +END MODULE module_sf_mynn diff --git a/src/core_atmosphere/physics/physics_wrf/module_sf_oml.F b/src/core_atmosphere/physics/physics_wrf/module_sf_oml.F new file mode 100644 index 0000000000..0ccda6e1f6 --- /dev/null +++ b/src/core_atmosphere/physics/physics_wrf/module_sf_oml.F @@ -0,0 +1,162 @@ +!WRF:MODEL_LAYER:PHYSICS +! +! 
MPAS version (pulled out all the unused WRF indices) WCS 20140611 +MODULE module_sf_oml + +CONTAINS + +!---------------------------------------------------------------- + SUBROUTINE OML1D(TML,T0ML,H,H0,HUML, & + HVML,TSK,HFX, & + LH,GSW,GLW,TMOML, & + UAIR,VAIR,UST,F,EMISS,STBOLT,G,DT, & + OML_GAMMA, OML_RELAXATION_TIME ) + +!---------------------------------------------------------------- + IMPLICIT NONE +!---------------------------------------------------------------- +! +! SUBROUTINE OCEANML CALCULATES THE SEA SURFACE TEMPERATURE (TSK) +! FROM A SIMPLE OCEAN MIXED LAYER MODEL BASED ON +! Pollard, Rhines and Thompson (1973). +! +!-- TML ocean mixed layer temperature (K) +!-- T0ML ocean mixed layer temperature (K) at initial time +!-- TMOML top 200 m ocean mean temperature (K) at initial time +!-- H ocean mixed layer depth (m) +!-- H0 ocean mixed layer depth (m) at initial time +!-- HUML ocean mixed layer u component of wind +!-- HVML ocean mixed layer v component of wind +!-- OML_GAMMA deep water lapse rate (K m-1) +!-- SF_OCEAN_PHYSICS whether to call oml model +!-- UAIR,VAIR lowest model level wind component +!-- UST frictional velocity +!-- HFX upward heat flux at the surface (W/m^2) +!-- LH latent heat flux at the surface (W/m^2) +!-- TSK surface temperature (K) +!-- GSW downward short wave flux at ground surface (W/m^2) +!-- GLW downward long wave flux at ground surface (W/m^2) +!-- EMISS emissivity of the surface +!-- STBOLT Stefan-Boltzmann constant (W/m^2/K^4) +!-- F Coriolis parameter +!-- DT time step (second) +!-- G acceleration due to gravity +!-- OML_RELAXATION_TIME time scale (s) to relax TML to T0ML, H to H0, +! HUML and HVML to 0; value <=0 means no relaxation +! +!---------------------------------------------------------------- + + REAL, INTENT(INOUT) :: TML, H, HUML, HVML, TSK + + REAL, INTENT(IN ) :: T0ML, H0, HFX, LH, GSW, GLW, & + UAIR, VAIR, UST, F, EMISS, TMOML + + REAL, INTENT(IN) :: STBOLT, G, DT, OML_GAMMA, OML_RELAXATION_TIME + +! 
Local + REAL :: rhoair, rhowater, Gam, alp, BV2, A1, A2, B2, u, v, wspd, & + hu1, hv1, hu2, hv2, taux, tauy, tauxair, tauyair, q, hold, & + hsqrd, thp, cwater, ust2 + CHARACTER(LEN=120) :: time_series + + hu1=huml + hv1=hvml + rhoair=1. + rhowater=1000. + cwater=4200. +! Deep ocean lapse rate (K/m) - from Rich + Gam=oml_gamma +! if(i.eq.1 .and. j.eq.1 .or. i.eq.105.and.j.eq.105) print *, 'gamma = ', gam +! Gam=0.14 +! Gam=5.6/40. +! Gam=5./100. +! Thermal expansion coeff (/K) +! alp=.0002 +! temp dependence (/K) + alp=max((tml-273.15)*1.e-5, 1.e-6) + BV2=alp*g*Gam + thp=t0ml-Gam*(h-h0) + A1=(tml-thp)*h - 0.5*Gam*h*h + if(h.ne.0.)then + u=hu1/h + v=hv1/h + else + u=0. + v=0. + endif + +! time step + + q=(-hfx-lh+gsw+glw*emiss-stbolt*emiss*tml*tml*tml*tml)/(rhowater*cwater) +! wspd=max(sqrt(uair*uair+vair*vair),0.1) + wspd=sqrt(uair*uair+vair*vair) + if (wspd .lt. 1.e-10 ) then +! print *, 'i,j,wspd are ', i,j,wspd + wspd = 1.e-10 + endif +! limit ust to 1.6 to give a value of ust for water of 0.05 +! ust2=min(ust, 1.6) +! new limit for ust: reduce atmospheric ust by half for ocean + ust2=0.5*ust + tauxair=ust2*ust2*uair/wspd + taux=rhoair/rhowater*tauxair + tauyair=ust2*ust2*vair/wspd + tauy=rhoair/rhowater*tauyair +! note: forward-backward coriolis force for effective time-centering + hu2=hu1+dt*( f*hv1 + taux) + hv2=hv1+dt*(-f*hu2 + tauy) +! consider the flux effect + A2=A1+q*dt + + huml=hu2 + hvml=hv2 + + hold=h + B2=hu2*hu2+hv2*hv2 + hsqrd=-A2/Gam + sqrt(A2*A2/(Gam*Gam) + 2.*B2/BV2) + h=sqrt(max(hsqrd,0.0)) +! limit to positive h change + if(h.lt.hold)h=hold + +! no change unless tml is warmer than layer mean temp tmol or tsk-5 (see omlinit) + if(tml.ge.tmoml .and. h.ne.0.)then + +! no change unless tml is warmer than layer mean temp tmoml or tsk-5 (see omlinit) + if(tml.ge.tmoml)then + tml=max(t0ml - Gam*(h-h0) + 0.5*Gam*h + A2/h, tmoml) + else + tml=tmoml + endif + u=hu2/h + v=hv2/h + else + tml=t0ml + u=0. + v=0. + endif + +! 
relax TML T0ML and H to H0, HUML and HVML to 0 + + if (oml_relaxation_time .gt. 0.) then + tml = tml - (tml-t0ml)*dt/oml_relaxation_time + h = h - (h-h0)*dt/oml_relaxation_time + huml = huml - huml*dt/oml_relaxation_time + hvml = hvml - hvml*dt/oml_relaxation_time + end if + + tsk=tml + +! if(h.gt.100.)print *,i,j,h,tml,' h,tml' + +! ww: output point data +! if( (i.eq.190 .and. j.eq.115) .or. (i.eq.170 .and. j.eq.125) ) then +! write(jtime,fmt='("TS ",f10.0)') float(itimestep) +! CALL wrf_message ( TRIM(jtime) ) +! write(time_series,fmt='("OML",2I4,2F9.5,2F8.2,2E15.5,F8.3)') & +! i,j,u,v,tml,h,taux,tauy,a2 +! CALL wrf_message ( TRIM(time_series) ) +! end if + + END SUBROUTINE OML1D + +END MODULE module_sf_oml diff --git a/src/core_atmosphere/physics/physics_wrf/module_sf_sfcdiags.F b/src/core_atmosphere/physics/physics_wrf/module_sf_sfcdiags.F new file mode 100644 index 0000000000..5f22012477 --- /dev/null +++ b/src/core_atmosphere/physics/physics_wrf/module_sf_sfcdiags.F @@ -0,0 +1,71 @@ +!WRF:MODEL_LAYER:PHYSICS +! +MODULE module_sf_sfcdiags + +CONTAINS + + SUBROUTINE SFCDIAGS(HFX,QFX,TSK,QSFC,CHS2,CQS2,T2,TH2,Q2, & + PSFC,CP,R_d,ROVCP,CHS,T3D,QV3D,UA_PHYS, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte ) +!------------------------------------------------------------------- + IMPLICIT NONE +!------------------------------------------------------------------- + INTEGER, INTENT(IN ) :: ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte + REAL, DIMENSION( ims:ime, jms:jme ) , & + INTENT(IN) :: HFX, & + QFX, & + TSK, & + QSFC + REAL, DIMENSION( ims:ime, jms:jme ) , & + INTENT(INOUT) :: Q2, & + TH2, & + T2 + REAL, DIMENSION( ims:ime, jms:jme ) , & + INTENT(IN) :: PSFC, & + CHS2, & + CQS2 + REAL, INTENT(IN ) :: CP,R_d,ROVCP + +! UA changes + LOGICAL, INTENT(IN) :: UA_PHYS ! 
UA: flag for UA option + REAL, DIMENSION( ims:ime, kms:kme, jms:jme ) , & + INTENT(IN ) :: QV3D,T3D + REAL, DIMENSION( ims:ime, jms:jme ) , & + INTENT(IN) :: CHS + +! LOCAL VARS + INTEGER :: I,J + REAL :: RHO + + DO J=jts,jte + DO I=its,ite + RHO = PSFC(I,J)/(R_d * TSK(I,J)) + if(CQS2(I,J).lt.1.E-5) then + Q2(I,J)=QSFC(I,J) + else + IF ( UA_PHYS ) THEN + Q2(I,J) = QSFC(I,J) - CHS(I,J)/CQS2(I,J)*(QSFC(I,J) - QV3D(i,1,j)) + ELSE + Q2(I,J) = QSFC(I,J) - QFX(I,J)/(RHO*CQS2(I,J)) + ENDIF + endif + if(CHS2(I,J).lt.1.E-5) then + T2(I,J) = TSK(I,J) + else + IF ( UA_PHYS ) THEN + T2(I,J) = TSK(I,J) - CHS(I,J)/CHS2(I,J)*(TSK(I,J) - T3D(i,1,j)) + ELSE + T2(I,J) = TSK(I,J) - HFX(I,J)/(RHO*CP*CHS2(I,J)) + ENDIF + endif + TH2(I,J) = T2(I,J)*(1.E5/PSFC(I,J))**ROVCP + ENDDO + ENDDO + + END SUBROUTINE SFCDIAGS + +END MODULE module_sf_sfcdiags diff --git a/src/core_atmosphere/physics/physics_wrf/module_sf_sfclay.F b/src/core_atmosphere/physics/physics_wrf/module_sf_sfclay.F index d20b397621..010f54dbf6 100644 --- a/src/core_atmosphere/physics/physics_wrf/module_sf_sfclay.F +++ b/src/core_atmosphere/physics/physics_wrf/module_sf_sfclay.F @@ -1,10 +1,12 @@ -!================================================================================================== -! modifications made to sourcecode: -! * changed ustm to an "inout" variable instead of an "out" variable. -! Laura D. Fowler (laura@ucar.edu / 2015-02-19). +!================================================================================================================= +!module_sf_sfclay.F was originally copied from ./phys/module_sf_sfclay.F from WRF version 3.8.1. +!Laura D. Fowler (laura@ucar.edu) / 2016-10-26. -!================================================================================================== +!modifications to sourcecode for MPAS: +! * added the actual size of each cell in the calculation of the Mahrt and Sun low-resolution correction. +! Laura D. Fowler (laura@ucar.edu) / 2016-10-26. 
+!================================================================================================================= !WRF:MODEL_LAYER:PHYSICS ! MODULE module_sf_sfclay @@ -35,10 +37,16 @@ SUBROUTINE SFCLAY(U3D,V3D,T3D,QV3D,P3D,dz8w, & #if defined(mpas) ,dxCell & #endif - ) + ) !------------------------------------------------------------------- IMPLICIT NONE !------------------------------------------------------------------- +! Changes in V3.7 over water surfaces: +! 1. for ZNT/Cd, replacing constant OZO with 0.11*1.5E-5/UST(I) +! the COARE 3.5 (Edson et al. 2013) formulation is also available +! 2. for VCONV, reducing magnitude by half +! 3. for Ck, replacing Carlson-Boland with COARE 3 +!------------------------------------------------------------------- !-- U3D 3D u-velocity interpolated to theta points (m/s) !-- V3D 3D v-velocity interpolated to theta points (m/s) !-- T3D temperature (K) @@ -141,13 +149,6 @@ SUBROUTINE SFCLAY(U3D,V3D,T3D,QV3D,P3D,dz8w, & PBLH, & XLAND, & TSK - REAL, DIMENSION( ims:ime, jms:jme ) , & - INTENT(OUT ) :: U10, & - V10, & - TH2, & - T2, & - Q2, & - QSFC ! REAL, DIMENSION( ims:ime, jms:jme ) , & @@ -185,20 +186,29 @@ SUBROUTINE SFCLAY(U3D,V3D,T3D,QV3D,P3D,dz8w, & INTENT(INOUT) :: & QGH - - REAL, INTENT(IN ) :: CP,G,ROVCP,R,XLV,DX REAL, OPTIONAL, DIMENSION( ims:ime, jms:jme ) , & - INTENT(OUT) :: ck,cka,cd,cda,ustm + INTENT(OUT) :: ck,cka,cd,cda + + REAL, OPTIONAL, DIMENSION( ims:ime, jms:jme ) , & + INTENT(INOUT) :: USTM INTEGER, OPTIONAL, INTENT(IN ) :: ISFTCFLX, IZ0TLND INTEGER, OPTIONAL, INTENT(IN ) :: SCM_FORCE_FLUX #if defined(mpas) -!MPAS specific (Laura D. Fowler - 2013-03-06): real,intent(in),dimension(ims:ime,jms:jme),optional:: dxCell -!MPAS specific end. + real,intent(inout),dimension(ims:ime,jms:jme):: qsfc + real,intent(out),dimension(ims:ime,jms:jme) :: u10,v10,th2,t2,q2 +#else + REAL, DIMENSION( ims:ime, jms:jme ) , & + INTENT(OUT ) :: U10, & + V10, & + TH2, & + T2, & + Q2, & + QSFC #endif ! 
LOCAL VARS @@ -246,7 +256,6 @@ SUBROUTINE SFCLAY(U3D,V3D,T3D,QV3D,P3D,dz8w, & ims,ime, jms,jme, kms,kme, & its,ite, jts,jte, kts,kte & #if defined(mpas) -!MPAS specific (Laura D. Fowler - 2013-03-06): ,isftcflx,iz0tlnd,scm_force_flux, & USTM(ims,j),CK(ims,j),CKA(ims,j), & CD(ims,j),CDA(ims,j),dxCell(ims,j) & @@ -277,12 +286,10 @@ SUBROUTINE SFCLAY1D(J,UX,VX,T1D,QV1D,P1D,dz8w1d, & its,ite, jts,jte, kts,kte, & isftcflx, iz0tlnd, scm_force_flux, & #if defined(mpas) -!MPAS specific (Laura D. Fowler - 2013-03-06): ustm,ck,cka,cd,cda,dxCell ) #else ustm,ck,cka,cd,cda ) #endif - !------------------------------------------------------------------- IMPLICIT NONE !------------------------------------------------------------------- @@ -353,20 +360,17 @@ SUBROUTINE SFCLAY1D(J,UX,VX,T1D,QV1D,P1D,dz8w1d, & T1D #if defined(mpas) -!MPAS specific (Laura D. Fowler - 2013-03-06): - real,intent(in),dimension(ims:ime),optional :: dxCell - real,intent(inout),dimension(ims:ime),optional:: ustm - real,intent(out),dimension(ims:ime),optional :: ck,cka,cd,cda -!MPAS specific end. -#else - REAL, OPTIONAL, DIMENSION( ims:ime ) , & - INTENT(OUT) :: ck,cka,cd,cda,ustm + real,intent(in),dimension(ims:ime),optional:: dxCell #endif + REAL, OPTIONAL, DIMENSION( ims:ime ) , & + INTENT(OUT) :: ck,cka,cd,cda + REAL, OPTIONAL, DIMENSION( ims:ime ) , & + INTENT(INOUT) :: USTM + INTEGER, OPTIONAL, INTENT(IN ) :: ISFTCFLX, IZ0TLND INTEGER, OPTIONAL, INTENT(IN ) :: SCM_FORCE_FLUX - ! 
LOCAL VARS REAL, DIMENSION( its:ite ) :: ZA, & @@ -398,8 +402,9 @@ SUBROUTINE SFCLAY1D(J,UX,VX,T1D,QV1D,P1D,dz8w1d, & REAL :: PL,THCON,TVCON,E1 REAL :: ZL,TSKV,DTHVDZ,DTHVM,VCONV,RZOL,RZOL2,RZOL10,ZOL2,ZOL10 REAL :: DTG,PSIX,DTTHX,PSIX10,PSIT,PSIT2,PSIQ,PSIQ2,PSIQ10 - REAL :: FLUXC,VSGD,Z0Q,VISC,RESTAR,CZIL,RESTAR2 + REAL :: FLUXC,VSGD,Z0Q,VISC,RESTAR,CZIL,GZ0OZQ,GZ0OZT REAL :: ZW, ZN1, ZN2 + REAL :: Z0T, CZC !------------------------------------------------------------------- KL=kte @@ -514,7 +519,7 @@ SUBROUTINE SFCLAY1D(J,UX,VX,T1D,QV1D,P1D,dz8w1d, & TSKV=THGB(I)*(1.+EP1*QSFC(I)) DTHVDZ=(THVX(I)-TSKV) ! Convective velocity scale Vc and subgrid-scale velocity Vsg -! following Beljaars (1995, QJRMS) and Mahrt and Sun (1995, MWR) +! following Beljaars (1994, QJRMS) and Mahrt and Sun (1995, MWR) ! ... HONG Aug. 2001 ! ! VCONV = 0.25*sqrt(g/tskv*pblh(i)*dthvm) @@ -529,7 +534,9 @@ SUBROUTINE SFCLAY1D(J,UX,VX,T1D,QV1D,P1D,dz8w1d, & ELSE DTHVM=0. ENDIF - VCONV = 2.*SQRT(DTHVM) +! VCONV = 2.*SQRT(DTHVM) +! V3.7: reducing contribution in calm conditions + VCONV = SQRT(DTHVM) endif ! Mahrt and Sun low-res correction !MPAS specific (Laura D. Fowler): We take into accound the actual size of individual @@ -730,6 +737,26 @@ SUBROUTINE SFCLAY1D(J,UX,VX,T1D,QV1D,P1D,dz8w1d, & PSIQ2=ALOG(KARMAN*UST(I)*2./XKA+2./ZL)-PSIH2(I) ! AHW: mods to compute ck, cd PSIQ10=ALOG(KARMAN*UST(I)*10./XKA+10./ZL)-PSIH10(I) + +! V3.7: using Fairall 2003 to compute z0q and z0t over water: +! adapted from module_sf_mynn.F + IF ( (XLAND(I)-1.5).GE.0. ) THEN + VISC=(1.32+0.009*(SCR3(I)-273.15))*1.E-5 +! VISC=1.326e-5*(1. + 6.542e-3*SCR3(I) + 8.301e-6*SCR3(I)*SCR3(I) & +! - 4.84e-9*SCR3(I)*SCR3(I)*SCR3(I)) + RESTAR=UST(I)*ZNT(I)/VISC + Z0T = (5.5e-5)*(RESTAR**(-0.60)) + Z0T = MIN(Z0T,1.0e-4) + Z0T = MAX(Z0T,2.0e-9) + Z0Q = Z0T + + PSIQ=max(ALOG((ZA(I)+Z0Q)/Z0Q)-PSIH(I), 2.) + PSIT=max(ALOG((ZA(I)+Z0T)/Z0T)-PSIH(I), 2.) + PSIQ2=max(ALOG((2.+Z0Q)/Z0Q)-PSIH2(I), 2.) 
+ PSIT2=max(ALOG((2.+Z0T)/Z0T)-PSIH2(I), 2.) + PSIQ10=max(ALOG((10.+Z0Q)/Z0Q)-PSIH10(I), 2.) + ENDIF + IF ( PRESENT(ISFTCFLX) ) THEN IF ( ISFTCFLX.EQ.1 .AND. (XLAND(I)-1.5).GE.0. ) THEN ! v3.1 @@ -747,16 +774,19 @@ SUBROUTINE SFCLAY1D(J,UX,VX,T1D,QV1D,P1D,dz8w1d, & IF ( ISFTCFLX.EQ.2 .AND. (XLAND(I)-1.5).GE.0. ) THEN ! AHW: Garratt formula: Calculate roughness Reynolds number ! Kinematic viscosity of air (linear approc to -! temp dependence at sea levle) +! temp dependence at sea level) +! GZ0OZT and GZ0OZQ are based off formulas from Brutsaert (1975), which +! Garratt (1992) used with values of k = 0.40, Pr = 0.71, and Sc = 0.60 VISC=(1.32+0.009*(SCR3(I)-273.15))*1.E-5 !! VISC=1.5E-5 RESTAR=UST(I)*ZNT(I)/VISC - RESTAR2=2.48*SQRT(SQRT(RESTAR))-2. - PSIT=GZ1OZ0(I)-PSIH(I)+RESTAR2 - PSIQ=GZ1OZ0(I)-PSIH(I)+2.28*SQRT(SQRT(RESTAR))-2. - PSIT2=GZ2OZ0(I)-PSIH2(I)+RESTAR2 - PSIQ2=GZ2OZ0(I)-PSIH2(I)+2.28*SQRT(SQRT(RESTAR))-2. - PSIQ10=GZ10OZ0(I)-PSIH(I)+2.28*SQRT(SQRT(RESTAR))-2. + GZ0OZT=0.40*(7.3*SQRT(SQRT(RESTAR))*SQRT(0.71)-5.) + GZ0OZQ=0.40*(7.3*SQRT(SQRT(RESTAR))*SQRT(0.60)-5.) + PSIT=GZ1OZ0(I)-PSIH(I)+GZ0OZT + PSIQ=GZ1OZ0(I)-PSIH(I)+GZ0OZQ + PSIT2=GZ2OZ0(I)-PSIH2(I)+GZ0OZT + PSIQ2=GZ2OZ0(I)-PSIH2(I)+GZ0OZQ + PSIQ10=GZ10OZ0(I)-PSIH(I)+GZ0OZQ ENDIF ENDIF IF(PRESENT(ck) .and. PRESENT(cd) .and. PRESENT(cka) .and. PRESENT(cda)) THEN @@ -830,7 +860,13 @@ SUBROUTINE SFCLAY1D(J,UX,VX,T1D,QV1D,P1D,dz8w1d, & DO 360 I=its,ite IF((XLAND(I)-1.5).GE.0)THEN - ZNT(I)=CZO*UST(I)*UST(I)/G+OZO +! ZNT(I)=CZO*UST(I)*UST(I)/G+OZO +! Since V3.7 (ref: EC Physics document for Cy36r1) + ZNT(I)=CZO*UST(I)*UST(I)/G+0.11*1.5E-5/UST(I) +! COARE 3.5 (Edson et al. 2013) +! CZC = 0.0017*WSPD(I)-0.005 +! CZC = min(CZC,0.028) +! ZNT(I)=CZC*UST(I)*UST(I)/G+0.11*1.5E-5/UST(I) ! 
AHW: change roughness length, and hence the drag coefficients Ck and Cd IF ( PRESENT(ISFTCFLX) ) THEN IF ( ISFTCFLX.NE.0 ) THEN @@ -886,12 +922,12 @@ SUBROUTINE SFCLAY1D(J,UX,VX,T1D,QV1D,P1D,dz8w1d, & DO 400 I=its,ite IF(XLAND(I)-1.5.GT.0.)THEN HFX(I)=FLHC(I)*(THGB(I)-THX(I)) - IF ( PRESENT(ISFTCFLX) ) THEN - IF ( ISFTCFLX.NE.0 ) THEN -! AHW: add dissipative heating term - HFX(I)=HFX(I)+RHOX(I)*USTM(I)*USTM(I)*WSPDI(I) - ENDIF - ENDIF +! IF ( PRESENT(ISFTCFLX) ) THEN +! IF ( ISFTCFLX.NE.0 ) THEN +! AHW: add dissipative heating term (commented out in 3.6.1) +! HFX(I)=HFX(I)+RHOX(I)*USTM(I)*USTM(I)*WSPDI(I) +! ENDIF +! ENDIF ELSEIF(XLAND(I)-1.5.LT.0.)THEN HFX(I)=FLHC(I)*(THGB(I)-THX(I)) HFX(I)=AMAX1(HFX(I),-250.) diff --git a/src/core_atmosphere/utils/Makefile b/src/core_atmosphere/utils/Makefile new file mode 100644 index 0000000000..885a4e23ce --- /dev/null +++ b/src/core_atmosphere/utils/Makefile @@ -0,0 +1,29 @@ +.SUFFIXES: .F .o + +all: build_tables + mv build_tables ../../.. + +build_tables: build_tables.o atmphys_build_tables_thompson.o + $(LINKER) $(LDFLAGS) -o build_tables build_tables.o atmphys_build_tables_thompson.o -L../../framework -L../physics -lphys -lframework + + +build_tables.o: \ + atmphys_build_tables_thompson.o + +atmphys_build_tables_thompson.o: \ + ../physics/physics_wrf/module_mp_thompson.o + +clean: + $(RM) ../../../build_tables + $(RM) *.o *.mod *.f90 + @# Some Intel compilers generate *.i files; clean them up, too + $(RM) *.i + +.F.o: + $(RM) $@ $*.mod +ifeq "$(GEN_F90)" "true" + $(CPP) $(CPPFLAGS) $(PHYSICS) $(CPPINCLUDES) $< > $*.f90 + $(FC) $(FFLAGS) -c $*.f90 $(FCINCLUDES) -I../../framework -I../../operators -I../physics -I../physics/physics_wrf -I../../external/esmf_time_f90 +else + $(FC) $(CPPFLAGS) $(PHYSICS) $(FFLAGS) -c $*.F $(CPPINCLUDES) $(FCINCLUDES) -I../../framework -I../../operators -I../physics -I../physics/physics_wrf -I../../external/esmf_time_f90 +endif diff --git 
a/src/core_atmosphere/utils/atmphys_build_tables_thompson.F b/src/core_atmosphere/utils/atmphys_build_tables_thompson.F new file mode 100644 index 0000000000..83ac7b9014 --- /dev/null +++ b/src/core_atmosphere/utils/atmphys_build_tables_thompson.F @@ -0,0 +1,145 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!================================================================================================================= + module atmphys_build_tables_thompson + use module_mp_thompson + + implicit none + private + public:: build_tables_thompson + +!builds the files containing the look-up tables for the Thompson cloud microphysics scheme. +!Laura D. Fowler (send comments to laura@ucar.edu). +!2016-11-01. + + + contains + + +!================================================================================================================= + subroutine build_tables_thompson +!================================================================================================================= + +!local variables: + logical, parameter:: l_mp_tables = .false. 
+ integer:: istatus + +!----------------------------------------------------------------------------------------------------------------- + +!--- partial initialization before building the look-up tables: + call thompson_init(l_mp_tables) + +!--- building look-up table for rain collecting graupel: + write(0,*) + write(0,*) '--- building MP_THOMPSON_QRacrQG_DATA.DBL' + open(unit=11,file='MP_THOMPSON_QRacrQG_DATA.DBL',form='unformatted',status='new',iostat=istatus) + if (istatus /= 0) then + call print_parallel_mesg('MP_THOMPSON_QRacrQG_DATA.DBL') + return + end if + call qr_acr_qg + write(11) tcg_racg + write(11) tmr_racg + write(11) tcr_gacr + write(11) tmg_gacr + write(11) tnr_racg + write(11) tnr_gacr + close(unit=11) + +!--- building look-up table for rain collecting snow: + write(0,*) + write(0,*) '--- building MP_THOMPSON_QRacrQS_DATA.DBL' + open(unit=11,file='MP_THOMPSON_QRacrQS_DATA.DBL',form='unformatted',status='new',iostat=istatus) + if (istatus /= 0) then + call print_parallel_mesg('MP_THOMPSON_QRacrQS_DATA.DBL') + return + end if + call qr_acr_qs + write(11)tcs_racs1 + write(11)tmr_racs1 + write(11)tcs_racs2 + write(11)tmr_racs2 + write(11)tcr_sacr1 + write(11)tms_sacr1 + write(11)tcr_sacr2 + write(11)tms_sacr2 + write(11)tnr_racs1 + write(11)tnr_racs2 + write(11)tnr_sacr1 + write(11)tnr_sacr2 + close(unit=11) + +!--- building look-up table for freezing of cloud droplets: + write(0,*) + write(0,*) '--- building MP_THOMPSON_freezeH2O_DATA.DBL' + open(unit=11,file='MP_THOMPSON_freezeH2O_DATA.DBL',form='unformatted',status='new',iostat=istatus) + if (istatus /= 0) then + call print_parallel_mesg('MP_THOMPSON_freezeH2O_DATA.DBL') + return + end if + call freezeH2O + write(11) tpi_qrfz + write(11) tni_qrfz + write(11) tpg_qrfz + write(11) tnr_qrfz + write(11) tpi_qcfz + write(11) tni_qcfz + close(unit=11) + +!--- building look-up table for autoconversion of cloud ice to snow: + write(0,*) + write(0,*) '--- building MP_THOMPSON_QIautQS_DATA.DBL' + 
open(unit=11,file='MP_THOMPSON_QIautQS_DATA.DBL',form='unformatted',status='new',iostat=istatus) + if (istatus /= 0) then + call print_parallel_mesg('MP_THOMPSON_QIautQS_DATA.DBL') + return + end if + call qi_aut_qs + write(11) tpi_ide + write(11) tps_iaus + write(11) tni_iaus + close(unit=11) + + write(0,*) + write(0,*) 'Finished building all tables.' + write(0,*) + write(0,*) '*******************************************************************************' + write(0,*) 'To preserve these tables when running ''make clean'', please copy the following' + write(0,*) 'files to the src/core_atmosphere/physics/physics_wrf/files/ directory:' + write(0,*) + write(0,*) ' MP_THOMPSON_QRacrQG_DATA.DBL' + write(0,*) ' MP_THOMPSON_QRacrQS_DATA.DBL' + write(0,*) ' MP_THOMPSON_freezeH2O_DATA.DBL' + write(0,*) ' MP_THOMPSON_QIautQS_DATA.DBL' + write(0,*) + write(0,*) 'Tables in the src/core_atmosphere/physics/physics_wrf/files/ directory ' + write(0,*) 'will be automatically linked to the top-level MPAS directory when compiling' + write(0,*) 'the ''atmosphere'' core.' + write(0,*) '*******************************************************************************' + + end subroutine build_tables_thompson + + +!================================================================================================================= + subroutine print_parallel_mesg(filename) +!================================================================================================================= + + character(len=*), intent(in) :: filename + + write(0,*) '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + write(0,*) '! Error encountered while trying to create new file '//trim(filename) + write(0,*) '! ' + write(0,*) '! Please ensure that this file does not exist before running ''build_tables'',' + write(0,*) '! and ensure that ''build_tables'' is *NOT* run in parallel.' + write(0,*) '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' 
+ + end subroutine print_parallel_mesg + +!================================================================================================================= + end module atmphys_build_tables_thompson +!================================================================================================================= diff --git a/src/core_atmosphere/utils/build_tables.F b/src/core_atmosphere/utils/build_tables.F new file mode 100644 index 0000000000..3521d415f4 --- /dev/null +++ b/src/core_atmosphere/utils/build_tables.F @@ -0,0 +1,23 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!================================================================================================================= + program build_tables + use atmphys_build_tables_thompson + + implicit none + + write(0,*) ' ' + write(0,*) 'Constructing tables for Thompson cloud microphysics scheme.' + write(0,*) 'This may take as much as 15-20 minutes with an optimized build...' 
+ + call build_tables_thompson + + stop + + end program build_tables +!================================================================================================================= diff --git a/src/core_init_atmosphere/Makefile b/src/core_init_atmosphere/Makefile index d9cabd0e85..9579f48573 100644 --- a/src/core_init_atmosphere/Makefile +++ b/src/core_init_atmosphere/Makefile @@ -1,20 +1,25 @@ .SUFFIXES: .F .o -OBJS = mpas_init_atm_core.o \ - mpas_init_atm_core_interface.o \ - mpas_init_atm_cases.o \ - mpas_atm_advection.o \ - mpas_init_atm_read_met.o \ - mpas_init_atm_llxy.o \ - mpas_init_atm_bitarray.o \ - mpas_init_atm_queue.o \ - mpas_init_atm_hinterp.o \ - mpas_init_atm_static.o \ - mpas_init_atm_surface.o \ - read_geogrid.o \ - mpas_atmphys_date_time.o \ - mpas_atmphys_initialize_real.o \ - mpas_atmphys_utilities.o +OBJS = \ + mpas_init_atm_core.o \ + mpas_init_atm_core_interface.o \ + mpas_init_atm_cases.o \ + mpas_atm_advection.o \ + mpas_init_atm_read_met.o \ + mpas_init_atm_llxy.o \ + mpas_init_atm_bitarray.o \ + mpas_init_atm_queue.o \ + mpas_init_atm_hinterp.o \ + mpas_init_atm_static.o \ + mpas_init_atm_gwd.o \ + mpas_init_atm_surface.o \ + mpas_init_atm_vinterp.o \ + read_geogrid.o \ + mpas_atmphys_constants.o \ + mpas_atmphys_date_time.o \ + mpas_atmphys_functions.o \ + mpas_atmphys_initialize_real.o \ + mpas_atmphys_utilities.o all: core_hyd @@ -46,7 +51,11 @@ mpas_init_atm_cases.o: \ mpas_init_atm_llxy.o \ mpas_init_atm_hinterp.o \ mpas_init_atm_static.o \ + mpas_init_atm_gwd.o \ mpas_init_atm_surface.o \ + mpas_init_atm_vinterp.o \ + mpas_atmphys_constants.o \ + mpas_atmphys_functions.o \ mpas_atmphys_initialize_real.o mpas_init_atm_hinterp.o: mpas_init_atm_queue.o mpas_init_atm_bitarray.o diff --git a/src/core_init_atmosphere/Registry.xml b/src/core_init_atmosphere/Registry.xml index 9c0eb2ea7d..6a6cccb384 100644 --- a/src/core_init_atmosphere/Registry.xml +++ b/src/core_init_atmosphere/Registry.xml @@ -1,5 +1,5 @@ - + @@ -31,57 +31,224 @@ - 
- - - - - - + + + + + + + + + + + + + + + - - - - - + + + + + + + + + + + - - - - - + + + + + + + + + + + + + - - - - - + + + + + + + + + + + + + - - - - - + + + + + + + + + + + + + + + - - + + + + + - - - - + + + + + + + + + @@ -91,6 +258,7 @@ + @@ -145,39 +313,39 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -188,7 +356,7 @@ - + @@ -282,7 +450,7 @@ - + @@ -299,6 +467,7 @@ + @@ -327,6 +496,7 @@ + @@ -338,6 +508,12 @@ + + + + + + - - + + @@ -479,6 +655,9 @@ + + + @@ -490,6 +669,7 @@ + @@ -519,6 +699,13 @@ + + + + + + + @@ -527,6 +714,7 @@ + diff --git a/src/core_init_atmosphere/mpas_atm_advection.F b/src/core_init_atmosphere/mpas_atm_advection.F index e543a0a422..76fbb1435b 100644 --- a/src/core_init_atmosphere/mpas_atm_advection.F +++ b/src/core_init_atmosphere/mpas_atm_advection.F @@ -11,6 +11,7 @@ module atm_advection use mpas_derived_types use mpas_pool_routines use mpas_constants + use mpas_abort, only : mpas_dmpar_global_abort contains diff --git a/src/core_init_atmosphere/mpas_atmphys_constants.F b/src/core_init_atmosphere/mpas_atmphys_constants.F new file mode 120000 index 0000000000..12c8d2ed5a --- /dev/null +++ b/src/core_init_atmosphere/mpas_atmphys_constants.F @@ -0,0 +1 @@ +../core_atmosphere/physics/mpas_atmphys_constants.F \ No newline at end of file diff --git a/src/core_init_atmosphere/mpas_atmphys_functions.F b/src/core_init_atmosphere/mpas_atmphys_functions.F new file mode 120000 index 0000000000..1112a37e7a --- /dev/null +++ b/src/core_init_atmosphere/mpas_atmphys_functions.F @@ -0,0 +1 @@ +../core_atmosphere/physics/mpas_atmphys_functions.F \ No newline at end of file diff --git a/src/core_init_atmosphere/mpas_init_atm_cases.F b/src/core_init_atmosphere/mpas_init_atm_cases.F index c23aa09c6a..a92b1a9cd9 100644 --- a/src/core_init_atmosphere/mpas_init_atm_cases.F +++ b/src/core_init_atmosphere/mpas_init_atm_cases.F @@ -12,13 +12,17 @@ module 
init_atm_cases use mpas_pool_routines use mpas_constants use mpas_dmpar + use mpas_abort, only : mpas_dmpar_global_abort + use mpas_sort use atm_advection - use mpas_atmphys_initialize_real use mpas_RBF_interpolation use mpas_vector_reconstruction use mpas_timer use mpas_init_atm_static use mpas_init_atm_surface + use mpas_atmphys_constants, only: svpt0,svp1,svp2,svp3 + use mpas_atmphys_functions + use mpas_atmphys_initialize_real ! Added only clause to keep xlf90 from getting confused from the overloaded abs intrinsic in mpas_timekeeping use mpas_timekeeping !, only: MPAS_Time_type, MPAS_TimeInterval_type, MPAS_Clock_type, & @@ -39,6 +43,7 @@ subroutine init_atm_setup_case(domain, stream_manager) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! use mpas_stream_manager + use mpas_init_atm_gwd, only : compute_gwd_fields implicit none @@ -47,6 +52,7 @@ subroutine init_atm_setup_case(domain, stream_manager) integer :: i + integer :: ierr type (block_type), pointer :: block_ptr type (mpas_pool_type), pointer :: mesh @@ -57,6 +63,7 @@ subroutine init_atm_setup_case(domain, stream_manager) integer, pointer :: config_init_case logical, pointer :: config_static_interp + logical, pointer :: config_native_gwd_static logical, pointer :: config_met_interp character(len=StrKIND), pointer :: mminlu @@ -73,21 +80,19 @@ subroutine init_atm_setup_case(domain, stream_manager) ! if (config_init_case == 6) then #ifndef ROTATED_GRID - write(0,*) '*****************************************************************' - write(0,*) 'To initialize and run the mountain wave test case (case 6),' - write(0,*) ' please clean and re-compile init_atmosphere with -DROTATED_GRID' - write(0,*) ' added to the specification of MODEL_FORMULATION' - write(0,*) ' at the top of the Makefile.' 
- write(0,*) '*****************************************************************' - call mpas_dmpar_abort(domain % dminfo) + call mpas_dmpar_global_abort('*****************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('To initialize and run the mountain wave test case (case 6),', deferredAbort=.true.) + call mpas_dmpar_global_abort(' please clean and re-compile init_atmosphere with -DROTATED_GRID', deferredAbort=.true.) + call mpas_dmpar_global_abort(' added to the specification of MODEL_FORMULATION', deferredAbort=.true.) + call mpas_dmpar_global_abort(' at the top of the Makefile.', deferredAbort=.true.) + call mpas_dmpar_global_abort('*****************************************************************') #endif else #ifdef ROTATED_GRID - write(0,*) '*****************************************************************' - write(0,*) 'Only test case 6 should use code compiled with -DROTATED_GRID' - write(0,*) ' specified in the Makefile.' - write(0,*) '*****************************************************************' - call mpas_dmpar_abort(domain % dminfo) + call mpas_dmpar_global_abort('*****************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('Only test case 6 should use code compiled with -DROTATED_GRID', deferredAbort=.true.) + call mpas_dmpar_global_abort(' specified in the Makefile.', deferredAbort=.true.) 
+ call mpas_dmpar_global_abort('*****************************************************************') #endif end if @@ -170,6 +175,7 @@ subroutine init_atm_setup_case(domain, stream_manager) do while (associated(block_ptr)) call mpas_pool_get_config(block_ptr % configs, 'config_static_interp', config_static_interp) + call mpas_pool_get_config(block_ptr % configs, 'config_native_gwd_static', config_native_gwd_static) call mpas_pool_get_config(block_ptr % configs, 'config_met_interp', config_met_interp) call mpas_pool_get_subpool(block_ptr % structs, 'mesh', mesh) @@ -191,19 +197,29 @@ subroutine init_atm_setup_case(domain, stream_manager) ! detected when performing the static_interp step. ! if (domain % dminfo % nprocs > 1) then - write(0,*) ' ' - write(0,*) '****************************************************************' - write(0,*) 'Error: Interpolation of static fields does not work in parallel.' - write(0,*) 'Please run the static_interp step using only a single MPI task.' - write(0,*) '****************************************************************' - write(0,*) ' ' - call mpas_dmpar_abort(domain % dminfo) + call mpas_dmpar_global_abort('****************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('Error: Interpolation of static fields does not work in parallel.', deferredAbort=.true.) + call mpas_dmpar_global_abort('Please run the static_interp step using only a single MPI task.', deferredAbort=.true.) 
+ call mpas_dmpar_global_abort('****************************************************************') end if call init_atm_static(mesh, block_ptr % dimensions, block_ptr % configs) call init_atm_static_orogwd(mesh, block_ptr % dimensions, block_ptr % configs) end if + if (config_native_gwd_static) then + write(0,*) ' ' + write(0,*) 'Computing GWDO static fields on the native MPAS mesh' + write(0,*) ' ' + ierr = compute_gwd_fields(domain) + if (ierr /= 0) then + call mpas_dmpar_global_abort('****************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('Error while trying to compute sub-grid-scale orography', deferredAbort=.true.) + call mpas_dmpar_global_abort('statistics for use with the GWDO scheme.', deferredAbort=.true.) + call mpas_dmpar_global_abort('****************************************************************') + end if + end if + ! ! If at this point the mminlu variable is blank, we assume that the static interp step was ! not run, and that we are working with a static file created before there was a choice @@ -220,7 +236,10 @@ subroutine init_atm_setup_case(domain, stream_manager) call init_atm_case_gfs(block_ptr, mesh, nCells, nEdges, nVertLevels, fg, state, & diag, diag_physics, config_init_case, block_ptr % dimensions, block_ptr % configs) - if (config_met_interp) call physics_initialize_real(mesh, fg, domain % dminfo, block_ptr % dimensions, block_ptr % configs) + + if (config_met_interp) then + call physics_initialize_real(mesh, fg, domain % dminfo, block_ptr % dimensions, block_ptr % configs) + end if block_ptr => block_ptr % next end do @@ -241,12 +260,9 @@ subroutine init_atm_setup_case(domain, stream_manager) else - write(0,*) ' ' - write(0,*) ' ****************************************************' - write(0,*) ' Only test cases 1 through 8 are currently supported.' 
- write(0,*) ' ****************************************************' - write(0,*) ' ' - call mpas_dmpar_abort(domain % dminfo) + call mpas_dmpar_global_abort(' ****************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort(' Only test cases 1 through 8 are currently supported.', deferredAbort=.true.) + call mpas_dmpar_global_abort(' ****************************************************') end if @@ -298,7 +314,7 @@ subroutine init_atm_case_jw(mesh, nCells, nVertLevels, state, diag, configs, tes real (kind=RKIND), dimension(:), pointer :: rdzw, dzu, rdzu, fzm, fzp real (kind=RKIND), dimension(:), pointer :: surface_pressure - real (kind=RKIND), dimension(:,:), pointer :: zgrid, zx, zz, hx + real (kind=RKIND), dimension(:,:), pointer :: zgrid, zxu, zz, hx real (kind=RKIND), dimension(:,:), pointer :: pressure, ppb, pb, rho_zz, rb, rr, tb, rtb, p, pp, dss, t, rt real (kind=RKIND), dimension(:,:), pointer :: u, ru, w, rw, v real (kind=RKIND), dimension(:,:), pointer :: rho, theta @@ -438,7 +454,7 @@ subroutine init_atm_case_jw(mesh, nCells, nVertLevels, state, diag, configs, tes call mpas_pool_get_array(mesh, 'rdzu', rdzu) call mpas_pool_get_array(mesh, 'fzm', fzm) call mpas_pool_get_array(mesh, 'fzp', fzp) - call mpas_pool_get_array(mesh, 'zx', zx) + call mpas_pool_get_array(mesh, 'zxu', zxu) call mpas_pool_get_array(mesh, 'zz', zz) call mpas_pool_get_array(mesh, 'hx', hx) call mpas_pool_get_array(mesh, 'dss', dss) @@ -595,8 +611,8 @@ subroutine init_atm_case_jw(mesh, nCells, nVertLevels, state, diag, configs, tes do i=1, nEdges iCell1 = cellsOnEdge(1,i) iCell2 = cellsOnEdge(2,i) - do k=1,nz - zx (k,i) = (zgrid(k,iCell2)-zgrid(k,iCell1)) / dcEdge(i) + do k=1,nz1 + zxu (k,i) = 0.5 * (zgrid(k,iCell2)-zgrid(k,iCell1) + zgrid(k+1,iCell2)-zgrid(k+1,iCell1)) / dcEdge(i) end do end do do i=1, nCells @@ -1232,7 +1248,7 @@ subroutine init_atm_case_squall_line(dminfo, mesh, nCells, nVertLevels, state, d integer, intent(in) :: test_case 
real (kind=RKIND), dimension(:), pointer :: rdzw, dzu, rdzu, fzm, fzp - real (kind=RKIND), dimension(:,:), pointer :: zgrid, zx, zz, hx, cqw + real (kind=RKIND), dimension(:,:), pointer :: zgrid, zxu, zz, hx, cqw real (kind=RKIND), dimension(:,:), pointer :: ppb, pb, rho_zz, rb, rr, tb, rtb, p, pp, dss, t, rt, u, ru real (kind=RKIND), dimension(:,:,:), pointer :: scalars real (kind=RKIND), dimension(:,:,:), pointer :: zb, zb3 @@ -1337,7 +1353,7 @@ subroutine init_atm_case_squall_line(dminfo, mesh, nCells, nVertLevels, state, d call mpas_pool_get_array(mesh, 'rdzu', rdzu) call mpas_pool_get_array(mesh, 'fzm', fzm) call mpas_pool_get_array(mesh, 'fzp', fzp) - call mpas_pool_get_array(mesh, 'zx', zx) + call mpas_pool_get_array(mesh, 'zxu', zxu) call mpas_pool_get_array(mesh, 'zz', zz) call mpas_pool_get_array(mesh, 'hx', hx) call mpas_pool_get_array(mesh, 'dss', dss) @@ -1477,8 +1493,8 @@ subroutine init_atm_case_squall_line(dminfo, mesh, nCells, nVertLevels, state, d do i=1, nEdges iCell1 = cellsOnEdge(1,i) iCell2 = cellsOnEdge(2,i) - do k=1,nz - zx (k,i) = (zgrid(k,iCell2)-zgrid(k,iCell1)) / dcEdge(i) + do k=1,nz1 + zxu (k,i) = 0.5 * (zgrid(k,iCell2)-zgrid(k,iCell1) + zgrid(k+1,iCell2)-zgrid(k+1,iCell1)) / dcEdge(i) end do end do do i=1, nCells @@ -1820,7 +1836,7 @@ subroutine init_atm_case_mtn_wave(dminfo, mesh, nCells, nVertLevels, state, diag real (kind=RKIND), parameter :: t0=288., hm=250. 
real (kind=RKIND), dimension(:), pointer :: rdzw, dzu, rdzu, fzm, fzp - real (kind=RKIND), dimension(:,:), pointer :: zgrid, zx, zz, hx, cqw + real (kind=RKIND), dimension(:,:), pointer :: zgrid, zxu, zz, hx, cqw real (kind=RKIND), dimension(:,:), pointer :: ppb, pb, rho_zz, rb, rr, tb, rtb, p, pp, dss, t, rt, u, ru real (kind=RKIND), dimension(:,:,:), pointer :: scalars, deriv_two, zb, zb3 @@ -1941,7 +1957,7 @@ subroutine init_atm_case_mtn_wave(dminfo, mesh, nCells, nVertLevels, state, diag call mpas_pool_get_array(mesh, 'rdzu', rdzu) call mpas_pool_get_array(mesh, 'fzm', fzm) call mpas_pool_get_array(mesh, 'fzp', fzp) - call mpas_pool_get_array(mesh, 'zx', zx) + call mpas_pool_get_array(mesh, 'zxu', zxu) call mpas_pool_get_array(mesh, 'zz', zz) call mpas_pool_get_array(mesh, 'hx', hx) call mpas_pool_get_array(mesh, 'dss', dss) @@ -2095,12 +2111,11 @@ subroutine init_atm_case_mtn_wave(dminfo, mesh, nCells, nVertLevels, state, diag write(0,*) ' hx computation complete ' if (terrain_smooth) then - write(0,*) '***************************************************************************' - write(0,*) 'Please contact the MPAS-A developers for up-to-date terrain-smoothing code.' - write(0,*) 'Otherwise, set terrain_smooth=.false. in the mountain wave test case' - write(0,*) ' initialization routine and re-compile.' - write(0,*) '***************************************************************************' - call mpas_dmpar_abort(dminfo) + call mpas_dmpar_global_abort('***************************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('Please contact the MPAS-A developers for up-to-date terrain-smoothing code.', deferredAbort=.true.) + call mpas_dmpar_global_abort('Otherwise, set terrain_smooth=.false. in the mountain wave test case', deferredAbort=.true.) + call mpas_dmpar_global_abort(' initialization routine and re-compile.', deferredAbort=.true.) 
+ call mpas_dmpar_global_abort('***************************************************************************') end if do iCell=1,nCells @@ -2121,8 +2136,8 @@ subroutine init_atm_case_mtn_wave(dminfo, mesh, nCells, nVertLevels, state, diag do i=1, nEdges iCell1 = cellsOnEdge(1,i) iCell2 = cellsOnEdge(2,i) - do k=1,nz - zx (k,i) = (zgrid(k,iCell2)-zgrid(k,iCell1)) / dcEdge(i) + do k=1,nz1 + zxu (k,i) = 0.5 * (zgrid(k,iCell2)-zgrid(k,iCell1) + zgrid(k+1,iCell2)-zgrid(k+1,iCell1)) / dcEdge(i) end do end do do i=1, nCells @@ -2439,6 +2454,8 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state use init_atm_read_met use init_atm_llxy use init_atm_hinterp + use mpas_hash + use mpas_atmphys_constants, only : svpt0, svp1, svp2, svp3 implicit none @@ -2476,7 +2493,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state real (kind=RKIND), dimension(:), pointer :: rdzw, dzu, rdzu, fzm, fzp real (kind=RKIND), dimension(:), pointer :: vert_level, latPoints, lonPoints, ter - real (kind=RKIND), dimension(:,:), pointer :: zgrid, zx, zz, hx + real (kind=RKIND), dimension(:,:), pointer :: zgrid, zxu, zz, hx real (kind=RKIND), dimension(:,:), pointer :: pressure, ppb, pb, rho_zz, rb, rr, tb, rtb, p, pp, dss, t, rt real (kind=RKIND), dimension(:), pointer :: surface_pressure real (kind=RKIND), dimension(:), pointer :: destField1d @@ -2528,6 +2545,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state integer, dimension(:), pointer :: mask_array integer, dimension(nEdges), target :: edge_mask character (len=StrKIND) :: fname + logical :: is_sfc_field real (kind=RKIND) :: flux, fluxk, lat1, lat2, eta_v, r_pert, u_pert, lat_pert, lon_pert, r real (kind=RKIND) :: lat, lon, x, y @@ -2560,6 +2578,10 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state real (kind=RKIND), dimension(nlat) :: lat_2d real (kind=RKIND) :: dlat real (kind=RKIND) :: z_edge, z_edge3, d2fdx2_cell1, 
d2fdx2_cell2 + real (kind=RKIND) :: alt, als, zetal, zl + + ! calculation of the water vapor mixing ratio: + real (kind=RKIND) :: sh_max,sh_min,global_sh_max,global_sh_min character (len=StrKIND), pointer :: config_met_prefix character (len=StrKIND), pointer :: config_start_time @@ -2569,6 +2591,8 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state integer, pointer :: config_nsm real (kind=RKIND), pointer :: config_dzmin real (kind=RKIND), pointer :: config_ztop + logical, pointer :: config_tc_vertical_grid + logical, pointer :: config_use_spechumd integer, pointer :: config_nfglevels integer, pointer :: config_nfgsoillevels logical, pointer :: config_smooth_surfaces @@ -2592,6 +2616,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state real (kind=RKIND), dimension(:,:), pointer :: theta real (kind=RKIND), dimension(:,:), pointer :: rho real (kind=RKIND), dimension(:,:), pointer :: relhum + real (kind=RKIND), dimension(:,:), pointer :: spechum real (kind=RKIND), dimension(:,:), pointer :: ru real (kind=RKIND), dimension(:,:), pointer :: rw real (kind=RKIND), dimension(:), pointer :: precipw @@ -2610,12 +2635,26 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state real (kind=RKIND), dimension(:,:), pointer :: z_fg real (kind=RKIND), dimension(:,:), pointer :: t_fg real (kind=RKIND), dimension(:,:), pointer :: rh_fg + real (kind=RKIND), dimension(:,:), pointer :: sh_fg real (kind=RKIND), dimension(:,:), pointer :: gfs_z real (kind=RKIND), dimension(:,:), pointer :: p_fg real (kind=RKIND), dimension(:,:), pointer :: st_fg real (kind=RKIND), dimension(:,:), pointer :: sm_fg real (kind=RKIND), dimension(:), pointer :: soilz + type (hashtable) :: level_hash + logical :: too_many_fg_levs + integer :: level_value + + ! 
For outputting surface fields u10, v10, q2, rh2, and t2m from first-guess data + real (kind=RKIND), dimension(:), pointer :: u10 + real (kind=RKIND), dimension(:), pointer :: v10 + real (kind=RKIND), dimension(:), pointer :: q2 + real (kind=RKIND), dimension(:), pointer :: rh2 + real (kind=RKIND), dimension(:), pointer :: t2m + + character (len=StrKIND) :: errstring + call mpas_pool_get_config(configs, 'config_met_prefix', config_met_prefix) call mpas_pool_get_config(configs, 'config_start_time', config_start_time) call mpas_pool_get_config(configs, 'config_met_interp', config_met_interp) @@ -2624,6 +2663,8 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state call mpas_pool_get_config(configs, 'config_nsm', config_nsm) call mpas_pool_get_config(configs, 'config_dzmin', config_dzmin) call mpas_pool_get_config(configs, 'config_ztop', config_ztop) + call mpas_pool_get_config(configs, 'config_tc_vertical_grid', config_tc_vertical_grid) + call mpas_pool_get_config(configs, 'config_use_spechumd', config_use_spechumd) call mpas_pool_get_config(configs, 'config_nfglevels', config_nfglevels) call mpas_pool_get_config(configs, 'config_nfgsoillevels', config_nfgsoillevels) call mpas_pool_get_config(configs, 'config_smooth_surfaces', config_smooth_surfaces) @@ -2655,7 +2696,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state call mpas_pool_get_array(mesh, 'rdzu', rdzu) call mpas_pool_get_array(mesh, 'fzm', fzm) call mpas_pool_get_array(mesh, 'fzp', fzp) - call mpas_pool_get_array(mesh, 'zx', zx) + call mpas_pool_get_array(mesh, 'zxu', zxu) call mpas_pool_get_array(mesh, 'zz', zz) call mpas_pool_get_array(mesh, 'hx', hx) call mpas_pool_get_array(mesh, 'ter', ter) @@ -2671,6 +2712,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state call mpas_pool_get_array(diag, 'pressure', pressure) call mpas_pool_get_array(diag, 'surface_pressure', surface_pressure) call mpas_pool_get_array(diag, 
'relhum', relhum) + call mpas_pool_get_array(diag, 'spechum', spechum) call mpas_pool_get_array(diag, 'ru', ru) call mpas_pool_get_array(diag, 'rw', rw) @@ -2715,6 +2757,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state call mpas_pool_get_array(fg, 'z', z_fg) call mpas_pool_get_array(fg, 't', t_fg) call mpas_pool_get_array(fg, 'rh', rh_fg) + call mpas_pool_get_array(fg, 'sh', sh_fg) call mpas_pool_get_array(fg, 'gfs_z', gfs_z) call mpas_pool_get_array(fg, 'p', p_fg) @@ -2743,16 +2786,22 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state call read_met_init(trim(config_met_prefix), .false., config_start_time(1:13), istatus) if (istatus /= 0) then - write(0,*) '********************************************************************************' - write(0,*) 'Error opening initial meteorological data file '// & - trim(config_met_prefix)//':'//config_start_time(1:13) - write(0,*) '********************************************************************************' - call mpas_dmpar_abort(dminfo) + call mpas_dmpar_global_abort('********************************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('Error opening initial meteorological data file ' & + //trim(config_met_prefix)//':'//config_start_time(1:13), deferredAbort=.true.) + call mpas_dmpar_global_abort('********************************************************************************') end if call read_next_met_field(field, istatus) do while (istatus == 0) - if (index(field % field, 'SOILHGT') /= 0) then + if (trim(field % field) == 'SOILHGT') then + + +write(0,*) 'USING ECMWF TERRAIN...' + + interp_list(1) = FOUR_POINT + interp_list(2) = SEARCH + interp_list(3) = 0 ! ! 
Set up projection @@ -2770,7 +2819,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state end if - if (index(field % field, 'SOILHGT') /= 0) then + if (trim(field % field) == 'SOILHGT') then nInterpPoints = nCells latPoints => latCell lonPoints => lonCell @@ -2778,6 +2827,15 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 1 end if + allocate(rslab(-2:field % nx+3, field % ny)) + rslab(1:field % nx, 1:field % ny) = field % slab(1:field % nx, 1:field % ny) + rslab(0, 1:field % ny) = field % slab(field % nx, 1:field % ny) + rslab(-1, 1:field % ny) = field % slab(field % nx-1, 1:field % ny) + rslab(-2, 1:field % ny) = field % slab(field % nx-2, 1:field % ny) + rslab(field % nx+1, 1:field % ny) = field % slab(1, 1:field % ny) + rslab(field % nx+2, 1:field % ny) = field % slab(2, 1:field % ny) + rslab(field % nx+3, 1:field % ny) = field % slab(3, 1:field % ny) + do i=1,nInterpPoints lat = latPoints(i)*DEG_PER_RAD lon = lonPoints(i)*DEG_PER_RAD @@ -2785,13 +2843,23 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state if (x < 0.5) then lon = lon + 360.0 call latlon_to_ij(proj, lat, lon, x, y) + else if (x >= real(field%nx)+0.5) then + lon = lon - 360.0 + call latlon_to_ij(proj, lat, lon, x, y) + end if + if (y < 0.5) then + y = 1.0 + else if (y >= real(field%ny)+0.5) then + y = real(field%ny) end if if (ndims == 1) then - destField1d(i) = interp_sequence(x, y, 1, field % slab, 1, field % nx, 1, field % ny, 1, 1, -1.e30_RKIND, interp_list, 1) + destField1d(i) = interp_sequence(x, y, 1, rslab, -2, field % nx + 3, 1, field % ny, 1, 1, -1.e30_RKIND, interp_list, 1) else if (ndims == 2) then - destField2d(k,i) = interp_sequence(x, y, 1, field % slab, 1, field % nx, 1, field % ny, 1, 1, -1.e30_RKIND, interp_list, 1) + destField2d(k,i) = interp_sequence(x, y, 1, rslab, -2, field % nx + 3, 1, field % ny, 1, 1, -1.e30_RKIND, interp_list, 1) end if end do + + deallocate(rslab) 
end if deallocate(field % slab) @@ -2829,7 +2897,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state * (ter(cellsOnCell(j,iCell))-ter(iCell)) end do end if - hs(iCell) = ter(iCell) + 0.125*hs(iCell) + hs(iCell) = ter(iCell) + 0.216*hs(iCell) end do do iCell=1,nCells @@ -2841,8 +2909,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state * (hs(cellsOnCell(j,iCell))-hs(iCell)) end do end if -! ter(iCell) = hs(iCell) - 0.25*ter(iCell) - ter(iCell) = hs(iCell) - 0.125*ter(iCell) + ter(iCell) = hs(iCell) - 0.216*ter(iCell) end do ! note that ther variable ter used throughout this section is a pointer to grid % ter % array, here we are passing ter's parent field @@ -2861,15 +2928,65 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ! Metrics for hybrid coordinate and vertical stretching - str = 1.5 -! str = 1. - zt = config_ztop - dz = zt/float(nz1) + if (config_tc_vertical_grid) then - do k=1,nz - zw(k) = (real(k-1)/real(nz1))**str*zt - if (k > 1) dzw(k-1) = zw(k)-zw(k-1) - end do + write(0,*) 'Setting up vertical levels as in 2014 TC experiments' + + zt = config_ztop + dz = zt/float(nz1) + +!... Laura D. Fowler: change the values als,alt,and zetal valid for nVertLevels=41 to values +! needed for nVertLevels equal or greater than 55. + if (nVertLevels >= 55) then + als = .075 ! 55 levels. + alt = 1.70 ! 55 levels. + zetal = .75 ! 55 levels. + else + als = .075 ! 41 levels. + alt = 1.23 ! 41 levels. + zetal = .31 ! 41 levels. + endif + write(0,*) + write(0,*) '--- config_tc_vertical_grid = ', config_tc_vertical_grid + write(0,*) '--- als = ', als + write(0,*) '--- alt = ', alt + write(0,*) '--- zetal = ',zetal + if (nVertLevels /= 55 .and. 
nVertLevels /= 41) then + write(0,*) '********************************************************************************' + write(0,*) '* Note: als, alt, and zetal have not been tested for nVertLevels equal ', nVertLevels + write(0,*) '* The values of these parameters have only been tested with 41 and 55 layers' + write(0,*) '********************************************************************************' + end if + write(0,*) ' ' +!... end Laura D. Fowler / 2016-04-12. + + do k=1,nz + zl = 1.-alt*(1.-zetal) + if ((real(k-1)/real(nz1)).LT.zetal) then + zw(k) = (als*real(K-1)/real(nz1) & + +(3.*(1.-alt)+2.*(alt-als)*zetal) & + *((K-1)*dz/(zt*zetal))**2 & + -(2.*(1.-alt) + (alt-als)*zetal) & + *(real(k-1)*dz/(zt*zetal))**3) * zt + else + zw(K) = (zl+alt*(real(K-1)/real(nz1)-zetal))*zt + end if + if (k > 1) dzw(k-1) = zw(k)-zw(k-1) + end do + + else + + write(0,*) 'Setting up vertical levels as in MPAS 2.0 and earlier' + + str = 1.5 + zt = config_ztop + dz = zt/float(nz1) + + do k=1,nz + zw(k) = (real(k-1)/real(nz1))**str*zt + if (k > 1) dzw(k-1) = zw(k)-zw(k-1) + end do + end if ! ah(k) governs the transition between terrain-following ! and pure height coordinates @@ -2939,6 +3056,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ! cf2 = d1*d3*(d1-d3)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1)) ! cf3 = d1*d2*(d2-d1)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1)) + write(0,*) ' cf1, cf2, cf3 = ',cf1,cf2,cf3 ! Smoothing algorithm for coordinate surfaces @@ -2995,6 +3113,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state tempField % recvList => parinfo % cellsToRecv tempField % copyList => parinfo % cellsToCopy tempField % array => hs + tempField % isActive = .true. 
tempField % prev => null() tempField % next => null() @@ -3045,8 +3164,8 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state do i=1, nEdges iCell1 = cellsOnEdge(1,i) iCell2 = cellsOnEdge(2,i) - do k=1,nz - zx (k,i) = (zgrid(k,iCell2)-zgrid(k,iCell1)) / dcEdge(i) + do k=1,nz1 + zxu (k,i) = 0.5 * (zgrid(k,iCell2)-zgrid(k,iCell1) + zgrid(k+1,iCell2)-zgrid(k+1,iCell1)) / dcEdge(i) end do end do do i=1, nCells @@ -3127,17 +3246,16 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state call read_met_init(trim(config_met_prefix), .false., config_start_time(1:13), istatus) if (istatus /= 0) then - write(0,*) '********************************************************************************' - write(0,*) 'Error opening initial meteorological data file '// & - trim(config_met_prefix)//':'//config_start_time(1:13) - write(0,*) '********************************************************************************' - call mpas_dmpar_abort(dminfo) + call mpas_dmpar_global_abort('********************************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('Error opening initial meteorological data file ' & + //trim(config_met_prefix)//':'//config_start_time(1:13), deferredAbort=.true.) + call mpas_dmpar_global_abort('********************************************************************************') end if call read_next_met_field(field, istatus) do while (istatus == 0) - if (index(field % field, 'LANDSEA') /= 0) then + if (trim(field % field) == 'LANDSEA') then allocate(maskslab(-2:field % nx+3, field % ny)) maskslab(1:field % nx, 1:field % ny) = field % slab(1:field % nx, 1:field % ny) @@ -3158,11 +3276,10 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state call read_met_close() if (.not. 
allocated(maskslab)) then - write(0,*) '********************************************************************************' - write(0,*) 'LANDSEA field not found in meteorological data file '// & - trim(config_met_prefix)//':'//config_start_time(1:13) - write(0,*) '********************************************************************************' - call mpas_dmpar_abort(dminfo) + call mpas_dmpar_global_abort('********************************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('LANDSEA field not found in meteorological data file ' & + //trim(config_met_prefix)//':'//config_start_time(1:13), deferredAbort=.true.) + call mpas_dmpar_global_abort('********************************************************************************') end if edge_mask(:) = 1 @@ -3177,13 +3294,15 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state call read_met_init(trim(config_met_prefix), .false., config_start_time(1:13), istatus) if (istatus /= 0) then - write(0,*) '********************************************************************************' - write(0,*) 'Error opening initial meteorological data file '// & - trim(config_met_prefix)//':'//config_start_time(1:13) - write(0,*) '********************************************************************************' - call mpas_dmpar_abort(dminfo) + call mpas_dmpar_global_abort('********************************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('Error opening initial meteorological data file ' & + //trim(config_met_prefix)//':'//config_start_time(1:13), deferredAbort=.true.) + call mpas_dmpar_global_abort('********************************************************************************') end if + call mpas_hash_init(level_hash) + too_many_fg_levs = .false. 
+ call read_next_met_field(field, istatus) do while (istatus == 0) @@ -3199,72 +3318,88 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state mask_array => landmask - if (index(field % field, 'UU') /= 0 .or. & - index(field % field, 'VV') /= 0 .or. & - index(field % field, 'TT') /= 0 .or. & - index(field % field, 'RH') /= 0 .or. & - index(field % field, 'GHT') /= 0 .or. & - index(field % field, 'PMSL') /= 0 .or. & - index(field % field, 'PSFC') /= 0 .or. & - index(field % field, 'SOILHGT') /= 0 .or. & - index(field % field, 'SM000010') /= 0 .or. & - index(field % field, 'SM010040') /= 0 .or. & - index(field % field, 'SM040100') /= 0 .or. & - index(field % field, 'SM100200') /= 0 .or. & - index(field % field, 'SM010200') /= 0 .or. & - index(field % field, 'SM000007') /= 0 .or. & - index(field % field, 'SM007028') /= 0 .or. & - index(field % field, 'SM028100') /= 0 .or. & - index(field % field, 'SM100255') /= 0 .or. & - index(field % field, 'ST000010') /= 0 .or. & - index(field % field, 'ST010040') /= 0 .or. & - index(field % field, 'ST040100') /= 0 .or. & - index(field % field, 'ST100200') /= 0 .or. & - index(field % field, 'ST010200') /= 0 .or. & - index(field % field, 'ST000007') /= 0 .or. & - index(field % field, 'ST007028') /= 0 .or. & - index(field % field, 'ST028100') /= 0 .or. & - index(field % field, 'ST100255') /= 0 .or. & - index(field % field, 'PRES') /= 0 .or. & - index(field % field, 'SNOW') /= 0 .or. & - index(field % field, 'SEAICE') /= 0 .or. & - index(field % field, 'SKINTEMP') /= 0) then - - if (index(field % field, 'SM000010') /= 0 .or. & - index(field % field, 'SM010040') /= 0 .or. & - index(field % field, 'SM040100') /= 0 .or. & - index(field % field, 'SM100200') /= 0 .or. & - index(field % field, 'SM010200') /= 0 .or. & - index(field % field, 'SM000007') /= 0 .or. & - index(field % field, 'SM007028') /= 0 .or. & - index(field % field, 'SM028100') /= 0 .or. & - index(field % field, 'SM100255') /= 0 .or. 
& - index(field % field, 'ST000010') /= 0 .or. & - index(field % field, 'ST010040') /= 0 .or. & - index(field % field, 'ST040100') /= 0 .or. & - index(field % field, 'ST100200') /= 0 .or. & - index(field % field, 'ST010200') /= 0 .or. & - index(field % field, 'ST000007') /= 0 .or. & - index(field % field, 'ST007028') /= 0 .or. & - index(field % field, 'ST028100') /= 0 .or. & - index(field % field, 'ST100255') /= 0 .or. & - index(field % field, 'SNOW') /= 0 .or. & - index(field % field, 'SEAICE') /= 0 .or. & - index(field % field, 'SKINTEMP') /= 0) then + if (trim(field % field) == 'UU' .or. & + trim(field % field) == 'VV' .or. & + trim(field % field) == 'TT' .or. & + trim(field % field) == 'RH' .or. & + trim(field % field) == 'SPECHUMD' .or. & + trim(field % field) == 'GHT' .or. & + trim(field % field) == 'PMSL' .or. & + trim(field % field) == 'PSFC' .or. & + trim(field % field) == 'SOILHGT' .or. & + trim(field % field) == 'SM000010' .or. & + trim(field % field) == 'SM010040' .or. & + trim(field % field) == 'SM040100' .or. & + trim(field % field) == 'SM100200' .or. & + trim(field % field) == 'SM010200' .or. & + trim(field % field) == 'SM000007' .or. & + trim(field % field) == 'SM007028' .or. & + trim(field % field) == 'SM028100' .or. & + trim(field % field) == 'SM100255' .or. & + trim(field % field) == 'ST000010' .or. & + trim(field % field) == 'ST010040' .or. & + trim(field % field) == 'ST040100' .or. & + trim(field % field) == 'ST100200' .or. & + trim(field % field) == 'ST010200' .or. & + trim(field % field) == 'ST000007' .or. & + trim(field % field) == 'ST007028' .or. & + trim(field % field) == 'ST028100' .or. & + trim(field % field) == 'ST100255' .or. & + trim(field % field) == 'PRES' .or. & + trim(field % field) == 'SNOW' .or. & + trim(field % field) == 'SEAICE' .or. & + trim(field % field) == 'SKINTEMP') then + + if (trim(field % field) == 'SM000010' .or. & + trim(field % field) == 'SM010040' .or. & + trim(field % field) == 'SM040100' .or. 
& + trim(field % field) == 'SM100200' .or. & + trim(field % field) == 'SM010200' .or. & + trim(field % field) == 'SM000007' .or. & + trim(field % field) == 'SM007028' .or. & + trim(field % field) == 'SM028100' .or. & + trim(field % field) == 'SM100255' .or. & + trim(field % field) == 'ST000010' .or. & + trim(field % field) == 'ST010040' .or. & + trim(field % field) == 'ST040100' .or. & + trim(field % field) == 'ST100200' .or. & + trim(field % field) == 'ST010200' .or. & + trim(field % field) == 'ST000007' .or. & + trim(field % field) == 'ST007028' .or. & + trim(field % field) == 'ST028100' .or. & + trim(field % field) == 'ST100255' .or. & + trim(field % field) == 'SNOW' .or. & + trim(field % field) == 'SEAICE' .or. & + trim(field % field) == 'SKINTEMP') then k = 1 - else if (index(field % field, 'PMSL') == 0 .and. & - index(field % field, 'PSFC') == 0 .and. & - index(field % field, 'SOILHGT') == 0) then + else if (trim(field % field) /= 'PMSL' .and. & + trim(field % field) /='PSFC' .and. & + trim(field % field) /= 'SOILHGT') then + + ! Since the hash table can only store integers, transfer the bit pattern from + ! the real-valued xlvl into an integer; that the result is not an integer version + ! of the level is not important, since we only want to test uniqueness of levels + level_value = transfer(field % xlvl, level_value) + if (.not. mpas_hash_search(level_hash, level_value)) then + call mpas_hash_insert(level_hash, level_value) + if (mpas_hash_size(level_hash) > config_nfglevels) then + too_many_fg_levs = .true. + end if + end if + + ! + ! In case we have more than config_nfglevels levels, just keep cycling through + ! the remaining fields in the intermediate file for the purpose of counting how + ! many unique levels are found using the code above + ! + if (too_many_fg_levs) then + call read_next_met_field(field, istatus) + cycle + end if + do k=1,config_nfglevels if (vert_level(k) == field % xlvl .or. 
vert_level(k) == -1.0) exit end do - if (k > config_nfglevels) then - write(0,*) '*******************************************************************' - write(0,*) 'Error: The meteorological data file has more than config_nfglevels.' - write(0,*) ' Please increase config_nfglevels in the namelist and re-run.' - write(0,*) '*******************************************************************' - call mpas_dmpar_abort(dminfo) - end if if (vert_level(k) == -1.0) vert_level(k) = field % xlvl else k = 1 @@ -3296,76 +3431,107 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ! ! Horizontally interpolate the field at level k ! - if (index(field % field, 'UU') /= 0) then + if (trim(field % field) == 'UU') then write(0,*) 'Interpolating U at ', k, vert_level(k) - mask_array => edge_mask + ! For U10, interpolate to cell centers + if (vert_level(k) == 200100.0) then + nInterpPoints = nCells + latPoints => latCell + lonPoints => lonCell + call mpas_pool_get_array(fg, 'u10', destField1d) + ndims = 1 + + ! otherwise to edges + else + mask_array => edge_mask + + nInterpPoints = nEdges + latPoints => latEdge + lonPoints => lonEdge + call mpas_pool_get_array(fg, 'u', destField2d) + ndims = 2 + end if - nInterpPoints = nEdges - latPoints => latEdge - lonPoints => lonEdge - call mpas_pool_get_array(fg, 'u', destField2d) - ndims = 2 - else if (index(field % field, 'VV') /= 0) then + else if (trim(field % field) == 'VV') then write(0,*) 'Interpolating V at ', k, vert_level(k) - mask_array => edge_mask + ! For V10, interpolate to cell centers + if (vert_level(k) == 200100.0) then + nInterpPoints = nCells + latPoints => latCell + lonPoints => lonCell + call mpas_pool_get_array(fg, 'v10', destField1d) + ndims = 1 + + ! 
otherwise to edges + else + mask_array => edge_mask - nInterpPoints = nEdges - latPoints => latEdge - lonPoints => lonEdge - call mpas_pool_get_array(fg, 'v', destField2d) - ndims = 2 - else if (index(field % field, 'TT') /= 0) then + nInterpPoints = nEdges + latPoints => latEdge + lonPoints => lonEdge + call mpas_pool_get_array(fg, 'v', destField2d) + ndims = 2 + end if + + else if (trim(field % field) == 'TT') then write(0,*) 'Interpolating T at ', k, vert_level(k) nInterpPoints = nCells latPoints => latCell lonPoints => lonCell call mpas_pool_get_array(fg, 't', destField2d) ndims = 2 - else if (index(field % field, 'RH') /= 0) then + else if (trim(field % field) == 'RH') then write(0,*) 'Interpolating RH at ', k, vert_level(k) nInterpPoints = nCells latPoints => latCell lonPoints => lonCell call mpas_pool_get_array(fg, 'rh', destField2d) ndims = 2 - else if (index(field % field, 'GHT') /= 0) then + else if (trim(field % field) == 'SPECHUMD') then +write(0,*) 'Interpolating SPECHUMD at ', k, vert_level(k) + nInterpPoints = nCells + latPoints => latCell + lonPoints => lonCell + call mpas_pool_get_array(fg, 'sh', destField2d) + ndims = 2 + else if (trim(field % field) == 'GHT') then write(0,*) 'Interpolating GHT at ', k, vert_level(k) nInterpPoints = nCells latPoints => latCell lonPoints => lonCell call mpas_pool_get_array(fg, 'z', destField2d) ndims = 2 - else if (index(field % field, 'PRES') /= 0) then + else if (trim(field % field) == 'PRES') then write(0,*) 'Interpolating PRES at ', k, vert_level(k) nInterpPoints = nCells latPoints => latCell lonPoints => lonCell call mpas_pool_get_array(fg, 'p', destField2d) ndims = 2 - else if (index(field % field, 'PMSL') /= 0) then + else if (trim(field % field) == 'PMSL') then write(0,*) 'Interpolating PMSL' nInterpPoints = nCells latPoints => latCell lonPoints => lonCell call mpas_pool_get_array(fg, 'pmsl', destField1d) ndims = 1 - else if (index(field % field, 'PSFC') /= 0) then + else if (trim(field % field) == 'PSFC') 
then write(0,*) 'Interpolating PSFC' nInterpPoints = nCells latPoints => latCell lonPoints => lonCell call mpas_pool_get_array(fg, 'psfc', destField1d) ndims = 1 - else if (index(field % field, 'SOILHGT') /= 0) then + else if (trim(field % field) == 'SOILHGT') then write(0,*) 'Interpolating SOILHGT' nInterpPoints = nCells latPoints => latCell lonPoints => lonCell call mpas_pool_get_array(fg, 'soilz', destField1d) ndims = 1 - else if (index(field % field, 'SM000010') /= 0) then + else if (trim(field % field) == 'SM000010') then write(0,*) 'Interpolating SM000010' interp_list(1) = FOUR_POINT @@ -3385,7 +3551,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 10. zs_fg(k,:) = 10. - else if (index(field % field, 'SM010200') /= 0) then + else if (trim(field % field) == 'SM010200') then write(0,*) 'Interpolating SM010200' interp_list(1) = FOUR_POINT @@ -3405,7 +3571,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 200.-10. zs_fg(k,:) = 200. - else if (index(field % field, 'SM010040') /= 0) then + else if (trim(field % field) == 'SM010040') then write(0,*) 'Interpolating SM010040' interp_list(1) = FOUR_POINT @@ -3425,7 +3591,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 40.-10. zs_fg(k,:) = 40. - else if (index(field % field, 'SM040100') /= 0) then + else if (trim(field % field) == 'SM040100') then write(0,*) 'Interpolating SM040100' interp_list(1) = FOUR_POINT @@ -3445,7 +3611,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 100.-40. zs_fg(k,:) = 100. 
- else if (index(field % field, 'SM100200') /= 0) then + else if (trim(field % field) == 'SM100200') then write(0,*) 'Interpolating SM100200' interp_list(1) = FOUR_POINT @@ -3465,7 +3631,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 200.-100. zs_fg(k,:) = 200. - else if (index(field % field, 'SM000007') /= 0) then + else if (trim(field % field) == 'SM000007') then write(0,*) 'Interpolating SM000007' interp_list(1) = FOUR_POINT @@ -3485,7 +3651,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 7. zs_fg(k,:) = 7. - else if (index(field % field, 'SM007028') /= 0) then + else if (trim(field % field) == 'SM007028') then write(0,*) 'Interpolating SM007028' interp_list(1) = FOUR_POINT @@ -3505,7 +3671,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 28.-7. zs_fg(k,:) = 28. - else if (index(field % field, 'SM028100') /= 0) then + else if (trim(field % field) == 'SM028100') then write(0,*) 'Interpolating SM028100' interp_list(1) = FOUR_POINT @@ -3525,7 +3691,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 100.-28. zs_fg(k,:) = 100. - else if (index(field % field, 'SM100255') /= 0) then + else if (trim(field % field) == 'SM100255') then write(0,*) 'Interpolating SM100255' interp_list(1) = FOUR_POINT @@ -3545,7 +3711,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 255.-100. zs_fg(k,:) = 255. - else if (index(field % field, 'ST000010') /= 0) then + else if (trim(field % field) == 'ST000010') then write(0,*) 'Interpolating ST000010' interp_list(1) = SIXTEEN_POINT @@ -3566,7 +3732,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 10. zs_fg(k,:) = 10. 
- else if (index(field % field, 'ST010200') /= 0) then + else if (trim(field % field) == 'ST010200') then write(0,*) 'Interpolating ST010200' interp_list(1) = SIXTEEN_POINT @@ -3587,7 +3753,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 200.-10. zs_fg(k,:) = 200. - else if (index(field % field, 'ST010040') /= 0) then + else if (trim(field % field) == 'ST010040') then write(0,*) 'Interpolating ST010040' interp_list(1) = SIXTEEN_POINT @@ -3608,7 +3774,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 40.-10. zs_fg(k,:) = 40. - else if (index(field % field, 'ST040100') /= 0) then + else if (trim(field % field) == 'ST040100') then write(0,*) 'Interpolating ST040100' interp_list(1) = SIXTEEN_POINT @@ -3629,7 +3795,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 100.-40. zs_fg(k,:) = 100. - else if (index(field % field, 'ST100200') /= 0) then + else if (trim(field % field) == 'ST100200') then write(0,*) 'Interpolating ST100200' interp_list(1) = SIXTEEN_POINT @@ -3650,7 +3816,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 200.-100. zs_fg(k,:) = 200. - else if (index(field % field, 'ST000007') /= 0) then + else if (trim(field % field) == 'ST000007') then write(0,*) 'Interpolating ST000007' interp_list(1) = SIXTEEN_POINT @@ -3671,7 +3837,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 7. zs_fg(k,:) = 7. - else if (index(field % field, 'ST007028') /= 0) then + else if (trim(field % field) == 'ST007028') then write(0,*) 'Interpolating ST007028' interp_list(1) = SIXTEEN_POINT @@ -3692,7 +3858,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 28.-7. zs_fg(k,:) = 28. 
- else if (index(field % field, 'ST028100') /= 0) then + else if (trim(field % field) == 'ST028100') then write(0,*) 'Interpolating ST028100' interp_list(1) = SIXTEEN_POINT @@ -3713,7 +3879,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 100.-28. zs_fg(k,:) = 100. - else if (index(field % field, 'ST100255') /= 0) then + else if (trim(field % field) == 'ST100255') then write(0,*) 'Interpolating ST100255' interp_list(1) = SIXTEEN_POINT @@ -3734,7 +3900,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ndims = 2 dzs_fg(k,:) = 255.-100. zs_fg(k,:) = 255. - else if (index(field % field, 'SNOW') /= 0) then + else if (trim(field % field) == 'SNOW') then write(0,*) 'Interpolating SNOW' interp_list(1) = FOUR_POINT @@ -3749,7 +3915,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state lonPoints => lonCell call mpas_pool_get_array(fg, 'snow', destField1d) ndims = 1 - else if (index(field % field, 'SEAICE') /= 0) then + else if (trim(field % field) == 'SEAICE') then write(0,*) 'Interpolating SEAICE' interp_list(1) = FOUR_POINT @@ -3766,7 +3932,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state lonPoints => lonCell call mpas_pool_get_array(fg, 'xice', destField1d) ndims = 1 - else if (index(field % field, 'SKINTEMP') /= 0) then + else if (trim(field % field) == 'SKINTEMP') then write(0,*) 'Interpolating SKINTEMP' nInterpPoints = nCells latPoints => latCell @@ -3792,6 +3958,14 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state if (x < 0.5) then lon = lon + 360.0 call latlon_to_ij(proj, lat, lon, x, y) + else if (x >= real(field%nx)+0.5) then + lon = lon - 360.0 + call latlon_to_ij(proj, lat, lon, x, y) + end if + if (y < 0.5) then + y = 1.0 + else if (y >= real(field%ny)+0.5) then + y = real(field%ny) end if if (ndims == 1) then if (maskval /= -1.0) then @@ -3815,6 +3989,57 @@ 
subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state end if end do + ! + ! In addition to interpolating wind fields to cell edges, we should + also interpolate to cell centers at the surface in order to + produce U10 and V10 fields + ! + is_sfc_field = .false. + mask_array => landmask + if (index(field % field, 'UU') /= 0 .and. vert_level(k) == 200100.0) then + nInterpPoints = nCells + latPoints => latCell + lonPoints => lonCell + call mpas_pool_get_array(fg, 'u10', destField1d) + ndims = 1 + is_sfc_field = .true. + else if (index(field % field, 'VV') /= 0 .and. vert_level(k) == 200100.0) then + nInterpPoints = nCells + latPoints => latCell + lonPoints => lonCell + call mpas_pool_get_array(fg, 'v10', destField1d) + ndims = 1 + is_sfc_field = .true. + else if (index(field % field, 'TT') /= 0 .and. vert_level(k) == 200100.0) then + nInterpPoints = nCells + latPoints => latCell + lonPoints => lonCell + call mpas_pool_get_array(fg, 't2m', destField1d) + ndims = 1 + is_sfc_field = .true. + else if (index(field % field, 'RH') /= 0 .and. vert_level(k) == 200100.0) then + nInterpPoints = nCells + latPoints => latCell + lonPoints => lonCell + call mpas_pool_get_array(fg, 'rh2', destField1d) + ndims = 1 + is_sfc_field = .true. 
+ end if + + if (is_sfc_field) then + do i=1,nInterpPoints + lat = latPoints(i)*DEG_PER_RAD + lon = lonPoints(i)*DEG_PER_RAD + call latlon_to_ij(proj, lat, lon, x, y) + if (x < 0.5) then + lon = lon + 360.0 + call latlon_to_ij(proj, lat, lon, x, y) + end if + destField1d(i) = interp_sequence(x, y, 1, rslab, -2, field % nx + 3, 1, field % ny, 1, 1, & + msgval, interp_list, 1, maskval=maskval, mask_array=maskslab) + end do + end if + deallocate(rslab) end if @@ -3824,6 +4049,17 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state end do call read_met_close() + level_value = mpas_hash_size(level_hash) + call mpas_hash_destroy(level_hash) + + if (too_many_fg_levs) then + write(errstring,'(a,i4)') ' Please increase config_nfglevels to at least ', level_value + call mpas_dmpar_global_abort('*******************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('Error: The meteorological data file has more than config_nfglevels.', deferredAbort=.true.) + call mpas_dmpar_global_abort(trim(errstring), deferredAbort=.true.) + call mpas_dmpar_global_abort(' in the namelist and re-run.', deferredAbort=.true.) + call mpas_dmpar_global_abort('*******************************************************************') + end if ! @@ -3837,6 +4073,41 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state write(0,*) 'Found ', nfglevels_actual, ' levels in the first-guess data' write(0,*) '*************************************************' + + ! + ! Extract surface fields from first-guess + ! +! call mpas_pool_get_array(fg, 'u10', u10) +! call mpas_pool_get_array(fg, 'v10', v10) + call mpas_pool_get_array(fg, 'q2', q2) + call mpas_pool_get_array(fg, 'rh2', rh2) + call mpas_pool_get_array(fg, 't2m', t2m) + call mpas_pool_get_array(fg, 'u', u_fg) + call mpas_pool_get_array(fg, 'v', v_fg) + call mpas_pool_get_array(fg, 't', t_fg) + call mpas_pool_get_array(fg, 'rh', rh_fg) +! 
u10(:) = 0.0 +! v10(:) = 0.0 + q2(:) = 0.0 + rh2(:) = 0.0 + t2m(:) = 0.0 + + do k=1,config_nfglevels + if (vert_level(k) == 200100.0) then +! u10(:) = u_fg(k,:) +! v10(:) = v_fg(k,:) + t2m(:) = t_fg(k,:) + rh2(:) = rh_fg(k,:) + do iCell = 1, nCells + es = svp1 * 10.0_RKIND * exp(svp2 * (t2m(iCell)-svpt0) / (t2m(iCell)-svp3)) + es = min(es, 0.99_RKIND * 0.01_RKIND * psfc(iCell)) + rs = 0.622_RKIND * es * 100.0_RKIND / (psfc(iCell) - es * 100.0_RKIND) + q2(iCell) = 0.01_RKIND * rs * rh2(iCell) + end do + end if + end do + + ! ! For isobaric data, fill in the 3-d pressure field; otherwise, ensure @@ -3910,7 +4181,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state if (istatus == 0) then call read_next_met_field(field, istatus) do while (istatus == 0) - if (index(field % field, 'SEAICE') /= 0) then + if (trim(field % field) == 'SEAICE') then write(0,*) 'PROCESSING SEAICE' @@ -3930,7 +4201,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state lon1 = real(field % startlon,RKIND)) end if - if (index(field % field, 'SEAICE') /= 0) then + if (trim(field % field) == 'SEAICE') then nInterpPoints = nCells latPoints => latCell lonPoints => lonCell @@ -3958,6 +4229,112 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state if (x < 0.5) then lon = lon + 360.0 call latlon_to_ij(proj, lat, lon, x, y) + else if (x >= real(field%nx)+0.5) then + lon = lon - 360.0 + call latlon_to_ij(proj, lat, lon, x, y) + end if + if (y < 0.5) then + y = 1.0 + else if (y >= real(field%ny)+0.5) then + y = real(field%ny) + end if + if (ndims == 1) then + destField1d(i) = interp_sequence(x, y, 1, rslab, 1, field % nx, 1, field % ny, 1, 1, msgval, interp_list, 1) + if (destField1d(i) == msgval) destField1d(i) = fillval + else if (ndims == 2) then + destField2d(k,i) = interp_sequence(x, y, 1, rslab, 1, field % nx, 1, field % ny, 1, 1, msgval, interp_list, 1) + if (destField2d(k,i) == msgval) destField2d(k,i) = 
fillval + end if + else + if (ndims == 1) then + destField1d(i) = fillval + else if (ndims == 2) then + destField2d(k,i) = fillval + end if + end if + end do + deallocate(rslab) + + end if + + deallocate(field % slab) + call read_next_met_field(field, istatus) + end do + end if + + call read_met_close() + + + ! + ! Get OMLD climatology from a separate file + ! + call read_met_init('OMLD', .true., config_start_time(1:13), istatus) + + if (istatus /= 0) then + write(0,*) 'OMLD file not found...' + end if + + if (istatus == 0) then + call read_next_met_field(field, istatus) + do while (istatus == 0) + if (index(field % field, 'OMLD') /= 0) then + +write(0,*) 'PROCESSING OMLD' + + ! + ! Set up projection + ! + call map_init(proj) + + if (field % iproj == PROJ_LATLON) then + call map_set(PROJ_LATLON, proj, & + latinc = real(field % deltalat,RKIND), & + loninc = real(field % deltalon,RKIND), & + knowni = 1.0_RKIND, & + knownj = 1.0_RKIND, & + lat1 = real(field % startlat,RKIND), & + lon1 = real(field % startlon,RKIND)) + else + write(stderrUnit,*) 'ERROR: We were expecting OMLD field to be on a lat-lon projection...' 
+ end if + + if (index(field % field, 'OMLD') /= 0) then + nInterpPoints = nCells + latPoints => latCell + lonPoints => lonCell + call mpas_pool_get_array(state, 'h_oml_initial', destField1d) + ndims = 1 + end if + + interp_list(1) = FOUR_POINT + interp_list(2) = W_AVERAGE4 + interp_list(3) = SEARCH + interp_list(4) = 0 + + masked = 1 + fillval = 0.0 + msgval = 0.0 + mask_array => landmask + + + allocate(rslab(field % nx, field % ny)) + rslab(:,:) = field % slab(:,:) + do i=1,nInterpPoints + if (mask_array(i) /= masked) then + lat = latPoints(i)*DEG_PER_RAD + lon = lonPoints(i)*DEG_PER_RAD + call latlon_to_ij(proj, lat, lon, x, y) + if (x < 0.5) then + lon = lon + 360.0 + call latlon_to_ij(proj, lat, lon, x, y) + else if (x >= real(field%nx)+0.5) then + lon = lon - 360.0 + call latlon_to_ij(proj, lat, lon, x, y) + end if + if (y < 0.5) then + y = 1.0 + else if (y >= real(field%ny)+0.5) then + y = real(field%ny) end if if (ndims == 1) then destField1d(i) = interp_sequence(x, y, 1, rslab, 1, field % nx, 1, field % ny, 1, 1, msgval, interp_list, 1) @@ -3985,6 +4362,7 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state call read_met_close() + if (allocated(maskslab)) deallocate(maskslab) ! Freeze really cold ocean @@ -4020,93 +4398,86 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state ! T sorted_arr(:,:) = -999.0 - do k=1,nfglevels_actual + do k = 1, nfglevels_actual sorted_arr(1,k) = z_fg(k,iCell) -!NOSFC if (vert_level(k) == 200100.0) sorted_arr(1,k) = fg % soilz % array(iCell) if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0 sorted_arr(2,k) = t_fg(k,iCell) end do call mpas_quicksort(nfglevels_actual, sorted_arr) - do k=1,nVertLevels + do k = 1, nVertLevels target_z = 0.5 * (zgrid(k,iCell) + zgrid(k+1,iCell)) -! 
t(k,iCell) = vertical_interp(target_z, nfglevels_actual, sorted_arr, order=1, extrap=1) t(k,iCell) = vertical_interp(target_z, nfglevels_actual-1, & - sorted_arr(:,1:nfglevels_actual-1), order=1, extrap=1) + sorted_arr(:,1:nfglevels_actual-1), order=1, extrap=1) end do ! RH sorted_arr(:,:) = -999.0 - do k=1,nfglevels_actual + relhum(:,iCell) = 0._RKIND + do k = 1, nfglevels_actual sorted_arr(1,k) = z_fg(k,iCell) -!NOSFC if (vert_level(k) == 200100.0) sorted_arr(1,k) = fg % soilz % array(iCell) if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0 sorted_arr(2,k) = rh_fg(k,iCell) end do call mpas_quicksort(nfglevels_actual, sorted_arr) - do k=nVertLevels,1,-1 + do k = nVertLevels, 1, -1 + target_z = 0.5 * (zgrid(k,iCell) + zgrid(k+1,iCell)) + relhum(k,iCell) = vertical_interp(target_z, nfglevels_actual-1, & + sorted_arr(:,1:nfglevels_actual-1), order=1, extrap=0) + if (target_z < z_fg(1,iCell) .and. k < nVertLevels) relhum(k,iCell) = relhum(k+1,iCell) + end do + + + ! SPECHUM: if first-guess values are negative, set those values to zero before + ! vertical interpolation. + sorted_arr(:,:) = -999.0 + spechum(:,iCell) = 0._RKIND + do k = 1, nfglevels_actual + sorted_arr(1,k) = z_fg(k,iCell) + if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0 + sorted_arr(2,k) = max(0._RKIND,sh_fg(k,iCell)) + end do + call mpas_quicksort(nfglevels_actual, sorted_arr) + do k = nVertLevels, 1, -1 target_z = 0.5 * (zgrid(k,iCell) + zgrid(k+1,iCell)) -! scalars(index_qv,k,iCell) = vertical_interp(target_z, nfglevels_actual, sorted_arr, order=1, extrap=0) - scalars(index_qv,k,iCell) = vertical_interp(target_z, nfglevels_actual-1, & - sorted_arr(:,1:nfglevels_actual-1), order=1, extrap=1) - if (target_z < z_fg(1,iCell)) scalars(index_qv,k,iCell) = scalars(index_qv,k+1,iCell) - relhum(k,iCell) = scalars(index_qv,k,iCell) + spechum(k,iCell) = vertical_interp(target_z, nfglevels_actual-1, & + sorted_arr(:,1:nfglevels_actual-1), order=1, extrap=0) + if (target_z < z_fg(1,iCell) .and. 
k < nVertLevels) spechum(k,iCell) = spechum(k+1,iCell) end do ! GHT sorted_arr(:,:) = -999.0 - do k=1,nfglevels_actual + do k = 1, nfglevels_actual sorted_arr(1,k) = z_fg(k,iCell) -!NOSFC if (vert_level(k) == 200100.0) sorted_arr(1,k) = fg % soilz % array(iCell) if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0 sorted_arr(2,k) = z_fg(k,iCell) end do call mpas_quicksort(nfglevels_actual, sorted_arr) - do k=1,nVertLevels + do k = 1, nVertLevels target_z = 0.5 * (zgrid(k,iCell) + zgrid(k+1,iCell)) -! gfs_z(k,iCell) = vertical_interp(target_z, nfglevels_actual, sorted_arr, order=1, extrap=1) - gfs_z(k,iCell) = vertical_interp(target_z, nfglevels_actual-1, sorted_arr(:,1:nfglevels_actual-1), order=1, extrap=1) + gfs_z(k,iCell) = vertical_interp(target_z, nfglevels_actual-1, & + sorted_arr(:,1:nfglevels_actual-1), order=1, extrap=1) end do ! PRESSURE sorted_arr(:,:) = -999.0 - do k=1,nfglevels_actual + do k = 1, nfglevels_actual sorted_arr(1,k) = z_fg(k,iCell) if (vert_level(k) == 200100.0) then -!NOSFC sorted_arr(1,k) = fg % soilz % array(iCell) sorted_arr(1,k) = 99999.0 sfc_k = k end if sorted_arr(2,k) = log(p_fg(k,iCell)) end do call mpas_quicksort(nfglevels_actual, sorted_arr) - do k=1,nVertLevels + do k = 1, nVertLevels target_z = 0.5 * (zgrid(k,iCell) + zgrid(k+1,iCell)) -! pressure(k,iCell) = exp(vertical_interp(target_z, nfglevels_actual, sorted_arr, order=1, extrap=1)) pressure(k,iCell) = exp(vertical_interp(target_z, nfglevels_actual-1, & - sorted_arr(:,1:nfglevels_actual-1), order=1, extrap=1)) + sorted_arr(:,1:nfglevels_actual-1), order=1, extrap=1)) end do - - ! PRESSURE -! sorted_arr(:,:) = -999.0 -! do k=1,nfglevels_actual -! sorted_arr(1,k) = z_fg(k,iCell) -! if (vert_level(k) == 200100.0) then -!!NOSFC sorted_arr(1,k) = fg % soilz % array(iCell) -! sorted_arr(1,k) = 99999.0 -! sfc_k = k -! end if -! sorted_arr(2,k) = log(p_fg(k,iCell)) -! end do -! call mpas_quicksort(nfglevels_actual, sorted_arr) -! do k=1,nVertLevels+1 -! 
target_z = zgrid(k,iCell) -! gfs_p(k,iCell) = exp(vertical_interp(target_z, nfglevels_actual, sorted_arr, order=1, extrap=1)) -! end do - end do @@ -4171,20 +4542,63 @@ subroutine init_atm_case_gfs(block, mesh, nCells, nEdges, nVertLevels, fg, state deallocate(sorted_arr) + ! Diagnose the water vapor mixing ratios: + global_sh_min = 0._RKIND + global_sh_max = 0._RKIND + if(config_use_spechumd) then + sh_min = minval(spechum(:,1:nCellsSolve)) + sh_max = maxval(spechum(:,1:nCellsSolve)) + call mpas_dmpar_min_real(dminfo,sh_min,global_sh_min) + call mpas_dmpar_max_real(dminfo,sh_max,global_sh_max) + endif + write(0,*) + write(0,*) '--- global_sh_min = ', global_sh_min + write(0,*) '--- global_sh_max = ', global_sh_max + write(0,*) + + write(0,*) '--- config_use_spechumd = ', config_use_spechumd + if(.not. config_use_spechumd .or. (global_sh_min==0._RKIND .and. global_sh_max==0._RKIND)) then + !--- calculate the saturation mixing ratio and interpolated first-guess relative humidity: + if (config_use_spechumd) then + write(0,*) 'WARNING: config_use_spechumd=T, but specific humidity was not found in '//trim(config_met_prefix)//':'//config_start_time(1:13) + end if + write(0,*) ' *** initializing water vapor mixing ratio using first-guess relative humidity' + write(0,*) + + do k = 1, nVertLevels + do iCell = 1, nCells +! es = svp1*10.*exp(svp2*(t(k,iCell)-svpt0)/(t(k,iCell)-svp3)) +! es = min(es,0.99*0.01*pressure(k,iCell)) +! rs = 0.622*es*100. / (pressure(k,iCell)-es*100.) + rs = rslf(pressure(k,iCell),t(k,iCell)) + if(t(k,iCell) .lt. svpt0) rs = rsif(pressure(k,iCell),t(k,iCell)) + scalars(index_qv,k,iCell) = 0.01_RKIND*rs*relhum(k,iCell) + enddo + enddo + else + !--- use the interpolated first-guess specific humidity: + write(0,*) ' *** initializing water vapor mixing ratio using first-guess specific humidity' + write(0,*) + do k = 1, nVertLevels + do iCell = 1, nCells + scalars(index_qv,k,iCell) = spechum(k,iCell)/(1._RKIND-spechum(k,iCell)) + enddo + enddo + endif ! 
- ! Diagnose fields needed in initial conditions file (u, w, rho, theta, scalars) + ! Diagnose fields needed in initial conditions file (u, w, rho, theta) ! NB: At this point, "rho_zz" is simple dry density, and "theta_m" is regular potential temperature ! do iCell=1,nCells - do k=1,nVertLevels - ! QV - es = 6.112 * exp((17.27*(t(k,iCell) - 273.16))/(t(k,iCell) - 35.86)) - es = min(es,0.99*0.01*pressure(k,iCell)) ! WCS 20141003, from LF; temporary fix - rs = 0.622 * es * 100. / (pressure(k,iCell) - es * 100.) - scalars(index_qv,k,iCell) = 0.01 * rs * scalars(index_qv,k,iCell) + ! Q2 + es = 6.112 * exp((17.27*(t2m(iCell) - 273.16))/(t2m(iCell) - 35.86)) + rs = 0.622 * es * 100. / (psfc(iCell) - es * 100.) + q2(iCell) = 0.01 * rs * rh2(iCell) + + do k=1,nVertLevels ! PI p(k,iCell) = (pressure(k,iCell) / p0) ** (rgas / cp) @@ -4458,6 +4872,8 @@ real (kind=RKIND) function vertical_interp(target_z, nz, zf, order, extrap, surf else if (extrap_type == 1) then slope = (zf(2,2) - zf(2,1)) / (zf(1,2) - zf(1,1)) vertical_interp = zf(2,1) + slope * (target_z - zf(1,1)) + else if (extrap_type == 2) then + vertical_interp = zf(2,1) - (target_z - zf(1,1))*0.0065 end if return end if @@ -4687,5 +5103,4 @@ subroutine decouple_variables(mesh, nCells, nVertLevels, state, diag) end subroutine decouple_variables - end module init_atm_cases diff --git a/src/core_init_atmosphere/mpas_init_atm_core.F b/src/core_init_atmosphere/mpas_init_atm_core.F index 8e2c4e2cfc..b261adf3be 100644 --- a/src/core_init_atmosphere/mpas_init_atm_core.F +++ b/src/core_init_atmosphere/mpas_init_atm_core.F @@ -16,7 +16,6 @@ function init_atm_core_init(domain, startTimeStamp) result(ierr) use mpas_derived_types use mpas_stream_manager use mpas_io_streams, only : MPAS_STREAM_NEAREST - use mpas_configure use init_atm_cases implicit none @@ -88,7 +87,7 @@ function init_atm_core_run(domain) result(ierr) ! call atm_initialize_advection_rk(mesh) ! 
call atm_initialize_deformation_weights(mesh) - call mpas_stream_mgr_write(domain % streamManager, streamID='output', ierr=ierr) + call mpas_stream_mgr_write(domain % streamManager, ierr=ierr) call mpas_stream_mgr_reset_alarms(domain % streamManager, direction=MPAS_STREAM_OUTPUT, ierr=ierr) end function init_atm_core_run @@ -99,6 +98,7 @@ function init_atm_core_finalize(domain) result(ierr) use mpas_derived_types use mpas_decomp use mpas_stream_manager + use mpas_io_units, only : stderrUnit implicit none @@ -109,6 +109,11 @@ function init_atm_core_finalize(domain) result(ierr) ierr = 0 call mpas_decomp_destroy_decomp_list(domain % decompositions) + + write(stderrUnit,'(a)') '' + write(stderrUnit,'(a)') '********************************************************' + write(stderrUnit,'(a)') ' Finished running the init_atmosphere core' + write(stderrUnit,'(a)') '********************************************************' end function init_atm_core_finalize diff --git a/src/core_init_atmosphere/mpas_init_atm_core_interface.F b/src/core_init_atmosphere/mpas_init_atm_core_interface.F index d14cc1ccc5..04fe63f541 100644 --- a/src/core_init_atmosphere/mpas_init_atm_core_interface.F +++ b/src/core_init_atmosphere/mpas_init_atm_core_interface.F @@ -7,6 +7,7 @@ ! 
module init_atm_core_interface + use mpas_attlist contains @@ -42,6 +43,7 @@ subroutine init_atm_setup_core(core) core % get_mesh_stream => init_atm_get_mesh_stream core % setup_immutable_streams => init_atm_setup_immutable_streams core % setup_derived_dimensions => init_atm_setup_derived_dimensions + core % setup_decomposed_dimensions => init_atm_setup_decomposed_dimensions core % setup_block => init_atm_setup_block core % setup_namelist => init_atm_setup_namelists @@ -50,6 +52,14 @@ subroutine init_atm_setup_core(core) #include "inc/core_variables.inc" +write(0,*) '' +#ifdef SINGLE_PRECISION +write(0,'(a)') 'Using default single-precision reals' +#else +write(0,'(a)') 'Using default double-precision reals' +#endif +write(0,*) '' + end subroutine init_atm_setup_core @@ -92,9 +102,9 @@ end subroutine init_atm_setup_domain !> not allocated until after this routine has been called. ! !----------------------------------------------------------------------- - function init_atm_setup_packages(configs, packages) result(ierr) + function init_atm_setup_packages(configs, packages, iocontext) result(ierr) - use mpas_derived_types, only : mpas_pool_type + use mpas_derived_types, only : mpas_pool_type, mpas_io_context_type use mpas_pool_routines, only : mpas_pool_get_config, mpas_pool_get_package use mpas_io_units, only : stderrUnit @@ -102,10 +112,12 @@ function init_atm_setup_packages(configs, packages) result(ierr) type (mpas_pool_type), intent(inout) :: configs type (mpas_pool_type), intent(inout) :: packages + type (mpas_io_context_type), intent(inout) :: iocontext integer :: ierr - logical, pointer :: initial_conds, sfc_update, vertical_stage_in, vertical_stage_out, met_stage_in, met_stage_out - logical, pointer :: config_static_interp, config_vertical_grid, config_met_interp + logical, pointer :: initial_conds, sfc_update + logical, pointer :: gwd_stage_in, vertical_stage_in, vertical_stage_out, met_stage_in, met_stage_out + logical, pointer :: config_native_gwd_static, 
config_static_interp, config_vertical_grid, config_met_interp integer, pointer :: config_init_case @@ -113,6 +125,7 @@ function init_atm_setup_packages(configs, packages) result(ierr) call mpas_pool_get_config(configs, 'config_init_case', config_init_case) call mpas_pool_get_config(configs, 'config_static_interp', config_static_interp) + call mpas_pool_get_config(configs, 'config_native_gwd_static', config_native_gwd_static) call mpas_pool_get_config(configs, 'config_vertical_grid', config_vertical_grid) call mpas_pool_get_config(configs, 'config_met_interp', config_met_interp) @@ -122,6 +135,9 @@ function init_atm_setup_packages(configs, packages) result(ierr) nullify(sfc_update) call mpas_pool_get_package(packages, 'sfc_updateActive', sfc_update) + nullify(gwd_stage_in) + call mpas_pool_get_package(packages, 'gwd_stage_inActive', gwd_stage_in) + nullify(vertical_stage_in) call mpas_pool_get_package(packages, 'vertical_stage_inActive', vertical_stage_in) @@ -136,6 +152,7 @@ function init_atm_setup_packages(configs, packages) result(ierr) if (.not. associated(initial_conds) .or. & .not. associated(sfc_update) .or. & + .not. associated(gwd_stage_in) .or. & .not. associated(vertical_stage_in) .or. & .not. associated(vertical_stage_out) .or. & .not. associated(met_stage_in) .or. & @@ -156,11 +173,19 @@ function init_atm_setup_packages(configs, packages) result(ierr) end if if (config_init_case == 7) then - vertical_stage_in = (config_vertical_grid .and. .not. config_static_interp) - vertical_stage_out = (config_vertical_grid .and. .not. config_met_interp) + gwd_stage_in = (config_native_gwd_static .and. .not. config_static_interp) + vertical_stage_in = (config_vertical_grid .and. .not. config_static_interp) + vertical_stage_out = (config_vertical_grid .and. .not. config_met_interp) met_stage_in = (config_met_interp .and. .not. config_vertical_grid) met_stage_out = config_met_interp + else if (config_init_case == 8) then + gwd_stage_in = .false. 
+ vertical_stage_in = .true. + vertical_stage_out = .false. + met_stage_in = .false. + met_stage_out = .false. else + gwd_stage_in = .false. vertical_stage_in = .false. vertical_stage_out = .false. met_stage_in = .false. diff --git a/src/core_init_atmosphere/mpas_init_atm_gwd.F b/src/core_init_atmosphere/mpas_init_atm_gwd.F new file mode 100644 index 0000000000..f423bafe3a --- /dev/null +++ b/src/core_init_atmosphere/mpas_init_atm_gwd.F @@ -0,0 +1,923 @@ +! Copyright (c) 2016, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +module mpas_init_atm_gwd + + use mpas_framework + use mpas_timekeeping + + public :: compute_gwd_fields + + private + + real (kind=RKIND), parameter :: Re = 6371229.0_RKIND ! Earth radius in MPAS-Atmosphere + real (kind=RKIND), parameter :: Pi = 2.0_RKIND * asin(1.0_RKIND) + real (kind=RKIND), parameter :: rad2deg = 180.0_RKIND / Pi + + integer, parameter :: topo_x = 43200 ! x-dimension of global 30-arc-second topography array + integer, parameter :: topo_y = 21600 ! y-dimension of global 30-arc-second topography array + real (kind=RKIND), parameter :: pts_per_degree = real(topo_x,RKIND) / 360.0_RKIND + real (kind=RKIND), parameter :: start_lat = -90.0_RKIND + real (kind=RKIND), parameter :: start_lon = -180.0_RKIND + + ! Nominal delta-x (in meters) for sub-grid topography cells + real (kind=RKIND), parameter :: sg_delta = 2.0 * Pi * Re / (360.0_RKIND * real(pts_per_degree,RKIND)) + + real (kind=R4KIND), dimension(:,:), pointer :: topo ! Global 30-arc-second topography + real (kind=RKIND), dimension(:,:), pointer :: box ! Subset of topography covering a grid cell + real (kind=RKIND), dimension(:,:), pointer :: dxm ! 
Size (meters) in zonal direction of a grid cell + real (kind=RKIND) :: box_mean ! Mean value of topography in box + integer :: nx, ny ! Dimensions of box covering grid cell + + + contains + + + !*********************************************************************** + ! + ! function compute_gwd_fields + ! + !> \brief Main routine for computing GWDO fields on an MPAS mesh + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + !> This is the main routine for computing GWDO statistics on an MPAS mesh. + !> Currently computed fields are: + !> var2d + !> con + !> ol{1,2,3,4} + !> oa{1,2,3,4} + ! + !----------------------------------------------------------------------- + function compute_gwd_fields(domain) result(iErr) + + use mpas_derived_types + use mpas_kind_types + use mpas_timer + use mpas_stream_manager + + implicit none + + type (domain_type), intent(inout) :: domain + integer :: iErr + + type (mpas_pool_type), pointer :: mesh, state + integer :: iCell, i + real (kind=RKIND) :: dc + real (kind=RKIND), pointer :: config_gwd_cell_scaling + integer, pointer :: nCells + integer, pointer :: nEdges + integer, dimension(:), pointer :: nEdgesOnCell + integer, dimension(:,:), pointer :: edgesOnCell + logical :: onUnitSphere + real (kind=RKIND), pointer :: sphere_radius + real (kind=RKIND), dimension(:), pointer :: latCell, lonCell, dcEdge + real (kind=RKIND), dimension(:), pointer :: var2d, con, oa1, oa2, oa3, oa4, ol1, ol2, ol3, ol4 + real (kind=RKIND), dimension(:), pointer :: elvmax, htheta, hgamma, hsigma + character(len=StrKIND), pointer :: config_geog_data_path + + + allocate(topo(topo_x,topo_y)) + + call mpas_pool_get_subpool(domain % blocklist % structs, 'mesh', mesh) + call mpas_pool_get_subpool(domain % blocklist % structs, 'state', state) + + call mpas_pool_get_config(mesh, 'sphere_radius', sphere_radius) + call mpas_pool_get_config(domain % configs, 'config_geog_data_path', config_geog_data_path) + call mpas_pool_get_config(domain % configs, 
'config_gwd_cell_scaling', config_gwd_cell_scaling) + + ! + ! Retrieve pointers to arrays holding the latitudes and longitudes of + ! cells, and arrays that will hold the computed GWDO statistics + ! + call mpas_pool_get_dimension(mesh, 'nCells', nCells) + call mpas_pool_get_dimension(mesh, 'nEdges', nEdges) + call mpas_pool_get_array(mesh, 'latCell', latCell) + call mpas_pool_get_array(mesh, 'lonCell', lonCell) + call mpas_pool_get_array(mesh, 'nEdgesOnCell', nEdgesOnCell) + call mpas_pool_get_array(mesh, 'edgesOnCell', edgesOnCell) + call mpas_pool_get_array(mesh, 'dcEdge', dcEdge) + call mpas_pool_get_array(mesh, 'var2d', var2d) + call mpas_pool_get_array(mesh, 'con', con) + call mpas_pool_get_array(mesh, 'ol1', ol1) + call mpas_pool_get_array(mesh, 'ol2', ol2) + call mpas_pool_get_array(mesh, 'ol3', ol3) + call mpas_pool_get_array(mesh, 'ol4', ol4) + call mpas_pool_get_array(mesh, 'oa1', oa1) + call mpas_pool_get_array(mesh, 'oa2', oa2) + call mpas_pool_get_array(mesh, 'oa3', oa3) + call mpas_pool_get_array(mesh, 'oa4', oa4) +! call mpas_pool_get_array(mesh, 'elvmax', elvmax) +! call mpas_pool_get_array(mesh, 'theta', htheta) +! call mpas_pool_get_array(mesh, 'gamma', hgamma) +! call mpas_pool_get_array(mesh, 'sigma', hsigma) + + iErr = read_global_30s_topo(config_geog_data_path) + if (iErr /= 0) then + write(stderrUnit,*) 'Error reading global 30-arc-sec topography for GWD statistics' + return + end if + + ! + ! It is possible that this code is called before the mesh fields have been scaled + ! up to "Earth-sized". Because we need "Earth" distances to cut out bounding + ! boxes from topography, we try here to detect whether we are on an unscaled + ! unit sphere or not: if the maximum dcEdge value is less than 1.0, assume this + ! is the case. + ! + if (maxval(dcEdge(1:nEdges)) < 1.0_RKIND) then + write(stderrUnit,*) 'Computing GWD statistics on a unit sphere' + onUnitSphere = .true. + else + onUnitSphere = .false. 
+ end if + + if (config_gwd_cell_scaling /= 1.0) then + write(stderrUnit,*) 'Using effective cell diameters scaled by a factor of ', config_gwd_cell_scaling + write(stderrUnit,*) 'in the computation of GWD static fields.' + end if + + ! + ! Main loop to compute each of the GWDO fields for every horizontal + ! grid cell in the mesh. + ! + do iCell=1,nCells + + ! + ! First, get an estimate of the mean diameter (in meters) of the grid + ! cell by averaging the distances to each of the neighboring cells + ! + dc = 0.0 + do i=1,nEdgesOnCell(iCell) + dc = dc + dcEdge(edgesOnCell(i,iCell)) + end do + dc = dc / real(nEdgesOnCell(iCell),RKIND) + if (onUnitSphere) then + dc = dc * sphere_radius + end if + dc = dc * config_gwd_cell_scaling + + ! + ! Cut out a rectangular piece of the global 30-arc-second topography + ! data that is centered at the lat/lon of the current cell being + ! processed and that is just large enough to cover the cell. The + ! rectangular array of topography data is stored in the module + ! variable 'box', and the dimensions of this array are given by the + ! module variables 'nx' and 'ny'. The get_box() routine also + ! computes the mean elevation in the array and stores that value in + ! the module variable 'box_mean'. + ! + call get_box(latCell(iCell)*rad2deg, lonCell(iCell)*rad2deg, dc) + + ! + ! With a box of 30-arc-second data for the current grid cell, call + ! subroutines to compute each sub-grid orography statistic + ! + var2d(iCell) = get_var() + con(iCell) = get_con() + oa1(iCell) = get_oa1() + oa2(iCell) = get_oa2() + oa3(iCell) = get_oa3() + oa4(iCell) = get_oa4() + ol1(iCell) = get_ol1() + ol2(iCell) = get_ol2() + ol3(iCell) = get_ol3() + ol4(iCell) = get_ol4() +! elvmax(iCell) = get_elvmax() +! htheta(iCell) = get_htheta() +! hgamma(iCell) = get_hgamma() +! 
hsigma(iCell) = get_hsigma() + end do + + deallocate(topo) + + iErr = 0 + + end function compute_gwd_fields + + + !*********************************************************************** + ! + ! function read_global_30s_topo + ! + !> \brief Reads global 30-arc-second topography into 'topo' module variable + !> \author Michael Duda + !> \date 31 October 2016 + !> \details + !> This subroutine reads the global 30-arc-second topography from the subdirectory + !> 'topo_30s' of the path provided as an argument. + ! + !----------------------------------------------------------------------- + function read_global_30s_topo(path) result(iErr) + + implicit none + + character(len=*), intent(in) :: path + + integer :: iErr + + integer, parameter :: tile_x = 1200 ! x-dimension of each tile of global 30-arc-second topography + integer, parameter :: tile_y = 1200 ! y-dimension of each tile of global 30-arc-second topography + integer, parameter :: tile_bdr = 3 ! number of layers of border/halo points surrounding each tile + + integer :: istatus + integer :: ix, iy + integer :: isigned, endian, wordsize, nx, ny, nz + real (kind=R4KIND) :: scalefactor + real (kind=R4KIND), dimension(:,:,:), allocatable :: tile + character(len=StrKIND) :: filename + + allocate(tile(tile_x+2*tile_bdr,tile_y+2*tile_bdr,1)) + + isigned = 1 + endian = 0 + wordsize = 2 + scalefactor = 1.0 + nx = tile_x + 2*tile_bdr + ny = tile_y + 2*tile_bdr + nz = 1 + + do iy=1,topo_y,tile_y + do ix=1,topo_x,tile_x + write(filename,'(a,i5.5,a1,i5.5,a1,i5.5,a1,i5.5)') trim(path)//'/topo_30s/', ix, '-', (ix+tile_x-1), '.', & + iy, '-', (iy+tile_y-1) + call read_geogrid(filename, len_trim(filename), tile, nx, ny, nz, isigned, endian, & + scalefactor, wordsize, istatus) + if (istatus /= 0) then + write(stderrUnit,*) 'Error reading topography tile '//trim(filename) + iErr = 1 + return + end if + + topo(ix:(ix+tile_x-1),iy:(iy+tile_y-1)) = tile((tile_bdr+1):(tile_x+tile_bdr),(tile_bdr+1):(tile_y+tile_bdr),1) + + end do + end 
do + + deallocate(tile) + + iErr = 0 + + end function read_global_30s_topo + + + !*********************************************************************** + ! + ! subroutine get_box + ! + !> \brief Cuts out a rectangular box of data centered at a given (lat,lon) + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + !> This subroutine extracts a rectangular sub-array of the 30-arc-second + !> global topography dataset, stored in the module variable 'topo'; the + !> sub-array will be centered at the (lat,lon) specified in the argument + !> list, and will have a width and height large enough to span 'dx' meters. + !> The extracted sub-array is stored in the module variable 'box', and the + !> dimensions of this sub-array are stored in the module variables 'nx' and + !> 'ny'. + !> Since the mean value of the terrain in a grid cell is needed by many of + !> the GWDO statistics computations, this mean value is also computed by + !> this subroutine and stored in the module variable 'box_mean'. + ! + !----------------------------------------------------------------------- + subroutine get_box(lat, lon, dx) + + implicit none + + real (kind=RKIND), intent(in) :: lat, lon, dx + + integer :: i, j, ii, jj, ic, jc + real (kind=RKIND) :: sg_lat + + ! + ! Get number of points to extract in the zonal direction + ! + if (cos(lat/rad2deg) > (2.0 * pts_per_degree * dx * 180.0) / (real(topo_x,RKIND) * Pi * Re)) then + nx = ceiling((180.0 * dx * pts_per_degree) / (Pi * Re * cos(lat/rad2deg))) + else + nx = topo_x / 2 + end if + + ! + ! Get number of points to extract in the meridional direction + ! + ny = ceiling((180.0 * dx * pts_per_degree) / (Pi * Re)) + + ! + ! Find coordinates in global topography array of the box center + ! 
+ ic = nint((lon - start_lon) * pts_per_degree) + 1 + jc = nint((lat - start_lat) * pts_per_degree) + 1 + + if (ic <= 0) ic = ic + topo_x + if (ic > topo_x) ic = ic - topo_x + + + if (associated(box)) deallocate(box) + allocate(box(nx,ny)) + + if (associated(dxm)) deallocate(dxm) + allocate(dxm(nx,ny)) + + ! + ! Extract sub-array (box) from global array; must properly account for + ! the periodicity in the longitude coordinate, as well as the poles + ! + box_mean = 0.0 + do j=1,ny + do i=1,nx + + ii = i - nx/2 + ic + jj = j - ny/2 + jc + + if (jj <= 0) then + jj = -jj + 1 + ii = ii + topo_y + end if + if (jj > topo_y) then + jj = topo_y - (jj - topo_y - 1) + ii = ii + topo_y + end if + do while (ii <= 0) + ii = ii + topo_x + end do + do while (ii > topo_x) + ii = ii - topo_x + end do + + box(i,j) = topo(ii,jj) + sg_lat = (start_lat + (real(jj-1,RKIND) + 0.5) / pts_per_degree) / rad2deg ! Add 0.5 for cell center + dxm(i,j) = sg_delta * cos(sg_lat) + box_mean = box_mean + box(i,j) + + end do + end do + + ! + ! Compute mean topography in the extracted box + ! + box_mean = box_mean / real(nx*ny, RKIND) + + end subroutine get_box + + + !*********************************************************************** + ! + ! function get_var + ! + !> \brief Computes standard deviation of sub-grid-scale terrain + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + ! + !----------------------------------------------------------------------- + real (kind=RKIND) function get_var() + + implicit none + + integer :: i, j + real (kind=RKIND) :: s2 + + s2 = 0.0 + + do j=1,ny + do i=1,nx + s2 = s2 + (box(i,j) - box_mean)**2 + end do + end do + + get_var = sqrt(s2 / real(nx*ny,RKIND)) + + end function get_var + + + !*********************************************************************** + ! + ! function get_con + ! + !> \brief Computes orographic convexityof sub-grid-scale terrain + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + ! 
+ !----------------------------------------------------------------------- + real (kind=RKIND) function get_con() + + implicit none + + integer :: i, j + real (kind=RKIND) :: s2, s4, var + + s2 = 0.0 + s4 = 0.0 + + do j=1,ny + do i=1,nx + s2 = s2 + (box(i,j) - box_mean)**2 + s4 = s4 + (box(i,j) - box_mean)**4 + end do + end do + + var = s2 / real(nx*ny,RKIND) + + if (abs(var) < 1.0e-5) then + get_con = 0.0 + else + get_con = s4 / (var**2 * real(nx*ny,RKIND)) + end if + + end function get_con + + + !*********************************************************************** + ! + ! function get_oa1 + ! + !> \brief Computes orographic asymmetry in the West direction + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + !> This function computes the sub-grid orographic asymmetry following + !> the comment from N. Wood in the footnote of Kim and Doyle (QRJMS, 2005). + ! + !----------------------------------------------------------------------- + real (kind=RKIND) function get_oa1() + + implicit none + + integer :: i, j + integer :: nu, nd + + nu = 0 + nd = 0 + do j=1,ny + do i=1,nx/2 + if (box(i,j) > box_mean) nu = nu + 1 + end do + do i=nx/2+1,nx + if (box(i,j) > box_mean) nd = nd + 1 + end do + end do + + if (nu + nd > 0) then + get_oa1 = real((nu - nd),RKIND) / real((nu + nd),RKIND) + else + get_oa1 = 0.0 + end if + + end function get_oa1 + + + !*********************************************************************** + ! + ! function get_oa2 + ! + !> \brief Computes orographic asymmetry in the South direction + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + !> This function computes the sub-grid orographic asymmetry following + !> the comment from N. Wood in the footnote of Kim and Doyle (QRJMS, 2005). + ! 
+ !----------------------------------------------------------------------- + real (kind=RKIND) function get_oa2() + + implicit none + + integer :: i, j + integer :: nu, nd + + nu = 0 + nd = 0 + do j=1,ny/2 + do i=1,nx + if (box(i,j) > box_mean) nu = nu + 1 + end do + end do + do j=ny/2+1,ny + do i=1,nx + if (box(i,j) > box_mean) nd = nd + 1 + end do + end do + + if (nu + nd > 0) then + get_oa2 = real((nu - nd),RKIND) / real((nu + nd),RKIND) + else + get_oa2 = 0.0 + end if + + end function get_oa2 + + + !*********************************************************************** + ! + ! function get_oa3 + ! + !> \brief Computes orographic asymmetry in the South-West direction + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + !> This function computes the sub-grid orographic asymmetry following + !> the comment from N. Wood in the footnote of Kim and Doyle (QRJMS, 2005). + ! + !----------------------------------------------------------------------- + real (kind=RKIND) function get_oa3() + + implicit none + + integer :: i, j + integer :: nu, nd + real (kind=RKIND) :: ratio + + nu = 0 + nd = 0 + ratio = real(ny,RKIND)/real(nx,RKIND) + do j=1,ny + do i=1,nx + if (nint(real(i,RKIND) * ratio) < (ny - j)) then + if (box(i,j) > box_mean) nu = nu + 1 + else + if (box(i,j) > box_mean) nd = nd + 1 + end if + end do + end do + + if (nu + nd > 0) then + get_oa3 = real((nu - nd),RKIND) / real((nu + nd),RKIND) + else + get_oa3 = 0.0 + end if + + end function get_oa3 + + + !*********************************************************************** + ! + ! function get_oa4 + ! + !> \brief Computes orographic asymmetry in the North-West direction + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + !> This function computes the sub-grid orographic asymmetry following + !> the comment from N. Wood in the footnote of Kim and Doyle (QRJMS, 2005). + ! 
+ !----------------------------------------------------------------------- + real (kind=RKIND) function get_oa4() + + implicit none + + integer :: i, j + integer :: nu, nd + real (kind=RKIND) :: ratio + + nu = 0 + nd = 0 + ratio = real(ny,RKIND)/real(nx,RKIND) + do j=1,ny + do i=1,nx + if (nint(real(i,RKIND) * ratio) < j) then + if (box(i,j) > box_mean) nu = nu + 1 + else + if (box(i,j) > box_mean) nd = nd + 1 + end if + end do + end do + + if (nu + nd > 0) then + get_oa4 = real((nu - nd),RKIND) / real((nu + nd),RKIND) + else + get_oa4 = 0.0 + end if + + end function get_oa4 + + + !*********************************************************************** + ! + ! function get_ol1 + ! + !> \brief Computes orographic effective length for Westerly flow + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + ! + !----------------------------------------------------------------------- + real (kind=RKIND) function get_ol1() + + implicit none + + integer :: i, j + integer :: nw + integer :: nt + + nw = 0 + nt = 0 + + do j=ny/4,3*ny/4 + do i=1,nx + if (box(i,j) > box_mean) nw = nw + 1 + nt = nt + 1 + end do + end do + + get_ol1 = real(nw,RKIND) / real(nt,RKIND) + + end function get_ol1 + + + !*********************************************************************** + ! + ! function get_ol2 + ! + !> \brief Computes orographic effective length for Southerly flow + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + ! + !----------------------------------------------------------------------- + real (kind=RKIND) function get_ol2() + + implicit none + + integer :: i, j + integer :: nw + integer :: nt + + nw = 0 + nt = 0 + + do j=1,ny + do i=nx/4,3*nx/4 + if (box(i,j) > box_mean) nw = nw + 1 + nt = nt + 1 + end do + end do + + get_ol2 = real(nw,RKIND) / real(nt,RKIND) + + end function get_ol2 + + + !*********************************************************************** + ! + ! function get_ol3 + ! 
+ !> \brief Computes orographic effective length for South-Westerly flow + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + ! + !----------------------------------------------------------------------- + real (kind=RKIND) function get_ol3() + + implicit none + + integer :: i, j + integer :: nw + integer :: nt + + nw = 0 + nt = 0 + + do j=1,ny/2 + do i=1,nx/2 + if (box(i,j) > box_mean) nw = nw + 1 + nt = nt + 1 + end do + end do + do j=ny/2+1,ny + do i=nx/2+1,nx + if (box(i,j) > box_mean) nw = nw + 1 + nt = nt + 1 + end do + end do + + get_ol3 = real(nw,RKIND) / real(nt,RKIND) + + end function get_ol3 + + + !*********************************************************************** + ! + ! function get_ol4 + ! + !> \brief Computes orographic effective length for North-Westerly flow + !> \author Michael Duda + !> \date 29 May 2015 + !> \details + ! + !----------------------------------------------------------------------- + real (kind=RKIND) function get_ol4() + + implicit none + + integer :: i, j + integer :: nw + integer :: nt + + nw = 0 + nt = 0 + + do j=ny/2+1,ny + do i=1,nx/2 + if (box(i,j) > box_mean) nw = nw + 1 + nt = nt + 1 + end do + end do + do j=1,ny/2 + do i=nx/2+1,nx + if (box(i,j) > box_mean) nw = nw + 1 + nt = nt + 1 + end do + end do + + get_ol4 = real(nw,RKIND) / real(nt,RKIND) + + end function get_ol4 + + + !*********************************************************************** + ! + ! function get_elvmax + ! + !> \brief Computes maximum subgrid orography height + !> \author Michael Duda + !> \date 20 December 2015 + !> \details + ! + !----------------------------------------------------------------------- + real (kind=RKIND) function get_elvmax() + + implicit none + + integer :: i, j + + get_elvmax = box(1,1) + + do j=1,ny + do i=1,nx + if (box(i,j) > get_elvmax) then + get_elvmax = box(i,j) + end if + end do + end do + + end function get_elvmax + + + !*********************************************************************** + ! + ! 
function get_htheta + ! + !> \brief Computes angle of principle axis of the gradient correlation tensor + !> \author Michael Duda + !> \date 20 December 2015 + !> \details Computation following Lott and Miller (QJRMS 1997) + ! + !----------------------------------------------------------------------- + real (kind=RKIND) function get_htheta() + + implicit none + + integer :: i, j + real (kind=RKIND) :: dx, dy + real (kind=RKIND) :: xfp, yfp + real (kind=RKIND) :: hx2, hy2, hxy + real (kind=RKIND) :: hk, hl + + hx2 = 0.0 + hy2 = 0.0 + hxy = 0.0 + + do j=2,ny-1 + do i=2,nx-1 + dx = dxm(i,j) + dy = sg_delta + xfp = (box(i+1,j) - box(i-1,j)) / (2.0 * dx) + yfp = (box(i,j+1) - box(i,j-1)) / (2.0 * dy) + hx2 = hx2 + xfp * xfp + hy2 = hy2 + yfp * yfp + hxy = hxy + xfp * yfp + end do + end do + + hx2 = hx2 / real((nx-2)*(ny-2),RKIND) + hy2 = hy2 / real((nx-2)*(ny-2),RKIND) + hxy = hxy / real((nx-2)*(ny-2),RKIND) + + hk = 0.5 * (hx2 + hy2) + hl = 0.5 * (hx2 - hy2) + + get_htheta = 0.5 * atan2(hxy, hl) + + end function get_htheta + + + !*********************************************************************** + ! + ! function get_hgamma + ! + !> \brief Computes anisotropy of subgrid orography + !> \author Michael Duda + !> \date 20 December 2015 + !> \details Computation following Lott and Miller (QJRMS 1997) + ! 
+ !----------------------------------------------------------------------- + real (kind=RKIND) function get_hgamma() + + implicit none + + integer :: i, j + real (kind=RKIND) :: dx, dy + real (kind=RKIND) :: xfp, yfp + real (kind=RKIND) :: hx2, hy2, hxy + real (kind=RKIND) :: hk, hl, hlp + + hx2 = 0.0 + hy2 = 0.0 + hxy = 0.0 + + do j=2,ny-1 + do i=2,nx-1 + dx = dxm(i,j) + dy = sg_delta + xfp = (box(i+1,j) - box(i-1,j)) / (2.0 * dx) + yfp = (box(i,j+1) - box(i,j-1)) / (2.0 * dy) + hx2 = hx2 + xfp * xfp + hy2 = hy2 + yfp * yfp + hxy = hxy + xfp * yfp + end do + end do + + hx2 = hx2 / real((nx-2)*(ny-2),RKIND) + hy2 = hy2 / real((nx-2)*(ny-2),RKIND) + hxy = hxy / real((nx-2)*(ny-2),RKIND) + + hk = 0.5 * (hx2 + hy2) + hl = 0.5 * (hx2 - hy2) + hlp = sqrt(hl*hl + hxy*hxy) + + if ((hk + hlp) > 0.0 .and. (hk - hlp) >= 0.0) then + get_hgamma = sqrt((hk - hlp) / (hk + hlp)) + else + get_hgamma = 0.0 + end if + + end function get_hgamma + + + !*********************************************************************** + ! + ! function get_hsigma + ! + !> \brief Computes mean slope of subgrid orography + !> \author Michael Duda + !> \date 20 December 2015 + !> \details Computation following Lott and Miller (QJRMS 1997) + ! 
+ !----------------------------------------------------------------------- + real (kind=RKIND) function get_hsigma() + + implicit none + + integer :: i, j + real (kind=RKIND) :: dx, dy + real (kind=RKIND) :: xfp, yfp + real (kind=RKIND) :: hx2, hy2, hxy + real (kind=RKIND) :: hk, hl, hlp + + hx2 = 0.0 + hy2 = 0.0 + hxy = 0.0 + + do j=2,ny-1 + do i=2,nx-1 + dx = dxm(i,j) + dy = sg_delta + xfp = (box(i+1,j) - box(i-1,j)) / (2.0 * dx) + yfp = (box(i,j+1) - box(i,j-1)) / (2.0 * dy) + hx2 = hx2 + xfp * xfp + hy2 = hy2 + yfp * yfp + hxy = hxy + xfp * yfp + end do + end do + + hx2 = hx2 / real((nx-2)*(ny-2),RKIND) + hy2 = hy2 / real((nx-2)*(ny-2),RKIND) + hxy = hxy / real((nx-2)*(ny-2),RKIND) + + hk = 0.5 * (hx2 + hy2) + hl = 0.5 * (hx2 - hy2) + hlp = sqrt(hl*hl + hxy*hxy) + + get_hsigma = sqrt(hk + hlp) + + end function get_hsigma + +end module mpas_init_atm_gwd diff --git a/src/core_init_atmosphere/mpas_init_atm_llxy.F b/src/core_init_atmosphere/mpas_init_atm_llxy.F index 939b588acc..11f95dd92a 100644 --- a/src/core_init_atmosphere/mpas_init_atm_llxy.F +++ b/src/core_init_atmosphere/mpas_init_atm_llxy.F @@ -2223,6 +2223,8 @@ END SUBROUTINE llij_gauss SUBROUTINE llxy_error_fatal(mesg) + USE mpas_abort, ONLY : mpas_dmpar_global_abort + IMPLICIT NONE CHARACTER (LEN=*), INTENT(IN) :: mesg diff --git a/src/core_init_atmosphere/mpas_init_atm_static.F b/src/core_init_atmosphere/mpas_init_atm_static.F index ae33fb9052..47f33731dc 100644 --- a/src/core_init_atmosphere/mpas_init_atm_static.F +++ b/src/core_init_atmosphere/mpas_init_atm_static.F @@ -9,11 +9,11 @@ module mpas_init_atm_static !================================================================================================== use atm_advection -! 
use mpas_configure use mpas_dmpar use mpas_pool_routines use init_atm_hinterp use init_atm_llxy + use mpas_abort, only : mpas_dmpar_global_abort use mpas_atmphys_utilities @@ -209,10 +209,11 @@ subroutine init_atm_static(mesh, dims, configs) ismax_lu = 20 write(mminlu,'(a)') 'MODIFIED_IGBP_MODIS_NOAH' case default - write(0,*) '*****************************************************************' - write(0,*) 'Invalid land use dataset '''//trim(config_landuse_data)//''' selected for config_landuse_data' - write(0,*) ' Possible options are: ''USGS'', ''MODIFIED_IGBP_MODIS_NOAH''' - write(0,*) '*****************************************************************' + call mpas_dmpar_global_abort('*****************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('Invalid land use dataset '''//trim(config_landuse_data) & + //''' selected for config_landuse_data', deferredAbort=.true.) + call mpas_dmpar_global_abort(' Possible options are: ''USGS'', ''MODIFIED_IGBP_MODIS_NOAH''', deferredAbort=.true.) + call mpas_dmpar_global_abort('*****************************************************************', deferredAbort=.true.) call mpas_dmpar_global_abort('Please correct the namelist.') end select surface_input_select0 @@ -283,10 +284,11 @@ subroutine init_atm_static(mesh, dims, configs) case('MODIFIED_IGBP_MODIS_NOAH') geog_sub_path = 'modis_landuse_20class_30s/' case default - write(0,*) '*****************************************************************' - write(0,*) 'Invalid land use dataset '''//trim(config_landuse_data)//''' selected for config_landuse_data' - write(0,*) ' Possible options are: ''USGS'', ''MODIFIED_IGBP_MODIS_NOAH''' - write(0,*) '*****************************************************************' + call mpas_dmpar_global_abort('*****************************************************************', deferredAbort=.true.) 
+ call mpas_dmpar_global_abort('Invalid land use dataset '''//trim(config_landuse_data) & + //''' selected for config_landuse_data', deferredAbort=.true.) + call mpas_dmpar_global_abort(' Possible options are: ''USGS'', ''MODIFIED_IGBP_MODIS_NOAH''', deferredAbort=.true.) + call mpas_dmpar_global_abort('*****************************************************************', deferredAbort=.true.) call mpas_dmpar_global_abort('Please correct the namelist.') end select surface_input_select1 nx = 1200 diff --git a/src/core_init_atmosphere/mpas_init_atm_surface.F b/src/core_init_atmosphere/mpas_init_atm_surface.F index f04f15a62d..8631920b31 100644 --- a/src/core_init_atmosphere/mpas_init_atm_surface.F +++ b/src/core_init_atmosphere/mpas_init_atm_surface.F @@ -7,11 +7,11 @@ ! !================================================================================================== module mpas_init_atm_surface -! use mpas_configure use mpas_derived_types use mpas_pool_routines use mpas_timekeeping use mpas_timer + use mpas_abort, only : mpas_dmpar_global_abort use init_atm_hinterp use init_atm_llxy @@ -82,6 +82,13 @@ subroutine init_atm_case_sfc(domain, dminfo, stream_manager, mesh, fg, state, di end do + ! + ! Ensure that no output alarms are still ringing for the 'surface' stream after + ! we exit the time loop above; the main run routine may write out all other + ! output streams with ringing alarms. + ! 
+ call mpas_stream_mgr_reset_alarms(stream_manager, streamID='surface', direction=MPAS_STREAM_OUTPUT, ierr=ierr) + end subroutine init_atm_case_sfc !================================================================================================== @@ -116,6 +123,7 @@ subroutine interp_sfc_to_MPAS(timeString, mesh, fg, dims, dminfo, config_sfc_pre real(kind=RKIND), dimension(:,:), allocatable :: maskslab integer, dimension(:), pointer :: landmask + integer :: global_max_landmask real(kind=RKIND), dimension(:), pointer :: destField1d real(kind=RKIND), dimension(:), pointer :: sst, xice @@ -130,14 +138,28 @@ subroutine interp_sfc_to_MPAS(timeString, mesh, fg, dims, dminfo, config_sfc_pre call mpas_pool_get_dimension(dims, 'nCells', nCells) +! +! Try to determine whether we have used a 'grid.nc' or a 'static.nc' file as input. +! If we are working from a 'grid.nc', we expect that the global maximum of the landmask +! will be 0. +! + call mpas_dmpar_max_int(dminfo, maxval(landmask(1:nCells)), global_max_landmask) + if (global_max_landmask == 0) then + call mpas_dmpar_global_abort('*******************************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('The global maximum of the ''landmask'' field is zero, which suggests that this', deferredAbort=.true.) + call mpas_dmpar_global_abort('field was not in the input file. A ''landmask'' field is needed to properly', deferredAbort=.true.) + call mpas_dmpar_global_abort('interpolate surface fields.', deferredAbort=.true.) + call mpas_dmpar_global_abort('Please rerun after specifying a static or initial conditions file as input in', deferredAbort=.true.) + call mpas_dmpar_global_abort('the ''streams.init_atmosphere'' file.', deferredAbort=.true.) 
+ call mpas_dmpar_global_abort('*******************************************************************************') + end if + !open intermediate file: call read_met_init(trim(config_sfc_prefix),.false.,timeString,istatus) if(istatus /= 0) then - write(0,*) '********************************************************************************' - write(0,*) 'Error opening surface file '// & - trim(config_sfc_prefix)//':'//timeString(1:13) - write(0,*) '********************************************************************************' - call mpas_dmpar_abort(dminfo) + call mpas_dmpar_global_abort('********************************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('Error opening surface file '//trim(config_sfc_prefix)//':'//timeString(1:13), deferredAbort=.true.) + call mpas_dmpar_global_abort('********************************************************************************') else write(0,*) 'Processing file ',trim(config_sfc_prefix)//':'//timeString(1:13) end if @@ -146,7 +168,7 @@ subroutine interp_sfc_to_MPAS(timeString, mesh, fg, dims, dminfo, config_sfc_pre have_landmask = .false. call read_next_met_field(field,istatus) do while (istatus == 0) - if(index(field % field, 'LANDSEA') /= 0) then + if(trim(field % field) == 'LANDSEA') then have_landmask = .true. if(.not.allocated(maskslab)) allocate(maskslab(-2:field % nx+3, field % ny)) maskslab(1:field % nx, 1:field % ny) = field % slab(1:field % nx, 1:field % ny) @@ -166,11 +188,9 @@ subroutine interp_sfc_to_MPAS(timeString, mesh, fg, dims, dminfo, config_sfc_pre !read sea-surface temperatures and seaice data. 
open intermediate file: call read_met_init(trim(config_sfc_prefix),.false.,timeString(1:13),istatus) if(istatus /= 0) then - write(0,*) '********************************************************************************' - write(0,*) 'Error opening surface file '// & - trim(config_sfc_prefix)//':'//timeString(1:13) - write(0,*) '********************************************************************************' - call mpas_dmpar_abort(dminfo) + call mpas_dmpar_global_abort('********************************************************************************', deferredAbort=.true.) + call mpas_dmpar_global_abort('Error opening surface file '//trim(config_sfc_prefix)//':'//timeString(1:13), deferredAbort=.true.) + call mpas_dmpar_global_abort('********************************************************************************') end if if(.not. have_landmask) then @@ -184,7 +204,7 @@ subroutine interp_sfc_to_MPAS(timeString, mesh, fg, dims, dminfo, config_sfc_pre do while (istatus == 0) !sea-surface data: - if(index(field % field, 'SKINTEMP') /= 0 .or. index(field % field, 'SST') /= 0) then + if((trim(field % field) == 'SKINTEMP') .or. (trim(field % field) == 'SST')) then ! write(0,*) '... Processing SST:' sst(1:nCells) = 0.0_RKIND destField1d => sst @@ -209,7 +229,7 @@ subroutine interp_sfc_to_MPAS(timeString, mesh, fg, dims, dminfo, config_sfc_pre deallocate(field%slab) !sea-ice data: - else if(index(field % field, 'SEAICE') /= 0) then + else if(trim(field % field) == 'SEAICE') then ! write(0,*) '... Processing SEAICE:' xice(1:nCells) = 0.0_RKIND destField1d => xice diff --git a/src/core_init_atmosphere/mpas_init_atm_vinterp.F b/src/core_init_atmosphere/mpas_init_atm_vinterp.F new file mode 100644 index 0000000000..c5163d2708 --- /dev/null +++ b/src/core_init_atmosphere/mpas_init_atm_vinterp.F @@ -0,0 +1,111 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! 
Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the +! LICENSE file +! distributed with this code, or at +! http://mpas-dev.github.com/license.html +! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! MODULE INTERP_MODULE +! +! This module provides routines for vertical interpolation. +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +module init_atm_vinterp + use mpas_kind_types + + contains + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! Name: interp_array_from_string + ! + ! Purpose: + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + real (kind=RKIND) function vertical_interp(target_z, nz, zf, order, extrap, surface_val, sealev_val) + + implicit none + + real (kind=RKIND), intent(in) :: target_z + integer, intent(in) :: nz + real (kind=RKIND), dimension(2,nz), intent(in) :: zf ! zf(1,:) is column of vertical coordinate values, zf(2,:) is column of field values + integer, intent(in), optional :: order + integer, intent(in), optional :: extrap + real (kind=RKIND), intent(in), optional :: surface_val + real (kind=RKIND), intent(in), optional :: sealev_val + + integer :: k, lm, lp + real (kind=RKIND) :: wm, wp + real (kind=RKIND) :: slope + + integer :: interp_order, extrap_type + real (kind=RKIND) :: surface, sealevel + + + if (present(order)) then + interp_order = order + else + interp_order = 2 + end if + + if (present(extrap)) then + extrap_type = extrap + else + extrap_type = 1 + end if + + if (present(surface_val)) then + surface = surface_val + else + surface = 200100.0 + end if + + if (present(sealev_val)) then + sealevel = sealev_val + else + sealevel = 201300.0 + end if + + ! + ! Extrapolation required + ! 
+ if (target_z < zf(1,1)) then + if (extrap_type == 0) then + vertical_interp = zf(2,1) + else if (extrap_type == 1) then + slope = (zf(2,2) - zf(2,1)) / (zf(1,2) - zf(1,1)) + vertical_interp = zf(2,1) + slope * (target_z - zf(1,1)) + end if + return + end if + if (target_z >= zf(1,nz)) then + if (extrap_type == 0) then + vertical_interp = zf(2,nz) + else if (extrap_type == 1) then + slope = (zf(2,nz) - zf(2,nz-1)) / (zf(1,nz) - zf(1,nz-1)) + vertical_interp = zf(2,nz) + slope * (target_z - zf(1,nz)) + end if + return + end if + + + ! + ! No extrapolation required + ! + do k=1,nz-1 + if (target_z >= zf(1,k) .and. target_z < zf(1,k+1)) then + lm = k + lp = k+1 + wm = (zf(1,k+1) - target_z) / (zf(1,k+1) - zf(1,k)) + wp = (target_z - zf(1,k)) / (zf(1,k+1) - zf(1,k)) + exit + end if + end do + + vertical_interp = wm*zf(2,lm) + wp*zf(2,lp) + + return + + end function vertical_interp + +end module init_atm_vinterp diff --git a/src/core_landice/Registry.xml b/src/core_landice/Registry.xml index f5d6db45a7..b623ac65a7 100644 --- a/src/core_landice/Registry.xml +++ b/src/core_landice/Registry.xml @@ -1,5 +1,5 @@ - + diff --git a/src/core_landice/mpas_li_core.F b/src/core_landice/mpas_li_core.F index 9e6f85fdf0..ea982b926c 100644 --- a/src/core_landice/mpas_li_core.F +++ b/src/core_landice/mpas_li_core.F @@ -9,6 +9,7 @@ module li_core use mpas_framework use mpas_timekeeping + use mpas_abort, only : mpas_dmpar_global_abort implicit none private @@ -55,7 +56,6 @@ module li_core function li_core_init(domain, startTimeStamp) result(err) - use mpas_configure use mpas_derived_types use mpas_pool_routines use mpas_stream_manager @@ -138,8 +138,6 @@ function li_core_init(domain, startTimeStamp) result(err) ! === Initialize modules === ! 
=== - call mpas_timer_init(domain) - call li_velocity_init(domain, err_tmp) err = ior(err, err_tmp) diff --git a/src/core_landice/mpas_li_core_interface.F b/src/core_landice/mpas_li_core_interface.F index b1c6a4453d..1de7e6fc7f 100644 --- a/src/core_landice/mpas_li_core_interface.F +++ b/src/core_landice/mpas_li_core_interface.F @@ -12,6 +12,8 @@ module li_core_interface use mpas_dmpar use mpas_constants use mpas_io_units + use mpas_attlist + use mpas_abort, only : mpas_dmpar_global_abort use li_core public @@ -42,6 +44,7 @@ subroutine li_setup_core(core)!{{{ core % get_mesh_stream => li_get_mesh_stream core % setup_immutable_streams => li_setup_immutable_streams core % setup_derived_dimensions => li_setup_derived_dimensions + core % setup_decomposed_dimensions => li_setup_decomposed_dimensions core % setup_block => li_setup_block core % setup_namelist => li_setup_namelists @@ -85,11 +88,12 @@ end subroutine li_setup_domain!}}} !> *not* allocated until after this routine is called. ! !----------------------------------------------------------------------- - function li_setup_packages(configPool, packagePool) result(ierr) + function li_setup_packages(configPool, packagePool, iocontext) result(ierr) implicit none type (mpas_pool_type), intent(inout) :: configPool type (mpas_pool_type), intent(inout) :: packagePool + type (mpas_io_context_type), intent(inout) :: iocontext integer :: ierr ierr = 0 diff --git a/src/core_landice/mpas_li_diagnostic_vars.F b/src/core_landice/mpas_li_diagnostic_vars.F index c7cac69921..8186e846b1 100644 --- a/src/core_landice/mpas_li_diagnostic_vars.F +++ b/src/core_landice/mpas_li_diagnostic_vars.F @@ -27,7 +27,6 @@ module li_diagnostic_vars use mpas_derived_types use mpas_pool_routines use mpas_timer - use mpas_configure use li_velocity use li_mask @@ -112,7 +111,6 @@ subroutine li_calculate_diagnostic_vars(domain, timeLevel, solveVelo, err) type (block_type), pointer :: block type (mpas_pool_type), pointer :: meshPool type 
(mpas_pool_type), pointer :: statePool - type (field2DReal), pointer :: normalVelocityField, layerThicknessEdgeField real (kind=RKIND), dimension(:,:), pointer :: normalVelocity, uReconstructX, uReconstructY, uReconstructZ, uReconstructZonal, uReconstructMeridional integer :: err_tmp !!! integer :: blockVertexMaskChanged, procVertexMaskChanged, anyVertexMaskChanged @@ -180,9 +178,7 @@ subroutine li_calculate_diagnostic_vars(domain, timeLevel, solveVelo, err) ! update halos on velocity call mpas_timer_start("halo updates") - call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) - call mpas_pool_get_field(statePool, 'normalVelocity', normalVelocityField, timeLevel=timeLevel) - call mpas_dmpar_exch_halo_field(normalVelocityField) + call mpas_dmpar_field_halo_exch(domain, 'normalVelocity', timeLevel=timeLevel) call mpas_timer_stop("halo updates") call mpas_timer_stop("velocity solve") @@ -229,9 +225,7 @@ subroutine li_calculate_diagnostic_vars(domain, timeLevel, solveVelo, err) end do call mpas_timer_start("halo updates") - call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) - call mpas_pool_get_field(statePool, 'layerThicknessEdge', layerThicknessEdgeField, timeLevel=timeLevel) - call mpas_dmpar_exch_halo_field(layerThicknessEdgeField) + call mpas_dmpar_field_halo_exch(domain, 'layerThicknessEdge', timeLevel=timeLevel) call mpas_timer_stop("halo updates") call mpas_timer_stop("calc. 
diagnostic vars except vel") @@ -306,7 +300,6 @@ subroutine diagnostic_solve_before_velocity(domain, timeLevel, err)!{{{ integer, dimension(:), pointer :: cellMask real (kind=RKIND), dimension(:,:), pointer :: layerThickness real (kind=RKIND), dimension(:,:,:), pointer :: tracers - type (field1DInteger), pointer :: cellMaskField, edgeMaskField, vertexMaskField integer, pointer :: nCells real (kind=RKIND), pointer :: config_sea_level, config_ice_density, config_ocean_density real (kind=RKIND) :: thisThk @@ -332,13 +325,9 @@ subroutine diagnostic_solve_before_velocity(domain, timeLevel, err)!{{{ ! Update halos on masks - the outermost cells/edges/vertices may be wrong for mask components that need neighbor information call mpas_timer_start("halo updates") - call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) - call mpas_pool_get_field(statePool, 'cellMask', cellMaskField, timeLevel=timeLevel) - call mpas_pool_get_field(statePool, 'edgeMask', edgeMaskField, timeLevel=timeLevel) - call mpas_pool_get_field(statePool, 'vertexMask', vertexMaskField, timeLevel=timeLevel) - call mpas_dmpar_exch_halo_field(cellMaskField) - call mpas_dmpar_exch_halo_field(edgeMaskField) - call mpas_dmpar_exch_halo_field(vertexMaskField) + call mpas_dmpar_field_halo_exch(domain, 'cellMask', timeLevel=timeLevel) + call mpas_dmpar_field_halo_exch(domain, 'edgeMask', timeLevel=timeLevel) + call mpas_dmpar_field_halo_exch(domain, 'vertexMask', timeLevel=timeLevel) call mpas_timer_stop("halo updates") !!! ! Update beta before the velocity solve occurs, now that we have the new state and its mask. 
diff --git a/src/core_landice/mpas_li_mask.F b/src/core_landice/mpas_li_mask.F index bdf58947bd..80dfdff8c9 100644 --- a/src/core_landice/mpas_li_mask.F +++ b/src/core_landice/mpas_li_mask.F @@ -23,7 +23,6 @@ module li_mask use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_dmpar use li_setup diff --git a/src/core_landice/mpas_li_setup.F b/src/core_landice/mpas_li_setup.F index 11cca1f7d3..6e06c0ebdf 100644 --- a/src/core_landice/mpas_li_setup.F +++ b/src/core_landice/mpas_li_setup.F @@ -23,7 +23,6 @@ module li_setup use mpas_derived_types use mpas_pool_routines use mpas_kind_types - use mpas_configure use mpas_dmpar implicit none diff --git a/src/core_landice/mpas_li_sia.F b/src/core_landice/mpas_li_sia.F index c077735e31..cde7a1810d 100644 --- a/src/core_landice/mpas_li_sia.F +++ b/src/core_landice/mpas_li_sia.F @@ -23,7 +23,6 @@ module li_sia use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_dmpar use li_mask use li_setup diff --git a/src/core_landice/mpas_li_tendency.F b/src/core_landice/mpas_li_tendency.F index 22eb3db7e5..ff4af14d3d 100644 --- a/src/core_landice/mpas_li_tendency.F +++ b/src/core_landice/mpas_li_tendency.F @@ -24,7 +24,6 @@ module li_tendency use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_constants use mpas_dmpar use li_setup diff --git a/src/core_landice/mpas_li_time_integration.F b/src/core_landice/mpas_li_time_integration.F index 7bfa7f372c..03625d5430 100644 --- a/src/core_landice/mpas_li_time_integration.F +++ b/src/core_landice/mpas_li_time_integration.F @@ -23,7 +23,6 @@ module li_time_integration use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_constants use mpas_dmpar use li_time_integration_fe diff --git a/src/core_landice/mpas_li_time_integration_fe.F b/src/core_landice/mpas_li_time_integration_fe.F index 1fe5c40f62..c00db47076 100644 --- a/src/core_landice/mpas_li_time_integration_fe.F +++ 
b/src/core_landice/mpas_li_time_integration_fe.F @@ -23,12 +23,10 @@ module li_time_integration_fe use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_constants use mpas_dmpar use mpas_timer use mpas_vector_reconstruction - use mpas_configure use li_velocity, only: li_velocity_solve use li_tendency use li_diagnostic_vars @@ -194,7 +192,6 @@ subroutine calculate_tendencies(domain, deltat, err) type (mpas_pool_type), pointer :: tendPool real (kind=RKIND), dimension(:,:), pointer :: layerThickness_tend - type (field2DReal), pointer :: layerThickness_tend_field integer :: allowableDtProcNumber, allowableDtMinProcNumber real (kind=RKIND) :: allowableDt, allowableDtMin logical, pointer :: config_print_thickness_advection_info @@ -230,9 +227,7 @@ subroutine calculate_tendencies(domain, deltat, err) ! Now that we have exited the block loop, do any needed halo updates. ! update halos on thickness tend call mpas_timer_start("halo updates") - call mpas_pool_get_subpool(domain % blocklist % structs, 'tend', tendPool) - call mpas_pool_get_field(tendPool, 'layerThickness', layerThickness_tend_field) - call mpas_dmpar_exch_halo_field(layerThickness_tend_field) + call mpas_dmpar_field_halo_exch(domain, 'tend_layerThickness') call mpas_timer_stop("halo updates") ! If we are printing advection debug information, @@ -291,9 +286,7 @@ subroutine calculate_tendencies(domain, deltat, err) !!! case ('None') !=================================================== !!! ! Do nothing - no need to waste time doing a halo update if not advecting tracers! The tendency will be 0 everywhere !!! case default -!!! call mpas_dmpar_exch_halo_field3d_real(dminfo, tracer_tendency, & -!!! size(tracer_tendency,dim=1), mesh % nVertLevels, mesh % nCells, & -!!! domain % blocklist % parinfo % cellsToSend, domain % blocklist % parinfo % cellsToRecv) +!!! call mpas_dmpar_field_halo_exch(domain, 'tendTracers') !!! 
end select diff --git a/src/core_landice/mpas_li_velocity.F b/src/core_landice/mpas_li_velocity.F index 6c7a51cdbb..0f34efaddf 100644 --- a/src/core_landice/mpas_li_velocity.F +++ b/src/core_landice/mpas_li_velocity.F @@ -25,7 +25,6 @@ module li_velocity use mpas_derived_types use mpas_pool_routines - use mpas_configure !!! use li_lifev use li_sia use li_setup diff --git a/src/core_ocean/Makefile b/src/core_ocean/Makefile index 10a641be58..b5a10764ce 100644 --- a/src/core_ocean/Makefile +++ b/src/core_ocean/Makefile @@ -1,35 +1,29 @@ .SUFFIXES: .F .c .o -OCEAN_SHARED_INCLUDES=-I../shared -I../analysis_members -I../cvmix -I../../framework -I../../external/esmf_time_f90 -I../../operators -OCEAN_LIBRARIES=cvmix/*.o analysis_members/*.o shared/*.o - -ifdef MODE - -ifeq ($(wildcard ./mode_$(MODE)), ) # CHECK FOR EXISTENCE OF MODE DIRECTORY -all: exit - -core_reg: exit - -error_msg: error_head - @echo "$(MODE) is not a valid build mode for the ocean core" - -else # IFEQ ($(wildcard.... +OCEAN_SHARED_INCLUDES = -I$(PWD)/../framework -I$(PWD)/../external/esmf_time_f90 -I$(PWD)/../operators +OCEAN_SHARED_INCLUDES += -I$(PWD)/shared -I$(PWD)/analysis_members -I$(PWD)/cvmix -I$(PWD)/mode_forward -I$(PWD)/mode_analysis all: shared libcvmix analysis_members - (cd mode_$(MODE); $(MAKE) FCINCLUDES="$(FCINCLUDES) $(OCEAN_SHARED_INCLUDES)" ) + (cd mode_forward; $(MAKE) FCINCLUDES="$(FCINCLUDES) $(OCEAN_SHARED_INCLUDES)" all ) + (cd mode_analysis; $(MAKE) FCINCLUDES="$(FCINCLUDES) $(OCEAN_SHARED_INCLUDES)" all ) + (cd driver; $(MAKE) FCINCLUDES="$(FCINCLUDES) $(OCEAN_SHARED_INCLUDES)" all ) if [ -e libdycore.a ]; then \ ($(RM) libdycore.a) \ fi - ar -ru libdycore.a $(OCEAN_LIBRARIES) mode_$(MODE)/*.o + ar -ru libdycore.a `find . -type f -name "*.o"` core_reg: $(CPP) $(CPPFLAGS) $(CPPINCLUDES) Registry.xml > Registry_processed.xml core_input_gen: if [ ! 
-e default_inputs ]; then mkdir default_inputs; fi - (cd default_inputs; $(NL_GEN) ../Registry_processed.xml namelist.ocean_$(MODE) ) - (cd default_inputs; $(ST_GEN) ../Registry_processed.xml streams.ocean_$(MODE) stream_list.ocean_$(MODE). mutable ) + (cd default_inputs; $(NL_GEN) ../Registry_processed.xml namelist.ocean ) + (cd default_inputs; $(NL_GEN) ../Registry_processed.xml namelist.ocean.forward mode=forward ) + (cd default_inputs; $(NL_GEN) ../Registry_processed.xml namelist.ocean.analysis mode=analysis ) + (cd default_inputs; $(ST_GEN) ../Registry_processed.xml streams.ocean stream_list.ocean. mutable ) + (cd default_inputs; $(ST_GEN) ../Registry_processed.xml streams.ocean.forward stream_list.ocean.forward. mutable mode=forward ) + (cd default_inputs; $(ST_GEN) ../Registry_processed.xml streams.ocean.analysis stream_list.ocean.analysis. mutable mode=analysis ) gen_includes: $(CPP) $(CPPFLAGS) $(CPPINCLUDES) Registry.xml > Registry_processed.xml @@ -41,20 +35,6 @@ post_build: cp default_inputs/* $(ROOT_DIR)/default_inputs/. ( cd $(ROOT_DIR)/default_inputs; for FILE in `ls -1`; do if [ ! -e ../$$FILE ]; then cp $$FILE ../.; fi; done ) - -endif # IFEQ ($(wildcard.... - -else # IFDEF MODE - -all: exit - -core_reg: exit - -error_msg: error_head - @echo "The ocean core requires a build mode." 
- -endif # IFDEF MODE - cvmix_source: get_cvmix.sh (chmod a+x get_cvmix.sh; ./get_cvmix.sh) (cd cvmix; make clean) @@ -72,31 +52,13 @@ shared: libcvmix analysis_members: libcvmix shared ( cd analysis_members; $(MAKE) FCINCLUDES="$(FCINCLUDES) $(OCEAN_SHARED_INCLUDES)" CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)" all ) -error_head: - @echo "" - @echo "" - @echo "*************************************" - @echo "ERROR" - -error_tail: error_head error_msg - @echo "Available build modes are:" - @ls -d mode_* | grep ".*" | sed "s/mode_/ /g" - @echo "" - @echo "Please specify at build time as follows:" - @echo " make target CORE=ocean MODE=build_mode" - @echo "*************************************" - @echo "" - @echo "" - -exit: error_head error_msg error_tail - @exit 1 - clean: if [ -d cvmix ]; then \ (cd cvmix; make clean) \ fi (cd mode_forward; $(MAKE) clean) (cd mode_analysis; $(MAKE) clean) + (cd driver; $(MAKE) clean) (cd analysis_members; $(MAKE) clean) (cd shared; $(MAKE) clean) ($(RM) *.mod libdycore.a Registry_processed.xml) diff --git a/src/core_ocean/Registry.xml b/src/core_ocean/Registry.xml index c14d66db79..23d39b7844 100644 --- a/src/core_ocean/Registry.xml +++ b/src/core_ocean/Registry.xml @@ -1,5 +1,5 @@ - + - + + + + + + + + - - @@ -71,15 +82,7 @@ possible_values="'gregorian', 'gregorian_noleap'" /> - - - + + + + + + + + - + + + + + - + - + - + - - - - - - - - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + - + - + - + - + - + - + - + - + - + + + + + type="input" + filename_template="mesh.nc" + input_interval="initial_only" + immutable="true" + mode="forward;analysis"> @@ -855,12 +881,17 @@ + + + + + immutable="true" + mode="forward;analysis"> @@ -868,12 +899,7 @@ - - - - - + immutable="true" + mode="forward;analysis"> @@ -893,13 +920,14 @@ - - - - + + + + + + runtime_format="single_file" + mode="forward"> @@ -940,7 +969,8 @@ reference_time="0000-01-01_00:00:00" output_interval="0001_00:00:00" clobber_mode="truncate" - 
runtime_format="single_file"> + runtime_format="single_file" + mode="forward"> @@ -963,7 +993,8 @@ reference_time="0000-01-01_00:00:00" output_interval="0001_00:00:00" clobber_mode="truncate" - runtime_format="single_file"> + runtime_format="single_file" + mode="forward"> @@ -987,7 +1018,8 @@ reference_time="0000-01-01_00:00:00" output_interval="0001_00:00:00" clobber_mode="truncate" - runtime_format="single_file"> + runtime_format="single_file" + mode="forward"> @@ -1015,12 +1047,14 @@ reference_time="0000-01-01_00:00:00" output_interval="0001_00:00:00" clobber_mode="truncate" - runtime_format="single_file"> + runtime_format="single_file" + mode="forward"> + + runtime_format="single_file" + mode="forward"> @@ -1038,10 +1073,10 @@ - + - + @@ -1074,7 +1109,8 @@ reference_time="0000-01-01_00:00:00" output_interval="0001_00:00:00" clobber_mode="truncate" - runtime_format="single_file"> + runtime_format="single_file" + mode="forward"> @@ -1099,7 +1135,8 @@ reference_time="0000-01-01_00:00:00" output_interval="0001_00:00:00" clobber_mode="truncate" - runtime_format="single_file"> + runtime_format="single_file" + mode="forward"> @@ -1137,7 +1174,7 @@ description="layer thickness" /> @@ -1312,21 +1349,27 @@ /> - + @@ -1416,12 +1464,15 @@ - + @@ -1447,7 +1498,7 @@ description="Tracer of 1 extrapolated to ocean surface" /> - + @@ -1460,8 +1511,9 @@ - + - + @@ -1490,9 +1542,11 @@ /> + - + + - - + @@ -1858,8 +2028,18 @@ /> + + - @@ -1968,8 +2150,9 @@ - + @@ -1980,7 +2163,7 @@ description="Time averaged tracer1 extrapolated to ocean surface" /> - + @@ -1988,7 +2171,7 @@ description="Time averaged meridional surface velocity" /> - + diff --git a/src/core_ocean/analysis_members/Makefile b/src/core_ocean/analysis_members/Makefile index 00c20c2791..1b6097cabb 100644 --- a/src/core_ocean/analysis_members/Makefile +++ b/src/core_ocean/analysis_members/Makefile @@ -1,19 +1,25 @@ -.SUFFIXES: .F .o +.SUFFIXES: .F .c .o -OBJS = mpas_ocn_analysis_driver.o \ - mpas_ocn_global_stats.o \ - 
mpas_ocn_zonal_mean.o +OBJS = mpas_ocn_analysis_driver.o -all: $(OBJS) +MEMBERS = mpas_ocn_global_stats.o \ + mpas_ocn_okubo_weiss.o \ + mpas_ocn_layer_volume_weighted_averages.o \ + mpas_ocn_surface_area_weighted_averages.o \ + mpas_ocn_water_mass_census.o \ + mpas_ocn_meridional_heat_transport.o \ + mpas_ocn_test_compute_interval.o \ + mpas_ocn_high_frequency_output.o \ + mpas_ocn_zonal_mean.o -mpas_ocn_analysis_driver.o: mpas_ocn_global_stats.o mpas_ocn_zonal_mean.o +all: $(OBJS) -mpas_ocn_global_stats.o: +mpas_ocn_analysis_driver.o: $(MEMBERS) -mpas_ocn_zonal_mean.o: +mpas_ocn_okubo_weiss.o: mpas_ocn_okubo_weiss_eigenvalues.o clean: - $(RM) *.o *.i *.mod *.f90 lib*.a + $(RM) *.o *.i *.mod *.f90 .F.o: $(RM) $@ $*.mod @@ -23,3 +29,6 @@ ifeq "$(GEN_F90)" "true" else $(FC) $(CPPFLAGS) $(FFLAGS) -c $*.F $(CPPINCLUDES) $(FCINCLUDES) endif + +.c.o: + $(CC) $(CPPFLAGS) $(CFLAGS) $(CINCLUDES) -c $< diff --git a/src/core_ocean/analysis_members/Registry_TEMPLATE.xml b/src/core_ocean/analysis_members/Registry_TEMPLATE.xml index e2a66059d7..7781514365 100644 --- a/src/core_ocean/analysis_members/Registry_TEMPLATE.xml +++ b/src/core_ocean/analysis_members/Registry_TEMPLATE.xml @@ -1,26 +1,53 @@ - - + + - - + + - + + + + + - @@ -28,11 +55,3 @@ - - - - diff --git a/src/core_ocean/analysis_members/Registry_analysis_members.xml b/src/core_ocean/analysis_members/Registry_analysis_members.xml index 967676d3e7..8359f533a9 100644 --- a/src/core_ocean/analysis_members/Registry_analysis_members.xml +++ b/src/core_ocean/analysis_members/Registry_analysis_members.xml @@ -1,2 +1,9 @@ #include "Registry_global_stats.xml" +#include "Registry_surface_area_weighted_averages.xml" +#include "Registry_water_mass_census.xml" +#include "Registry_layer_volume_weighted_averages.xml" #include "Registry_zonal_mean.xml" +#include "Registry_okubo_weiss.xml" +#include "Registry_meridional_heat_transport.xml" +#include "Registry_test_compute_interval.xml" +#include "Registry_high_frequency_output.xml" 
diff --git a/src/core_ocean/analysis_members/Registry_global_stats.xml b/src/core_ocean/analysis_members/Registry_global_stats.xml index 6afe97b96d..2332939a3a 100644 --- a/src/core_ocean/analysis_members/Registry_global_stats.xml +++ b/src/core_ocean/analysis_members/Registry_global_stats.xml @@ -1,40 +1,37 @@ - - + - - + + + + - + - - - - - - - - - - - - - - + + + + + + + + + + + + + diff --git a/src/core_ocean/analysis_members/Registry_high_frequency_output.xml b/src/core_ocean/analysis_members/Registry_high_frequency_output.xml new file mode 100644 index 0000000000..fa992c2c54 --- /dev/null +++ b/src/core_ocean/analysis_members/Registry_high_frequency_output.xml @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/core_ocean/analysis_members/Registry_layer_volume_weighted_averages.xml b/src/core_ocean/analysis_members/Registry_layer_volume_weighted_averages.xml new file mode 100644 index 0000000000..4a3ba39a9e --- /dev/null +++ b/src/core_ocean/analysis_members/Registry_layer_volume_weighted_averages.xml @@ -0,0 +1,373 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/core_ocean/analysis_members/Registry_meridional_heat_transport.xml b/src/core_ocean/analysis_members/Registry_meridional_heat_transport.xml new file mode 100644 index 0000000000..fc9d9ca9b2 --- /dev/null +++ b/src/core_ocean/analysis_members/Registry_meridional_heat_transport.xml @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/core_ocean/analysis_members/Registry_okubo_weiss.xml b/src/core_ocean/analysis_members/Registry_okubo_weiss.xml new file mode 100644 index 0000000000..951136698d --- /dev/null +++ b/src/core_ocean/analysis_members/Registry_okubo_weiss.xml @@ -0,0 
+1,175 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/core_ocean/analysis_members/Registry_surface_area_weighted_averages.xml b/src/core_ocean/analysis_members/Registry_surface_area_weighted_averages.xml new file mode 100644 index 0000000000..77112b2d73 --- /dev/null +++ b/src/core_ocean/analysis_members/Registry_surface_area_weighted_averages.xml @@ -0,0 +1,365 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/core_ocean/analysis_members/Registry_test_compute_interval.xml b/src/core_ocean/analysis_members/Registry_test_compute_interval.xml new file mode 100644 index 0000000000..c11a0734cb --- /dev/null +++ b/src/core_ocean/analysis_members/Registry_test_compute_interval.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/src/core_ocean/analysis_members/Registry_water_mass_census.xml b/src/core_ocean/analysis_members/Registry_water_mass_census.xml new file mode 100644 index 0000000000..277da0089d --- /dev/null +++ b/src/core_ocean/analysis_members/Registry_water_mass_census.xml @@ -0,0 +1,109 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/core_ocean/analysis_members/Registry_zonal_mean.xml b/src/core_ocean/analysis_members/Registry_zonal_mean.xml index 826724a929..8bb0dc3538 100644 --- a/src/core_ocean/analysis_members/Registry_zonal_mean.xml +++ b/src/core_ocean/analysis_members/Registry_zonal_mean.xml @@ -1,58 +1,49 @@ - - + - - - + + + - - - - + - - - - - - - - - - - - + @@ -77,3 +68,24 @@ /> + + + + + + + + + + + + + + diff --git a/src/core_ocean/analysis_members/mpas_ocn_TEMPLATE.F b/src/core_ocean/analysis_members/mpas_ocn_TEMPLATE.F index 6f954ea0ee..898d67edd3 100644 --- 
a/src/core_ocean/analysis_members/mpas_ocn_TEMPLATE.F +++ b/src/core_ocean/analysis_members/mpas_ocn_TEMPLATE.F @@ -7,41 +7,48 @@ ! !||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| ! -! ocn_TEMPLATE +! ocn_TEM_PLATE ! -!> \brief MPAS ocean analysis core member: TEMPLATE +!> \brief MPAS ocean analysis mode member: TEM_PLATE !> \author FILL_IN_AUTHOR !> \date FILL_IN_DATE !> \details -!> MPAS ocean analysis core member: TEMPLATE +!> MPAS ocean analysis mode member: TEM_PLATE !> In order to add a new analysis member, do the following: !> 1. Copy these to your new analysis member name: !> cp mpas_ocn_TEMPLATE.F mpas_ocn_your_new_name.F !> cp Registry_ocn_TEMPLATE.xml Registry_ocn_your_new_name.xml -!> +!> !> 2. In those two new files, replace the following text: -!> TEMPLATE, amTemplate, FILL_IN_AUTHOR, FILL_IN_DATE -!> Note TEMPLATE uses underscores, like global_stats, while -!> amTemplate uses caps, e.g. amGlobalStats. +!> tempLate, TEM_PLATE, FILL_IN_AUTHOR, FILL_IN_DATE +!> Typically tempLate uses camel case (variable names), like yourNewName, +!> while TEM_PLATE uses underscores (subroutine names), like your_new_name. +!> note: do not replace 'filename_template' in Registry_ocn_yourNewName.xml !> !> 3. Add a #include line for your registry to !> Registry_analysis_members.xml -!> -!> 4. In mpas_ocn_analysis_driver.F, add calls for your analysis member -!> by copying lines with TEMPLATE. +!> +!> 4. In mpas_ocn_analysis_driver.F, add a use statement for your new analysis member. +!> In addition, add lines for your analysis member, and replace TEM_PLATE +!> and temPlate as described in step 2. There should be 5 places that need additions: +!> - Adding the analysis member name to the analysis member list +!> - Adding an init if test can subroutine call +!> - Adding a compute if test can subroutine call +!> - Adding a restart if test can subroutine call +!> - Adding a finalize if test can subroutine call !> !> 5. 
In src/core_ocean/analysis_members/Makefile, add your -!> new analysis member everywhere you see -!> mpas_ocn_global_stats.o -!> -! +!> new analysis member to the list of members. See another analysis member +!> in that file for an example. +!> NOTE: If your analysis member depends on other files, add a dependency +!> line for the member and list them there. See okubo weiss for an example. +!> !----------------------------------------------------------------------- -module ocn_TEMPLATE +module ocn_TEM_PLATE use mpas_derived_types use mpas_pool_routines - use mpas_timer use mpas_dmpar use mpas_timekeeping use mpas_stream_manager @@ -65,12 +72,10 @@ module ocn_TEMPLATE ! !-------------------------------------------------------------------- - public :: ocn_setup_packages_TEMPLATE, & - ocn_init_TEMPLATE, & - ocn_init_alarms_TEMPLATE, & - ocn_compute_TEMPLATE, & - ocn_restart_TEMPLATE, & - ocn_finalize_TEMPLATE + public :: ocn_init_TEM_PLATE, & + ocn_compute_TEM_PLATE, & + ocn_restart_TEM_PLATE, & + ocn_finalize_TEM_PLATE !-------------------------------------------------------------------- ! @@ -78,80 +83,24 @@ module ocn_TEMPLATE ! !-------------------------------------------------------------------- - type (timer_node), pointer :: amTemplateTimer - !*********************************************************************** contains !*********************************************************************** ! -! routine ocn_setup_packages_TEMPLATE -! -!> \brief Set up packages for MPAS-Ocean analysis member -!> \author Mark Petersen -!> \date November 2013 -!> \details -!> This routine is intended to configure the packages for this MPAS -!> ocean analysis member -! -!----------------------------------------------------------------------- - - subroutine ocn_setup_packages_TEMPLATE(configPool, packagePool, err)!{{{ - - !----------------------------------------------------------------- - ! - ! input variables - ! 
- !----------------------------------------------------------------- - type (mpas_pool_type), intent(in) :: configPool - type (mpas_pool_type), intent(in) :: packagePool - - !----------------------------------------------------------------- - ! - ! input/output variables - ! - !----------------------------------------------------------------- - - !----------------------------------------------------------------- - ! - ! output variables - ! - !----------------------------------------------------------------- - - integer, intent(out) :: err !< Output: error flag - - !----------------------------------------------------------------- - ! - ! local variables - ! - !----------------------------------------------------------------- - logical, pointer :: amTemplateActive - - err = 0 - - call mpas_pool_get_package(packagePool, 'amTemplateActive', amTemplateActive) - - ! turn on package for this analysis member - amTemplateActive = .true. - - end subroutine ocn_setup_packages_TEMPLATE!}}} - - -!*********************************************************************** -! -! routine ocn_init_TEMPLATE +! routine ocn_init_TEM_PLATE ! !> \brief Initialize MPAS-Ocean analysis member !> \author FILL_IN_AUTHOR !> \date FILL_IN_DATE -!> \details -!> This routine conducts all initializations required for the +!> \details +!> This routine conducts all initializations required for the !> MPAS-Ocean analysis member. ! !----------------------------------------------------------------------- - subroutine ocn_init_TEMPLATE(domain, err)!{{{ + subroutine ocn_init_TEM_PLATE(domain, err)!{{{ !----------------------------------------------------------------- ! @@ -183,93 +132,22 @@ subroutine ocn_init_TEMPLATE(domain, err)!{{{ err = 0 - end subroutine ocn_init_TEMPLATE!}}} - -!*********************************************************************** -! -! routine ocn_init_alarms_TEMPLATE -! 
-!> \brief Initialize alarms for MPAS-Ocean analysis member -!> \author Mark Petersen -!> \date November 2013 -!> \details -!> This routine conducts all alarm initializations required for the -!> MPAS-Ocean analysis member. -! -!----------------------------------------------------------------------- - - subroutine ocn_init_alarms_TEMPLATE(clock, startTime, computeAlarmID, err)!{{{ - - !----------------------------------------------------------------- - ! - ! input variables - ! - !----------------------------------------------------------------- - - integer, intent(in) :: computeAlarmID - type (MPAS_Time_Type), intent(in) :: startTime - - !----------------------------------------------------------------- - ! - ! input/output variables - ! - !----------------------------------------------------------------- - - type (MPAS_Clock_type), intent(inout) :: clock - - !----------------------------------------------------------------- - ! - ! output variables - ! - !----------------------------------------------------------------- - - integer, intent(out) :: err !< Output: error flag - - !----------------------------------------------------------------- - ! - ! local variables - ! - !----------------------------------------------------------------- - - integer :: ierr - character(len=StrKIND) :: compute_interval - type (MPAS_Time_Type) :: alarmStartTime - type (MPAS_TimeInterval_type) :: alarmTimeStep - - character (len=StrKIND), pointer :: config_TEMPLATE_compute_interval, config_output_interval - - err = 0 - - call mpas_pool_get_config(ocnConfigs, 'config_TEMPLATE_compute_interval', config_TEMPLATE_compute_interval) - call mpas_pool_get_config(ocnConfigs, 'config_output_interval', config_output_interval) - - ! 
set compute alarm for this analysis member - if (config_TEMPLATE_compute_interval=='same_as_output') then - compute_interval = config_output_interval - else - compute_interval = config_TEMPLATE_compute_interval - endif - - call mpas_set_timeInterval(alarmTimeStep, timeString=compute_interval, ierr=ierr) - alarmStartTime = startTime + alarmTimeStep - call mpas_add_clock_alarm(clock, computeAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr) - - end subroutine ocn_init_alarms_TEMPLATE!}}} + end subroutine ocn_init_TEM_PLATE!}}} !*********************************************************************** ! -! routine ocn_compute_TEMPLATE +! routine ocn_compute_TEM_PLATE ! !> \brief Compute MPAS-Ocean analysis member !> \author FILL_IN_AUTHOR !> \date FILL_IN_DATE -!> \details +!> \details !> This routine conducts all computation required for this !> MPAS-Ocean analysis member. ! !----------------------------------------------------------------------- - subroutine ocn_compute_TEMPLATE(domain, timeLevel, err)!{{{ + subroutine ocn_compute_TEM_PLATE(domain, timeLevel, err)!{{{ !----------------------------------------------------------------- ! @@ -301,14 +179,14 @@ subroutine ocn_compute_TEMPLATE(domain, timeLevel, err)!{{{ ! !----------------------------------------------------------------- - type (mpas_pool_type), pointer :: amTemplatePool + type (mpas_pool_type), pointer :: temPlateAMPool type (dm_info) :: dminfo type (block_type), pointer :: block type (mpas_pool_type), pointer :: statePool type (mpas_pool_type), pointer :: meshPool type (mpas_pool_type), pointer :: scratchPool type (mpas_pool_type), pointer :: diagnosticsPool - type (mpas_pool_type), pointer :: amTemplate + type (mpas_pool_type), pointer :: temPlateAM ! 
Here are some example variables which may be needed for your analysis member integer, pointer :: nVertLevels, nCellsSolve, nEdgesSolve, nVerticesSolve, num_tracers @@ -321,15 +199,13 @@ subroutine ocn_compute_TEMPLATE(domain, timeLevel, err)!{{{ dminfo = domain % dminfo - call mpas_timer_start("compute_TEMPLATE", .false., amTemplateTimer) - block => domain % blocklist do while (associated(block)) call mpas_pool_get_subpool(block % structs, 'state', statePool) call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) call mpas_pool_get_subpool(block % structs, 'scratch', scratchPool) call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) - call mpas_pool_get_subpool(block % structs, 'amTemplate', amTemplatePool) + call mpas_pool_get_subpool(block % structs, 'temPlateAM', temPlateAMPool) ! Here are some example variables which may be needed for your analysis member call mpas_pool_get_dimension(statePool, 'num_tracers', num_tracers) @@ -346,7 +222,7 @@ subroutine ocn_compute_TEMPLATE(domain, timeLevel, err)!{{{ call mpas_pool_get_array(meshPool, 'maxLevelEdgeTop', maxLevelEdgeTop) call mpas_pool_get_array(meshPool, 'maxLevelVertexBot', maxLevelVertexBot) - ! Computations which are functions of nCells, nEdges, or nVertices + ! Computations which are functions of nCells, nEdges, or nVertices ! must be placed within this block loop ! Here are some example loops do iCell = 1,nCellsSolve @@ -366,36 +242,34 @@ subroutine ocn_compute_TEMPLATE(domain, timeLevel, err)!{{{ ! call mpas_dmpar_min_real_array(dminfo, nMins, mins(1:nMins), reductions(1:nMins)) ! call mpas_dmpar_max_real_array(dminfo, nMaxes, maxes(1:nMaxes), reductions(1:nMaxes)) - ! Even though some variables do not include an index that is decomposed amongst + ! Even though some variables do not include an index that is decomposed amongst ! domain partitions, we assign them within a block loop so that all blocks have the ! correct values for writing output. 
block => domain % blocklist do while (associated(block)) - call mpas_pool_get_subpool(block % structs, 'amTemplate', amTemplatePool) + call mpas_pool_get_subpool(block % structs, 'temPlateAM', temPlateAMPool) - ! assignment of final amTemplate variables could occur here. + ! assignment of final temPlateAM variables could occur here. block => block % next end do - call mpas_timer_stop("TEMPLATE", amTemplateTimer) - - end subroutine ocn_compute_TEMPLATE!}}} + end subroutine ocn_compute_TEM_PLATE!}}} !*********************************************************************** ! -! routine ocn_restart_TEMPLATE +! routine ocn_restart_TEM_PLATE ! !> \brief Save restart for MPAS-Ocean analysis member !> \author FILL_IN_AUTHOR !> \date FILL_IN_DATE -!> \details +!> \details !> This routine conducts computation required to save a restart state !> for the MPAS-Ocean analysis member. ! !----------------------------------------------------------------------- - subroutine ocn_restart_TEMPLATE(domain, err)!{{{ + subroutine ocn_restart_TEM_PLATE(domain, err)!{{{ !----------------------------------------------------------------- ! @@ -427,22 +301,22 @@ subroutine ocn_restart_TEMPLATE(domain, err)!{{{ err = 0 - end subroutine ocn_restart_TEMPLATE!}}} + end subroutine ocn_restart_TEM_PLATE!}}} !*********************************************************************** ! -! routine ocn_finalize_TEMPLATE +! routine ocn_finalize_TEM_PLATE ! !> \brief Finalize MPAS-Ocean analysis member !> \author FILL_IN_AUTHOR !> \date FILL_IN_DATE -!> \details +!> \details !> This routine conducts all finalizations required for this !> MPAS-Ocean analysis member. ! !----------------------------------------------------------------------- - subroutine ocn_finalize_TEMPLATE(domain, err)!{{{ + subroutine ocn_finalize_TEM_PLATE(domain, err)!{{{ !----------------------------------------------------------------- ! 
@@ -474,8 +348,8 @@ subroutine ocn_finalize_TEMPLATE(domain, err)!{{{ err = 0 - end subroutine ocn_finalize_TEMPLATE!}}} + end subroutine ocn_finalize_TEM_PLATE!}}} -end module ocn_TEMPLATE +end module ocn_TEM_PLATE ! vim: foldmethod=marker diff --git a/src/core_ocean/analysis_members/mpas_ocn_analysis_driver.F b/src/core_ocean/analysis_members/mpas_ocn_analysis_driver.F index 249d155f51..bb5e1fbd56 100644 --- a/src/core_ocean/analysis_members/mpas_ocn_analysis_driver.F +++ b/src/core_ocean/analysis_members/mpas_ocn_analysis_driver.F @@ -22,12 +22,21 @@ module ocn_analysis_driver use mpas_derived_types use mpas_pool_routines use mpas_timekeeping + use mpas_timer use mpas_stream_manager + use mpas_abort, only : mpas_dmpar_global_abort use ocn_constants use ocn_global_stats + use ocn_surface_area_weighted_averages + use ocn_layer_volume_weighted_averages use ocn_zonal_mean -! use ocn_TEMPLATE + use ocn_okubo_weiss + use ocn_water_mass_census + use ocn_meridional_heat_transport + use ocn_test_compute_interval + use ocn_high_frequency_output +! use ocn_TEM_PLATE implicit none private @@ -49,7 +58,6 @@ module ocn_analysis_driver ocn_analysis_init, & ocn_analysis_compute_startup, & ocn_analysis_compute, & - ocn_analysis_compute_w_alarms, & ocn_analysis_write, & ocn_analysis_restart, & ocn_analysis_finalize @@ -60,6 +68,16 @@ module ocn_analysis_driver ! 
!-------------------------------------------------------------------- + + character (len=*), parameter :: initTimerPrefix = 'init_' + character (len=*), parameter :: computeTimerPrefix = 'compute_' + character (len=*), parameter :: writeTimerPrefix = 'write_' + character (len=*), parameter :: alarmTimerPrefix = 'reset_alarm_' + character (len=*), parameter :: restartTimerPrefix = 'restart_' + character (len=*), parameter :: finalizeTimerPrefix = 'finalize_' + character (len=*), parameter :: computeAlarmSuffix = 'CMPALRM' + type (mpas_pool_type), pointer :: analysisMemberList + !*********************************************************************** contains @@ -71,7 +89,7 @@ module ocn_analysis_driver !> \brief Setup packages for MPAS-Ocean analysis driver !> \author Mark Petersen !> \date November 2013 -!> \details +!> \details !> This routine is intended to configure the packages for all !> ocean analysis members. ! @@ -109,29 +127,42 @@ subroutine ocn_analysis_setup_packages(configPool, packagePool, err)!{{{ !----------------------------------------------------------------- integer :: err_tmp - logical, pointer :: config_use_global_stats, config_use_zonal_mean -! logical, pointer :: config_use_TEMPLATE - - err = 0 - - call mpas_pool_get_config(configPool, 'config_use_global_stats', config_use_global_stats) - if (config_use_global_stats) then - call ocn_setup_packages_global_stats(configPool, packagePool, err_tmp) - err = ior(err, err_tmp) - endif + character (len=StrKIND) :: configName, packageName + logical, pointer :: config_AM_enable + logical, pointer :: AMPackageActive + type (mpas_pool_iterator_type) :: poolItr + integer :: nameLength - call mpas_pool_get_config(configPool, 'config_use_zonal_mean', config_use_zonal_mean) - if (config_use_zonal_mean) then - call ocn_setup_packages_zonal_mean(configPool, packagePool, err_tmp) - err = ior(err, err_tmp) - endif + err = 0 -! call mpas_pool_get_config(configPool, 'config_use_TEMPLATE', config_use_TEMPLATE) -! 
if (config_use_TEMPLATE) then -! call ocn_setup_packages_TEMPLATE(configPool, packagePool, err_tmp) -! err = ior(err, err_tmp) -! endif + call mpas_pool_create_pool(analysisMemberList) + call mpas_pool_add_config(analysisMemberList, 'globalStats', 1) + call mpas_pool_add_config(analysisMemberList, 'testComputeInterval', 1) + call mpas_pool_add_config(analysisMemberList, 'layerVolumeWeightedAverage', 1) + call mpas_pool_add_config(analysisMemberList, 'meridionalHeatTransport', 1) + call mpas_pool_add_config(analysisMemberList, 'okuboWeiss', 1) + call mpas_pool_add_config(analysisMemberList, 'surfaceAreaWeightedAverages', 1) + call mpas_pool_add_config(analysisMemberList, 'waterMassCensus', 1) + call mpas_pool_add_config(analysisMemberList, 'zonalMean', 1) + call mpas_pool_add_config(analysisMemberList, 'highFrequencyOutput', 1) +! call mpas_pool_add_config(analysisMemberList, 'temPlate', 1) + + ! DON'T EDIT BELOW HERE + + ! Iterate over all analysis members to setup packages + call mpas_pool_begin_iteration(analysisMemberList) + do while ( mpas_pool_get_next_member(analysisMemberList, poolItr) ) + nameLength = len_trim(poolItr % memberName) + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_enable' + call mpas_pool_get_config(configPool, configName, config_AM_enable) + + if ( config_AM_enable ) then + packageName = poolItr % memberName(1:nameLength) // 'AMPKGActive' + call mpas_pool_get_package(packagePool, packageName, AMPackageActive) + AMPackageActive = .true. + end if + end do end subroutine ocn_analysis_setup_packages!}}} @@ -142,8 +173,8 @@ end subroutine ocn_analysis_setup_packages!}}} !> \brief Initialize MPAS-Ocean analysis driver !> \author Mark Petersen !> \date November 2013 -!> \details -!> This routine calls all initializations required for the +!> \details +!> This routine calls all initializations required for the !> MPAS-Ocean analysis driver. ! 
!----------------------------------------------------------------------- @@ -179,28 +210,75 @@ subroutine ocn_analysis_init(domain, err)!{{{ !----------------------------------------------------------------- integer :: err_tmp - logical, pointer :: config_use_global_stats, config_use_zonal_mean -! logical, pointer :: config_use_TEMPLATE - err = 0 + character (len=StrKIND) :: configName, alarmName, streamName, timerName + logical, pointer :: config_AM_enable + character (len=StrKIND), pointer :: config_AM_compute_interval, config_AM_stream_name + integer :: nameLength + type (mpas_pool_iterator_type) :: poolItr - call mpas_pool_get_config(domain % configs, 'config_use_global_stats', config_use_global_stats) - if (config_use_global_stats) then - call ocn_init_global_stats(domain, err_tmp) - err = ior(err, err_tmp) - endif + logical :: streamFound + character (len=StrKIND) :: referenceTimeString, outputIntervalString + type (MPAS_Time_Type) :: referenceTime + type (MPAS_TimeInterval_type) :: alarmTimeStep - call mpas_pool_get_config(domain % configs, 'config_use_zonal_mean', config_use_zonal_mean) - if (config_use_zonal_mean) then - call ocn_init_zonal_mean(domain, err_tmp) - err = ior(err, err_tmp) - endif + err = 0 -! call mpas_pool_get_config(domain % configs, 'config_use_TEMPLATE', config_use_TEMPLATE) -! if (config_use_TEMPLATE) then -! call ocn_init_TEMPLATE(domain, err_tmp) -! err = ior(err, err_tmp) -! endif + call mpas_timer_start('analysis_init', .false.) + + call mpas_pool_begin_iteration(analysisMemberList) + do while ( mpas_pool_get_next_member(analysisMemberList, poolItr) ) + nameLength = len_trim(poolItr % memberName) + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_enable' + call mpas_pool_get_config(domain % configs, configName, config_AM_enable) + + if ( config_AM_enable ) then + timerName = trim(initTimerPrefix) // poolItr % memberName(1:nameLength) + call mpas_timer_start(timerName, .false.) 
+ call ocn_init_analysis_members(domain, poolItr % memberName, err_tmp) + err = ior(err, err_tmp) + + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_compute_interval' + call mpas_pool_get_config(domain % configs, configName, config_AM_compute_interval) + + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_stream_name' + call mpas_pool_get_config(domain % configs, configName, config_AM_stream_name) + + if ( config_AM_compute_interval == 'dt' ) then + alarmTimeStep = mpas_get_clock_timestep(domain % clock, err_tmp) + call mpas_get_timeInterval(alarmTimeStep, timeString=config_AM_compute_interval, ierr=err_tmp) + end if + + ! Verify stream exists before trying to use output_interval + if ( config_AM_stream_name /= 'none' ) then + streamFound = .false. + + call mpas_stream_mgr_begin_iteration(domain % streamManager) + do while ( mpas_stream_mgr_get_next_stream(domain % streamManager, streamName) ) + if ( trim(streamName) == trim(config_AM_stream_name) ) then + streamFound = .true. + end if + end do + + if ( .not. streamFound ) then + call mpas_dmpar_global_abort('ERROR: Stream ' // trim(config_AM_stream_name) // ' does not exist. Exiting...') + end if + end if + + + if ( config_AM_compute_interval /= 'output_interval' .and. 
config_AM_stream_name /= 'none') then + alarmName = poolItr % memberName(1:nameLength) // computeAlarmSuffix + call mpas_set_timeInterval(alarmTimeStep, timeString=config_AM_compute_interval, ierr=err_tmp) + call MPAS_stream_mgr_get_property(domain % streamManager, config_AM_stream_name, MPAS_STREAM_PROPERTY_REF_TIME, referenceTimeString, err_tmp) + call mpas_set_time(referenceTime, dateTimeString=referenceTimeString, ierr=err_tmp) + call mpas_add_clock_alarm(domain % clock, alarmName, referenceTime, alarmTimeStep, ierr=err_tmp) + call mpas_reset_clock_alarm(domain % clock, alarmName, ierr=err_tmp) + end if + call mpas_timer_stop(timerName) + end if + end do + + call mpas_timer_stop('analysis_init') end subroutine ocn_analysis_init!}}} @@ -211,8 +289,8 @@ end subroutine ocn_analysis_init!}}} !> \brief Driver for MPAS-Ocean analysis computations !> \author Mark Petersen !> \date November 2013 -!> \details -!> This routine calls all computation subroutines required for the +!> \details +!> This routine calls all computation subroutines required for the !> MPAS-Ocean analysis driver. ! !----------------------------------------------------------------------- @@ -249,35 +327,54 @@ subroutine ocn_analysis_compute_startup(domain, err)!{{{ integer :: timeLevel, err_tmp - logical, pointer :: config_use_global_stats, config_global_stats_compute_startup - logical, pointer :: config_use_zonal_mean, config_zonal_mean_compute_startup -! 
logical, pointer :: config_use_TEMPLATE, config_TEMPLATE_compute_startup + character (len=StrKIND) :: configName, timerName + character (len=StrKIND), pointer :: config_AM_stream_name + logical, pointer :: config_AM_enable, config_AM_write_on_startup, config_AM_compute_on_startup + type (mpas_pool_iterator_type) :: poolItr + integer :: nameLength err = 0 - timeLevel=1 - call mpas_pool_get_config(domain % configs, 'config_use_global_stats', config_use_global_stats) - call mpas_pool_get_config(domain % configs, 'config_global_stats_compute_startup', config_global_stats_compute_startup) - if (config_use_global_stats.and.config_global_stats_compute_startup) then - call ocn_compute_global_stats(domain, timeLevel, err_tmp) - call mpas_stream_mgr_write(domain % streamManager, streamID='globalStatsOutput', forceWriteNow=.true., ierr=err_tmp) - err = ior(err, err_tmp) - endif + call mpas_timer_start('analysis_compute', .false.) - call mpas_pool_get_config(domain % configs, 'config_use_zonal_mean', config_use_zonal_mean) - call mpas_pool_get_config(domain % configs, 'config_zonal_mean_compute_startup', config_zonal_mean_compute_startup) - if (config_use_zonal_mean.and.config_zonal_mean_compute_startup) then - call ocn_compute_zonal_mean(domain, timeLevel, err_tmp) - call mpas_stream_mgr_write(domain % streamManager, streamID='zonalMeanOutput', forceWriteNow=.true., ierr=err_tmp) - err = ior(err, err_tmp) - endif + timeLevel=1 -! call mpas_pool_get_config(domain % configs, 'config_use_TEMPLATE', config_use_TEMPLATE) -! call mpas_pool_get_config(domain % configs, 'config_TEMPLATE_compute_startup', config_TEMPLATE_compute_startup) -! if (config_use_TEMPLATE.and.config_TEMPLATE_compute_startup) then -! call ocn_compute_TEMPLATE(domain, timeLevel, err_tmp) -! err = ior(err, err_tmp) -! 
endif + call mpas_pool_begin_iteration(analysisMemberList) + do while ( mpas_pool_get_next_member(analysisMemberList, poolItr) ) + nameLength = len_trim(poolItr % memberName) + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_enable' + call mpas_pool_get_config(domain % configs, configName, config_AM_enable) + + if ( config_AM_enable ) then + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_compute_on_startup' + call mpas_pool_get_config(domain % configs, configName, config_AM_compute_on_startup) + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_write_on_startup' + call mpas_pool_get_config(domain % configs, configName, config_AM_write_on_startup) + + if ( config_AM_compute_on_startup ) then + timerName = trim(computeTimerPrefix) // poolItr % memberName(1:nameLength) + call mpas_timer_start(timerName, .false.) + call ocn_compute_analysis_members(domain, timeLevel, poolItr % memberName, err_tmp) + call mpas_timer_stop(timerName) + err = ior(err, err_tmp) + + if ( config_AM_write_on_startup ) then + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_stream_name' + call mpas_pool_get_config(domain % configs, configName, config_AM_stream_name) + if ( config_AM_stream_name /= 'none' ) then + call mpas_stream_mgr_write(domain % streamManager, streamID=config_AM_stream_name, forceWriteNow=.true., ierr=err_tmp) + end if + end if + else + if ( config_AM_write_on_startup ) then + write(stderrUnit, *) ' *** WARNING: write_on_startup called without compute_on_startup for analysis member: ' & + // poolItr % memberName(1:nameLength) // '. Skipping output...' 
+ end if + end if + end if + end do + + call mpas_timer_stop('analysis_compute') end subroutine ocn_analysis_compute_startup!}}} @@ -288,8 +385,8 @@ end subroutine ocn_analysis_compute_startup!}}} !> \brief Driver for MPAS-Ocean analysis computations !> \author Mark Petersen !> \date November 2013 -!> \details -!> This routine calls all computation subroutines required for the +!> \details +!> This routine calls all computation subroutines required for the !> MPAS-Ocean analysis driver. ! !----------------------------------------------------------------------- @@ -326,102 +423,54 @@ subroutine ocn_analysis_compute(domain, err)!{{{ integer :: timeLevel, err_tmp - logical, pointer :: config_use_global_stats, config_use_zonal_mean -! logical, pointer :: config_use_TEMPLATE + character (len=StrKIND) :: configName, alarmName, timerName + character (len=StrKIND), pointer :: config_AM_stream_name, config_AM_compute_interval + logical, pointer :: config_AM_enable + type (mpas_pool_iterator_type) :: poolItr + integer :: nameLength err = 0 - timeLevel=1 - call mpas_pool_get_config(domain % configs, 'config_use_global_stats', config_use_global_stats) - if (config_use_global_stats) then - call ocn_compute_global_stats(domain, timeLevel, err_tmp) - endif + call mpas_timer_start('analysis_compute', .false.) - call mpas_pool_get_config(domain % configs, 'config_use_zonal_mean', config_use_zonal_mean) - if (config_use_zonal_mean) then - call ocn_compute_zonal_mean(domain, timeLevel, err_tmp) - endif + timeLevel=1 -! call mpas_pool_get_config(domain % configs, 'config_use_TEMPLATE', config_use_TEMPLATE) -! if (config_use_TEMPLATE) then -! call ocn_compute_TEMPLATE(domain, timeLevel, err_tmp) -! 
endif + call mpas_pool_begin_iteration(analysisMemberList) + do while ( mpas_pool_get_next_member(analysisMemberList, poolItr) ) + nameLength = len_trim(poolItr % memberName) + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_enable' + call mpas_pool_get_config(domain % configs, configName, config_AM_enable) + + if ( config_AM_enable ) then + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_compute_interval' + call mpas_pool_get_config(domain % configs, configName, config_AM_compute_interval) + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_stream_name' + call mpas_pool_get_config(domain % configs, configName, config_AM_stream_name) + + ! Build name of alarm for analysis member + alarmName = poolItr % memberName(1:nameLength) // computeAlarmSuffix + timerName = trim(computeTimerPrefix) // poolItr % memberName(1:nameLength) + + ! Compute analysis member just before output + if ( config_AM_compute_interval == 'output_interval' .and. config_AM_stream_name /= 'none') then + if ( mpas_stream_mgr_ringing_alarms(domain % streamManager, streamID=config_AM_stream_name, direction=MPAS_STREAM_OUTPUT, ierr=err_tmp) ) then + call mpas_timer_start(timerName, .false.) + call ocn_compute_analysis_members(domain, timeLevel, poolItr % memberName, err_tmp) + call mpas_timer_stop(timerName) + end if + else if ( mpas_is_alarm_ringing(domain % clock, alarmName, ierr=err_tmp) ) then + call mpas_reset_clock_alarm(domain % clock, alarmName, ierr=err_tmp) + call mpas_timer_start(timerName, .false.) + call ocn_compute_analysis_members(domain, timeLevel, poolItr % memberName, err_tmp) + call mpas_timer_stop(timerName) + end if + end if + end do + + call mpas_timer_stop('analysis_compute') end subroutine ocn_analysis_compute!}}} -!*********************************************************************** -! -! routine ocn_analysis_compute_w_alarms -! 
-!> \brief Driver for MPAS-Ocean analysis computations -!> \author Mark Petersen -!> \date November 2013 -!> \details -!> This routine calls all computation subroutines required for the -!> MPAS-Ocean analysis driver. -! -!----------------------------------------------------------------------- - - subroutine ocn_analysis_compute_w_alarms(domain, err)!{{{ - - !----------------------------------------------------------------- - ! - ! input variables - ! - !----------------------------------------------------------------- - - !----------------------------------------------------------------- - ! - ! input/output variables - ! - !----------------------------------------------------------------- - - type (domain_type), intent(inout) :: domain - - !----------------------------------------------------------------- - ! - ! output variables - ! - !----------------------------------------------------------------- - - integer, intent(out) :: err !< Output: error flag - - !----------------------------------------------------------------- - ! - ! local variables - ! - !----------------------------------------------------------------- - - integer :: timeLevel, err_tmp - logical, pointer :: config_use_global_stats, config_use_zonal_mean -! 
logical, pointer :: config_use_TEMPLATE - - err = 0 - - timeLevel=1 - call mpas_pool_get_config(domain % configs, 'config_use_global_stats', config_use_global_stats) - if (config_use_global_stats) then - if (mpas_stream_mgr_ringing_alarms(domain % streamManager, streamID='globalStatsOutput', direction=MPAS_STREAM_OUTPUT, ierr=err_tmp)) then - call ocn_compute_global_stats(domain, timeLevel, err_tmp) - endif - endif - - call mpas_pool_get_config(domain % configs, 'config_use_zonal_mean', config_use_zonal_mean) - if (config_use_zonal_mean) then - if (mpas_stream_mgr_ringing_alarms(domain % streamManager, streamID='zonalMeanOutput', direction=MPAS_STREAM_OUTPUT, ierr=err_tmp)) then - call ocn_compute_zonal_mean(domain, timeLevel, err_tmp) - endif - endif - -! call mpas_pool_get_config(domain % configs, 'config_use_TEMPLATE', config_use_TEMPLATE) -! if (config_use_TEMPLATE) then -! if (mpas_stream_mgr_ringing_alarms(domain % streamManager, streamID='TEMPLATEOutput', direction=MPAS_STREAM_OUTPUT, ierr=err_tmp) then -! call ocn_compute_TEMPLATE(domain, timeLevel, err_tmp) -! endif -! endif - - end subroutine ocn_analysis_compute_w_alarms!}}} - !*********************************************************************** ! ! routine ocn_analysis_restart @@ -429,7 +478,7 @@ end subroutine ocn_analysis_compute_w_alarms!}}} !> \brief Save restart for MPAS-Ocean analysis driver !> \author Mark Petersen !> \date November 2013 -!> \details +!> \details !> This routine calls all subroutines required to prepare to save !> the restart state for the MPAS-Ocean analysis driver. ! @@ -466,28 +515,32 @@ subroutine ocn_analysis_restart(domain, err)!{{{ !----------------------------------------------------------------- integer :: err_tmp - logical, pointer :: config_use_global_stats, config_use_zonal_mean -! 
logical, pointer :: config_use_TEMPLATE + + character (len=StrKIND) :: configName, timerName + type (mpas_pool_iterator_type) :: poolItr + logical, pointer :: config_AM_enable + integer :: nameLength err = 0 - call mpas_pool_get_config(domain % configs, 'config_use_global_stats', config_use_global_stats) - if (config_use_global_stats) then - call ocn_restart_global_stats(domain, err_tmp) - err = ior(err, err_tmp) - endif + call mpas_timer_start('analysis_restart', .false.) - call mpas_pool_get_config(domain % configs, 'config_use_zonal_mean', config_use_zonal_mean) - if (config_use_zonal_mean) then - call ocn_restart_zonal_mean(domain, err_tmp) - err = ior(err, err_tmp) - endif + call mpas_pool_begin_iteration(analysisMemberList) + do while ( mpas_pool_get_next_member(analysisMemberList, poolItr) ) + nameLength = len_trim(poolItr % memberName) + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_enable' + call mpas_pool_get_config(domain % configs, configName, config_AM_enable) -! call mpas_pool_get_config(domain % configs, 'config_use_TEMPLATE', config_use_TEMPLATE) -! if (config_use_TEMPLATE) then -! call ocn_restart_TEMPLATE(domain, err_tmp) -! err = ior(err, err_tmp) -! endif + if ( config_AM_enable ) then + timerName = trim(restartTimerPrefix) // poolItr % memberName(1:nameLength) + call mpas_timer_start(timerName, .false.) + call ocn_restart_analysis_members(domain, poolItr % memberName, err_tmp) + err = ior(err, err_tmp) + call mpas_timer_stop(timerName) + end if + end do + + call mpas_timer_stop('analysis_restart') end subroutine ocn_analysis_restart!}}} @@ -498,15 +551,15 @@ end subroutine ocn_analysis_restart!}}} !> \brief Driver for MPAS-Ocean analysis output !> \author Mark Petersen !> \date November 2013 -!> \details -!> This routine calls all output writing subroutines required for the +!> \details +!> This routine calls all output writing subroutines required for the !> MPAS-Ocean analysis driver. 
!> At this time this is just a stub, and all analysis output is written !> to the output file specified by config_output_name. ! !----------------------------------------------------------------------- - subroutine ocn_analysis_write(streamManager, err)!{{{ + subroutine ocn_analysis_write(domain, err)!{{{ !----------------------------------------------------------------- ! @@ -514,7 +567,7 @@ subroutine ocn_analysis_write(streamManager, err)!{{{ ! !----------------------------------------------------------------- - type (MPAS_streamManager_type), intent(inout) :: streamManager + type (domain_type), intent(in) :: domain !----------------------------------------------------------------- ! @@ -538,30 +591,39 @@ subroutine ocn_analysis_write(streamManager, err)!{{{ integer :: err_tmp - logical, pointer :: config_use_global_stats, config_use_zonal_mean -! logical, pointer :: config_use_TEMPLATE + character (len=StrKIND) :: configName, timerName + character (len=StrKIND), pointer :: config_AM_stream_name + logical, pointer :: config_AM_enable + type (mpas_pool_iterator_type) :: poolItr + integer :: nameLength err = 0 - call mpas_pool_get_config(ocnConfigs, 'config_use_global_stats', config_use_global_stats) - if (config_use_global_stats) then - call mpas_stream_mgr_write(streamManager, streamID='globalStatsOutput', ierr=err_tmp) - call mpas_stream_mgr_reset_alarms(streamManager, streamID='globalStatsOutput', ierr=err_tmp) - err = ior(err, err_tmp) - endif - - call mpas_pool_get_config(ocnConfigs, 'config_use_zonal_mean', config_use_zonal_mean) - if (config_use_zonal_mean) then - call mpas_stream_mgr_write(streamManager, streamID='zonalMeanOutput', ierr=err_tmp) - call mpas_stream_mgr_reset_alarms(streamManager, streamID='zonalMeanOutput', ierr=err_tmp) - endif - -! call mpas_pool_get_config(ocnConfigs, 'config_use_TEMPLATE', config_use_TEMPLATE) -! if (config_use_TEMPLATE) then -! call mpas_stream_mgr_write(streamManager, streamID='TEMPLATEOutput', ierr=err_tmp) -! 
call mpas_stream_mgr_reset_alarms(streamManager, streamID='TEMPLATEOutput', ierr=err_tmp) -! err = ior(err, err_tmp) -! endif + call mpas_timer_start('analysis_write', .false.) + + call mpas_pool_begin_iteration(analysisMemberList) + do while ( mpas_pool_get_next_member(analysisMemberList, poolItr) ) + nameLength = len_trim(poolItr % memberName) + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_enable' + call mpas_pool_get_config(domain % configs, configName, config_AM_enable) + + if ( config_AM_enable ) then + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_stream_name' + call mpas_pool_get_config(domain % configs, configName, config_AM_stream_name) + if ( config_AM_stream_name /= 'none' ) then + timerName = trim(writeTimerPrefix) // poolItr % memberName(1:nameLength) + call mpas_timer_start(timerName, .false.) + call mpas_stream_mgr_write(domain % streamManager, streamID=config_AM_stream_name, ierr=err_tmp) + call mpas_timer_stop(timerName) + timerName = trim(alarmTimerPrefix) // poolItr % memberName(1:nameLength) + call mpas_timer_start(timerName, .false.) + call mpas_stream_mgr_reset_alarms(domain % streamManager, streamID=config_AM_stream_name, ierr=err_tmp) + call mpas_timer_stop(timerName) + end if + end if + end do + + call mpas_timer_stop('analysis_write') end subroutine ocn_analysis_write!}}} @@ -572,8 +634,8 @@ end subroutine ocn_analysis_write!}}} !> \brief Finalize MPAS-Ocean analysis driver !> \author Mark Petersen !> \date November 2013 -!> \details -!> This routine calls all finalize routines required for the +!> \details +!> This routine calls all finalize routines required for the !> MPAS-Ocean analysis driver. ! 
!----------------------------------------------------------------------- @@ -609,31 +671,229 @@ subroutine ocn_analysis_finalize(domain, err)!{{{ !----------------------------------------------------------------- integer :: err_tmp - logical, pointer :: config_use_global_stats, config_use_zonal_mean -! logical, pointer :: config_use_TEMPLATE + + character (len=StrKIND) :: configName, timerName + logical, pointer :: config_AM_enable + type (mpas_pool_iterator_type) :: poolItr + integer :: nameLength err = 0 - call mpas_pool_get_config(domain % configs, 'config_use_global_stats', config_use_global_stats) - if (config_use_global_stats) then - call ocn_finalize_global_stats(domain, err_tmp) - err = ior(err, err_tmp) - endif + call mpas_timer_start('analysis_finalize', .false.) - call mpas_pool_get_config(domain % configs, 'config_use_zonal_mean', config_use_zonal_mean) - if (config_use_zonal_mean) then - call ocn_finalize_zonal_mean(domain, err_tmp) - err = ior(err, err_tmp) - endif + call mpas_pool_begin_iteration(analysisMemberList) + + do while ( mpas_pool_get_next_member(analysisMemberList, poolItr) ) + nameLength = len_trim(poolItr % memberName) + configName = 'config_AM_' // poolItr % memberName(1:nameLength) // '_enable' + call mpas_pool_get_config(domain % configs, configName, config_AM_enable) + + if ( config_AM_enable ) then + timerName = trim(finalizeTimerPrefix) // poolItr % memberName(1:nameLength) + call mpas_timer_start(timerName, .false.) + call ocn_finalize_analysis_members(domain, poolItr % memberName, err_tmp) + err = ior(err, err_tmp) + call mpas_timer_stop(timerName) + end if + end do -! call mpas_pool_get_config(domain % configs, 'config_use_TEMPLATE', config_use_TEMPLATE) -! if (config_use_TEMPLATE) then -! call ocn_finalize_TEMPLATE(domain, err_tmp) -! err = ior(err, err_tmp) -! endif + call mpas_timer_stop('analysis_finalize') end subroutine ocn_analysis_finalize!}}} +!*********************************************************************** +! 
+! routine ocn_init_analysis_members +! +!> \brief Analysis member initialization driver +!> \author Doug Jacobsen +!> \date 07/01/2015 +!> \details +!> This private routine calls the correct init routine for each analysis member. +! +!----------------------------------------------------------------------- + subroutine ocn_init_analysis_members(domain, analysisMemberName, iErr)!{{{ + type (domain_type), intent(inout) :: domain !< Input: Domain information + character (len=*), intent(in) :: analysisMemberName !< Input: Name of analysis member + integer, intent(out) :: iErr !< Output: Error code + + integer :: nameLength, err_tmp + + iErr = 0 + + nameLength = len_trim(analysisMemberName) + + if ( analysisMemberName(1:nameLength) == 'globalStats' ) then + call ocn_init_global_stats(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'testComputeInterval' ) then + call ocn_init_test_compute_interval(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'layerVolumeWeightedAverage' ) then + call ocn_init_layer_volume_weighted_averages(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'meridionalHeatTransport' ) then + call ocn_init_meridional_heat_transport(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'okuboWeiss' ) then + call ocn_init_okubo_weiss(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'surfaceAreaWeightedAverages' ) then + call ocn_init_surface_area_weighted_averages(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'waterMassCensus' ) then + call ocn_init_water_mass_census(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'zonalMean' ) then + call ocn_init_zonal_mean(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'highFrequencyOutput' ) then + call ocn_init_high_frequency_output(domain, err_tmp) +! else if ( analysisMemberName(1:nameLength) == 'temPlate' ) then +! 
call ocn_init_TEM_PLATE(domain, err_tmp) + end if + + iErr = ior(iErr, err_tmp) + + end subroutine ocn_init_analysis_members!}}} + +!*********************************************************************** +! +! routine ocn_compute_analysis_members +! +!> \brief Analysis member compute driver +!> \author Doug Jacobsen +!> \date 07/01/2015 +!> \details +!> This private routine calls the correct compute routine for each analysis member. +! +!----------------------------------------------------------------------- + subroutine ocn_compute_analysis_members(domain, timeLevel, analysisMemberName, iErr)!{{{ + type (domain_type), intent(inout) :: domain !< Input: Domain information + integer, intent(in) :: timeLevel !< Input: Time level to compute with in analysis member + character (len=*), intent(in) :: analysisMemberName !< Input: Name of analysis member + integer, intent(out) :: iErr !< Output: Error code + + integer :: nameLength, err_tmp + + iErr = 0 + + nameLength = len_trim(analysisMemberName) + + if ( analysisMemberName(1:nameLength) == 'globalStats' ) then + call ocn_compute_global_stats(domain, timeLevel, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'testComputeInterval' ) then + call ocn_compute_test_compute_interval(domain, timeLevel, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'layerVolumeWeightedAverage' ) then + call ocn_compute_layer_volume_weighted_averages(domain, timeLevel, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'meridionalHeatTransport' ) then + call ocn_compute_meridional_heat_transport(domain, timeLevel, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'okuboWeiss' ) then + call ocn_compute_okubo_weiss(domain, timeLevel, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'surfaceAreaWeightedAverages' ) then + call ocn_compute_surface_area_weighted_averages(domain, timeLevel, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'waterMassCensus' ) then + call 
ocn_compute_water_mass_census(domain, timeLevel, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'zonalMean' ) then + call ocn_compute_zonal_mean(domain, timeLevel, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'highFrequencyOutput' ) then + call ocn_compute_high_frequency_output(domain, timeLevel, err_tmp) +! else if ( analysisMemberName(1:nameLength) == 'temPlate' ) then +! call ocn_compute_TEM_PLATE(domain, timeLevel, err_tmp) + end if + + iErr = ior(iErr, err_tmp) + + end subroutine ocn_compute_analysis_members!}}} + +!*********************************************************************** +! +! routine ocn_restart_analysis_members +! +!> \brief Analysis member restart driver +!> \author Doug Jacobsen +!> \date 07/01/2015 +!> \details +!> This private routine calls the correct restart routine for each analysis member. +! +!----------------------------------------------------------------------- + subroutine ocn_restart_analysis_members(domain, analysisMemberName, iErr)!{{{ + type (domain_type), intent(inout) :: domain !< Input: Domain information + character (len=*), intent(in) :: analysisMemberName !< Input: Name of analysis member + integer, intent(out) :: iErr !< Output: Error code + + integer :: nameLength, err_tmp + + iErr = 0 + + nameLength = len_trim(analysisMemberName) + + if ( analysisMemberName(1:nameLength) == 'globalStats' ) then + call ocn_restart_global_stats(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'testComputeInterval' ) then + call ocn_restart_test_compute_interval(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'layerVolumeWeightedAverage' ) then + call ocn_restart_layer_volume_weighted_averages(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'meridionalHeatTransport' ) then + call ocn_restart_meridional_heat_transport(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'okuboWeiss' ) then + call ocn_restart_okubo_weiss(domain, err_tmp) + else if ( 
analysisMemberName(1:nameLength) == 'surfaceAreaWeightedAverages' ) then + call ocn_restart_surface_area_weighted_averages(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'waterMassCensus' ) then + call ocn_restart_water_mass_census(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'zonalMean' ) then + call ocn_restart_zonal_mean(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'highFrequencyOutput' ) then + call ocn_restart_high_frequency_output(domain, err_tmp) +! else if ( analysisMemberName(1:nameLength) == 'temPlate' ) then +! call ocn_restart_TEM_PLATE(domain, err_tmp) + end if + + iErr = ior(iErr, err_tmp) + + end subroutine ocn_restart_analysis_members!}}} + +!*********************************************************************** +! +! routine ocn_finalize_analysis_members +! +!> \brief Analysis member finalize driver +!> \author Doug Jacobsen +!> \date 07/01/2015 +!> \details +!> This private routine calls the correct finalize routine for each analysis member. +! 
+!----------------------------------------------------------------------- + subroutine ocn_finalize_analysis_members(domain, analysisMemberName, iErr)!{{{ + type (domain_type), intent(inout) :: domain !< Input: Domain information + character (len=*), intent(in) :: analysisMemberName !< Input: Name of analysis member + integer, intent(out) :: iErr !< Output: Error code + + integer :: nameLength, err_tmp + + iErr = 0 + + nameLength = len_trim(analysisMemberName) + + if ( analysisMemberName(1:nameLength) == 'globalStats' ) then + call ocn_finalize_global_stats(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'testComputeInterval' ) then + call ocn_finalize_test_compute_interval(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'layerVolumeWeightedAverage' ) then + call ocn_finalize_layer_volume_weighted_averages(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'meridionalHeatTransport' ) then + call ocn_finalize_meridional_heat_transport(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'okuboWeiss' ) then + call ocn_finalize_okubo_weiss(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'surfaceAreaWeightedAverages' ) then + call ocn_finalize_surface_area_weighted_averages(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'waterMassCensus' ) then + call ocn_finalize_water_mass_census(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'zonalMean' ) then + call ocn_finalize_zonal_mean(domain, err_tmp) + else if ( analysisMemberName(1:nameLength) == 'highFrequencyOutput' ) then + call ocn_finalize_high_frequency_output(domain, err_tmp) +! else if ( analysisMemberName(1:nameLength) == 'temPlate' ) then +! call ocn_finalize_TEM_PLATE(domain, err_tmp) + end if + + iErr = ior(iErr, err_tmp) + + end subroutine ocn_finalize_analysis_members!}}} + end module ocn_analysis_driver ! 
vim: foldmethod=marker diff --git a/src/core_ocean/analysis_members/mpas_ocn_global_stats.F b/src/core_ocean/analysis_members/mpas_ocn_global_stats.F index e918b26ce6..c989190b72 100644 --- a/src/core_ocean/analysis_members/mpas_ocn_global_stats.F +++ b/src/core_ocean/analysis_members/mpas_ocn_global_stats.F @@ -21,7 +21,6 @@ module ocn_global_stats use mpas_derived_types use mpas_pool_routines - use mpas_timer use mpas_dmpar use mpas_timekeeping use mpas_stream_manager @@ -45,8 +44,7 @@ module ocn_global_stats ! !-------------------------------------------------------------------- - public :: ocn_setup_packages_global_stats, & - ocn_init_global_stats, & + public :: ocn_init_global_stats, & ocn_compute_global_stats, & ocn_restart_global_stats, & ocn_finalize_global_stats @@ -57,68 +55,10 @@ module ocn_global_stats ! !-------------------------------------------------------------------- - type (timer_node), pointer :: amGlobalStatsTimer - !*********************************************************************** contains -!*********************************************************************** -! -! routine ocn_setup_packages_global_stats -! -!> \brief Set up packages for MPAS-Ocean analysis member -!> \author Mark Petersen -!> \date November 2013 -!> \details -!> This routine is intended to configure the packages for this MPAS -!> ocean analysis member -! -!----------------------------------------------------------------------- - - subroutine ocn_setup_packages_global_stats(configPool, packagePool, err)!{{{ - - !----------------------------------------------------------------- - ! - ! input variables - ! - !----------------------------------------------------------------- - - type (mpas_pool_type), intent(in) :: configPool - type (mpas_pool_type), intent(in) :: packagePool - - !----------------------------------------------------------------- - ! - ! input/output variables - ! 
- !----------------------------------------------------------------- - - !----------------------------------------------------------------- - ! - ! output variables - ! - !----------------------------------------------------------------- - - integer, intent(out) :: err !< Output: error flag - - !----------------------------------------------------------------- - ! - ! local variables - ! - !----------------------------------------------------------------- - - logical, pointer :: amGlobalStatsActive - - err = 0 - - call mpas_pool_get_package(packagePool, 'amGlobalStatsActive', amGlobalStatsActive) - - ! turn on package for this analysis member - amGlobalStatsActive = .true. - - end subroutine ocn_setup_packages_global_stats!}}} - - !*********************************************************************** ! ! routine ocn_init_global_stats @@ -126,8 +66,8 @@ end subroutine ocn_setup_packages_global_stats!}}} !> \brief Initialize MPAS-Ocean analysis member !> \author Mark Petersen !> \date November 2013 -!> \details -!> This routine conducts all initializations required for the +!> \details +!> This routine conducts all initializations required for the !> MPAS-Ocean analysis member. ! !----------------------------------------------------------------------- @@ -162,10 +102,73 @@ subroutine ocn_init_global_stats(domain, err)!{{{ ! 
!----------------------------------------------------------------- + type (dm_info), pointer :: dminfo + logical, pointer :: config_AM_globalStats_text_file + character (len=StrKIND), pointer :: config_AM_globalStats_directory + integer :: fileID, i + err = 0 + call mpas_pool_get_config(domain % configs, 'config_AM_globalStats_directory', config_AM_globalStats_directory) + call mpas_pool_get_config(domain % configs, 'config_AM_globalStats_text_file', config_AM_globalStats_text_file) + if (config_AM_globalStats_text_file) then + dminfo => domain % dminfo + if (dminfo % my_proc_id == IO_NODE) then + fileID = getFreeUnit() + open(fileID,file=trim(config_AM_globalStats_directory)//'/stats_readme.txt',STATUS='UNKNOWN', POSITION='rewind') + + write (fileID,'(a)') 'readme file for MPAS-Ocean global statistics' + write (fileID,'(/,a)') 'stats_time.txt. contains: timeIndex, timestamp, CFLNumberGlobal' + write (fileID,'(/,a)') 'All other stats_*.txt. contain the following columns. Rows correspond to timestamps in rows of stats_time.txt' + write (fileID,'(a)') "See user's guide for units associated with these variables." + + i=1 + write (fileID,'(i5,a)') i,'. time, in days, using a 360 day calendar'; i=i+1 + write (fileID,'(i5,a)') i,'. layerThickness'; i=i+1 + write (fileID,'(i5,a)') i,'. normalVelocity'; i=i+1 + write (fileID,'(i5,a)') i,'. tangentialVelocity'; i=i+1 + write (fileID,'(i5,a)') i,'. layerThicknessEdge'; i=i+1 + write (fileID,'(i5,a)') i,'. relativeVorticity'; i=i+1 + write (fileID,'(i5,a)') i,'. enstrophy = relativeVorticity**2'; i=i+1 + write (fileID,'(i5,a)') i,'. kineticEnergyCell'; i=i+1 + write (fileID,'(i5,a)') i,'. normalizedAbsoluteVorticity = (relative vorticity + planetary vorticity)/layer thickness'; i=i+1 + write (fileID,'(i5,a)') i,'. pressure'; i=i+1 + write (fileID,'(i5,a)') i,'. montgomeryPotential'; i=i+1 + write (fileID,'(i5,a)') i,'. vertVelocityTop vertical velocity'; i=i+1 + write (fileID,'(i5,a)') i,'. 
vertAleTransportTop vertical transport'; i=i+1 + write (fileID,'(i5,a)') i,'. lowFreqDivergence'; i=i+1 + write (fileID,'(i5,a)') i,'. highFreqThickness'; i=i+1 + write (fileID,'(i5,a)') i,'. Tracers: usually T, S, then others in remaining columns' + + write (fileID,'(/,a)') 'A chain of simple unix commands may be used to access a specific part of the data. For example,' + write (fileID,'(a)') 'to view the last three values of column seven in the global average, use:' + write (fileID,'(a)') "cat stats_avg.txt | awk '{print $7}' | tail -n3" + + close (fileID) + endif + + endif + end subroutine ocn_init_global_stats!}}} + integer function getFreeUnit()!{{{ + implicit none + + integer :: index + logical :: isOpened + + getFreeUnit = 0 + do index = 1,99 + if((index /= 5) .and. (index /= 6)) then + inquire(unit = index, opened = isOpened) + if( .not. isOpened) then + getFreeUnit = index + return + end if + end if + end do + end function getFreeUnit!}}} + !*********************************************************************** ! ! routine ocn_compute_global_stats @@ -173,7 +176,7 @@ end subroutine ocn_init_global_stats!}}} !> \brief Compute MPAS-Ocean analysis member !> \author Mark Petersen !> \date November 2013 -!> \details +!> \details !> This routine conducts all computation required for this !> MPAS-Ocean analysis member. ! @@ -211,7 +214,7 @@ subroutine ocn_compute_global_stats(domain, timeLevel, err)!{{{ ! 
!----------------------------------------------------------------- - type (mpas_pool_type), pointer :: amGlobalStatsPool + type (mpas_pool_type), pointer :: globalStatsAMPool type (dm_info) :: dminfo type (block_type), pointer :: block type (mpas_pool_type), pointer :: statePool @@ -237,36 +240,36 @@ subroutine ocn_compute_global_stats(domain, timeLevel, err)!{{{ normalizedRelativeVorticityEdge, normalizedPlanetaryVorticityEdge, pressure, montgomeryPotential, vertAleTransportTop, vertVelocityTop, & lowFreqDivergence, highFreqThickness, density real (kind=RKIND), dimension(:,:,:), pointer :: tracers - + real (kind=RKIND), dimension(:), pointer :: minGlobalStats,maxGlobalStats,sumGlobalStats, averages, rms, verticalSumMins, verticalSumMaxes real (kind=RKIND), dimension(kMaxVariables) :: sumSquares, reductions, sums, mins, maxes real (kind=RKIND), dimension(kMaxVariables) :: sums_tmp, sumSquares_tmp, mins_tmp, maxes_tmp, averages_tmp, verticalSumMins_tmp, verticalSumMaxes_tmp real (kind=RKIND), dimension(:,:), allocatable :: enstrophy, normalizedAbsoluteVorticity, workArray - logical, pointer :: thicknessFilterActive, amGlobalStatsActive + logical, pointer :: thicknessFilterActive, globalStatsAMPKGActive + logical, pointer :: config_AM_globalStats_text_file + character (len=StrKIND), pointer :: config_AM_globalStats_directory err = 0 - call mpas_pool_get_package(ocnPackages, 'amGlobalStatsActive', amGlobalStatsActive) + call mpas_pool_get_package(ocnPackages, 'globalStatsAMPKGActive', globalStatsAMPKGActive) - if ( .not. amGlobalStatsActive ) return + if ( .not. globalStatsAMPKGActive ) return dminfo = domain % dminfo - call mpas_timer_start("compute_global_stats", .false., amGlobalStatsTimer) - call mpas_pool_get_package(ocnPackages, 'thicknessFilterActive', thicknessFilterActive) ! 
write out data to Analysis Member output - call mpas_pool_get_subpool(domain % blocklist % structs, 'amGlobalStats', amGlobalStatsPool) - call mpas_pool_get_array(amGlobalStatsPool, 'minGlobalStats', minGlobalStats) - call mpas_pool_get_array(amGlobalStatsPool, 'maxGlobalStats', maxGlobalStats) - call mpas_pool_get_array(amGlobalStatsPool, 'sumGlobalStats', sumGlobalStats) - call mpas_pool_get_array(amGlobalStatsPool, 'rmsGlobalStats', rms) - call mpas_pool_get_array(amGlobalStatsPool, 'avgGlobalStats', averages) - call mpas_pool_get_array(amGlobalStatsPool, 'vertSumMinGlobalStats', verticalSumMins) - call mpas_pool_get_array(amGlobalStatsPool, 'vertSumMaxGlobalStats', verticalSumMaxes) + call mpas_pool_get_subpool(domain % blocklist % structs, 'globalStatsAM', globalStatsAMPool) + call mpas_pool_get_array(globalStatsAMPool, 'minGlobalStats', minGlobalStats) + call mpas_pool_get_array(globalStatsAMPool, 'maxGlobalStats', maxGlobalStats) + call mpas_pool_get_array(globalStatsAMPool, 'sumGlobalStats', sumGlobalStats) + call mpas_pool_get_array(globalStatsAMPool, 'rmsGlobalStats', rms) + call mpas_pool_get_array(globalStatsAMPool, 'avgGlobalStats', averages) + call mpas_pool_get_array(globalStatsAMPool, 'vertSumMinGlobalStats', verticalSumMins) + call mpas_pool_get_array(globalStatsAMPool, 'vertSumMaxGlobalStats', verticalSumMaxes) sums = 0.0 sumSquares = 0.0 @@ -288,24 +291,24 @@ subroutine ocn_compute_global_stats(domain, timeLevel, err)!{{{ call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) call mpas_pool_get_subpool(block % structs, 'scratch', scratchPool) call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) - call mpas_pool_get_subpool(block % structs, 'amGlobalStats', amGlobalStatsPool) - + call mpas_pool_get_subpool(block % structs, 'globalStatsAM', globalStatsAMPool) + call mpas_pool_get_dimension(statePool, 'num_tracers', num_tracers) - call mpas_pool_get_array(meshPool, 'areaCell', areaCell) - call 
mpas_pool_get_array(meshPool, 'dcEdge', dcEdge) - call mpas_pool_get_array(meshPool, 'dvEdge', dvEdge) - call mpas_pool_get_array(meshPool, 'areaTriangle', areaTriangle) - call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) - call mpas_pool_get_array(meshPool, 'maxLevelEdgeTop', maxLevelEdgeTop) + call mpas_pool_get_array(meshPool, 'areaCell', areaCell) + call mpas_pool_get_array(meshPool, 'dcEdge', dcEdge) + call mpas_pool_get_array(meshPool, 'dvEdge', dvEdge) + call mpas_pool_get_array(meshPool, 'areaTriangle', areaTriangle) + call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) + call mpas_pool_get_array(meshPool, 'maxLevelEdgeTop', maxLevelEdgeTop) call mpas_pool_get_array(meshPool, 'maxLevelVertexBot', maxLevelVertexBot) - call mpas_pool_get_array(statePool, 'layerThickness', layerThickness) - call mpas_pool_get_array(statePool, 'normalVelocity', normalVelocity) - call mpas_pool_get_array(statePool, 'tracers', tracers) + call mpas_pool_get_array(statePool, 'layerThickness', layerThickness, 1) + call mpas_pool_get_array(statePool, 'normalVelocity', normalVelocity, 1) + call mpas_pool_get_array(statePool, 'tracers', tracers, 1) if(thicknessFilterActive) then - call mpas_pool_get_array(statePool, 'lowFreqDivergence', lowFreqDivergence) - call mpas_pool_get_array(statePool, 'highFreqThickness', highFreqThickness) + call mpas_pool_get_array(statePool, 'lowFreqDivergence', lowFreqDivergence, 1) + call mpas_pool_get_array(statePool, 'highFreqThickness', highFreqThickness, 1) end if call mpas_pool_get_array(diagnosticsPool, 'density', density) @@ -490,7 +493,7 @@ subroutine ocn_compute_global_stats(domain, timeLevel, err)!{{{ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) end if - + ! 
highFreqThickness variableIndex = variableIndex + 1 if (thicknessFilterActive) then @@ -662,7 +665,7 @@ subroutine ocn_compute_global_stats(domain, timeLevel, err)!{{{ variableIndex = variableIndex + 1 averages(variableIndex) = sums(variableIndex)/volumeCellGlobal rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeCellGlobal) - + ! highFreqThickness variableIndex = variableIndex + 1 averages(variableIndex) = sums(variableIndex)/volumeCellGlobal @@ -672,7 +675,7 @@ subroutine ocn_compute_global_stats(domain, timeLevel, err)!{{{ variableIndex = variableIndex + 1 averages(variableIndex) = 0.0_RKIND rms(variableIndex) = 0.0_RKIND - + ! highFreqThickness variableIndex = variableIndex + 1 averages(variableIndex) = 0.0_RKIND @@ -690,7 +693,43 @@ subroutine ocn_compute_global_stats(domain, timeLevel, err)!{{{ maxGlobalStats(1:nVariables) = maxes(1:nVariables) sumGlobalStats(1:nVariables) = sums(1:nVariables) - call mpas_timer_stop("global_stats", amGlobalStatsTimer) + call mpas_pool_get_config(domain % configs, 'config_AM_globalStats_text_file', config_AM_globalStats_text_file) + call mpas_pool_get_config(domain % configs, 'config_AM_globalStats_directory', config_AM_globalStats_directory) + if (config_AM_globalStats_text_file) then + + call mpas_pool_get_subpool(domain % blocklist % structs, 'diagnostics', diagnosticsPool) + call mpas_pool_get_array(diagnosticsPool, 'xtime', xtime) + + ! 
write out the data to files + if (dminfo % my_proc_id == IO_NODE) then + fileID = getFreeUnit() + open(fileID,file=trim(config_AM_globalStats_directory)//'/stats_min.txt',STATUS='UNKNOWN', POSITION='append') + write (fileID,'(100es24.14)') time_days, mins(1:nVariables) + close (fileID) + open(fileID,file=trim(config_AM_globalStats_directory)//'/stats_max.txt',STATUS='UNKNOWN', POSITION='append') + write (fileID,'(100es24.14)') time_days, maxes(1:nVariables) + close (fileID) + open(fileID,file=trim(config_AM_globalStats_directory)//'/stats_sum.txt',STATUS='UNKNOWN', POSITION='append') + write (fileID,'(100es24.14)') time_days, sums(1:nVariables) + close (fileID) + open(fileID,file=trim(config_AM_globalStats_directory)//'/stats_rms.txt',STATUS='UNKNOWN', POSITION='append') + write (fileID,'(100es24.14)') time_days, rms(1:nVariables) + close (fileID) + open(fileID,file=trim(config_AM_globalStats_directory)//'/stats_avg.txt',STATUS='UNKNOWN', POSITION='append') + write (fileID,'(100es24.14)') time_days, averages(1:nVariables) + close (fileID) + open(fileID,file=trim(config_AM_globalStats_directory)//'/stats_time.txt',STATUS='UNKNOWN', POSITION='append') + write (fileID,'(a)') trim(xtime) + close (fileID) + open(fileID,file=trim(config_AM_globalStats_directory)//'/stats_colmin.txt',STATUS='UNKNOWN', POSITION='append') + write (fileID,'(100es24.14)') verticalSumMins(1:nVariables) + close (fileID) + open(fileID,file=trim(config_AM_globalStats_directory)//'/stats_colmax.txt',STATUS='UNKNOWN', POSITION='append') + write (fileID,'(100es24.14)') verticalSumMaxes(1:nVariables) + close (fileID) + end if + + endif end subroutine ocn_compute_global_stats!}}} @@ -701,7 +740,7 @@ end subroutine ocn_compute_global_stats!}}} !> \brief Save restart for MPAS-Ocean analysis member !> \author Mark Petersen !> \date November 2013 -!> \details +!> \details !> This routine conducts computation required to save a restart state !> for the MPAS-Ocean analysis member. ! 
@@ -748,7 +787,7 @@ end subroutine ocn_restart_global_stats!}}} !> \brief Finalize MPAS-Ocean analysis member !> \author Mark Petersen !> \date November 2013 -!> \details +!> \details !> This routine conducts all finalizations required for this !> MPAS-Ocean analysis member. ! diff --git a/src/core_ocean/analysis_members/mpas_ocn_high_frequency_output.F b/src/core_ocean/analysis_members/mpas_ocn_high_frequency_output.F new file mode 100644 index 0000000000..38e3675394 --- /dev/null +++ b/src/core_ocean/analysis_members/mpas_ocn_high_frequency_output.F @@ -0,0 +1,319 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +! +! ocn_high_frequency_output +! +!> \brief MPAS ocean analysis mode member: high_frequency_output +!> \author Todd Ringler +!> \date 2015/06/12 +!> \details +!> MPAS ocean analysis mode member: high_frequency_output +!> +!----------------------------------------------------------------------- + +module ocn_high_frequency_output + + use mpas_derived_types + use mpas_pool_routines + use mpas_dmpar + use mpas_timekeeping + use mpas_stream_manager + + use ocn_constants + use ocn_diagnostics_routines + + implicit none + private + save + + !-------------------------------------------------------------------- + ! + ! Public parameters + ! + !-------------------------------------------------------------------- + + !-------------------------------------------------------------------- + ! + ! Public member functions + ! 
+ !-------------------------------------------------------------------- + + public :: ocn_init_high_frequency_output, & + ocn_compute_high_frequency_output, & + ocn_restart_high_frequency_output, & + ocn_finalize_high_frequency_output + + !-------------------------------------------------------------------- + ! + ! Private module variables + ! + !-------------------------------------------------------------------- + +!*********************************************************************** + +contains + +!*********************************************************************** +! +! routine ocn_init_high_frequency_output +! +!> \brief Initialize MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date 2015/06/12 +!> \details +!> This routine conducts all initializations required for the +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_init_high_frequency_output(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_init_high_frequency_output!}}} + +!*********************************************************************** +! +! routine ocn_compute_high_frequency_output +! 
+!> \brief Compute MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date 2015/06/12 +!> \details +!> This routine conducts all computation required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_compute_high_frequency_output(domain, timeLevel, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, intent(in) :: timeLevel + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + type (dm_info) :: dminfo + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: statePool + type (mpas_pool_type), pointer :: diagnosticsPool + type (mpas_pool_type), pointer :: forcingPool + type (mpas_pool_type), pointer :: highFrequencyOutputAMPool + + integer :: iLevel, iLevelTarget + integer, pointer :: nVertLevels + real (kind=RKIND), dimension(:), pointer :: refBottomDepth, kineticEnergyAt100m, relativeVorticityAt100m + real (kind=RKIND), dimension(:,:), pointer :: kineticEnergyCell, relativeVorticityCell, tracersAtSurface + real (kind=RKIND), dimension(:,:,:), pointer :: tracers + + err = 0 + + dminfo = domain % dminfo + + block => domain % blocklist + do while (associated(block)) + ! 
get dimensions + call mpas_pool_get_dimension(block % dimensions, 'nVertLevels', nVertLevels) + + ! get pointers to pools + call mpas_pool_get_subpool(block % structs, 'state', statePool) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) + call mpas_pool_get_subpool(block % structs, 'forcing', forcingPool) + call mpas_pool_get_subpool(domain % blocklist % structs, 'highFrequencyOutputAM', highFrequencyOutputAMPool) + + ! get static data from mesh pool + call mpas_pool_get_array(meshPool, 'refBottomDepth', refBottomDepth) + + ! get arrays that will be 'sliced' and put into high frequency output + call mpas_pool_get_array(diagnosticsPool, 'kineticEnergyCell', kineticEnergyCell, timeLevel) + call mpas_pool_get_array(diagnosticsPool, 'relativeVorticityCell', relativeVorticityCell, timeLevel) + call mpas_pool_get_array(statePool, 'tracers', tracers, timeLevel) + + ! get arrays that can be written to output at high frequency + call mpas_pool_get_array(highFrequencyOutputAMPool, 'kineticEnergyAt100m', kineticEnergyAt100m) + call mpas_pool_get_array(highFrequencyOutputAMPool, 'relativeVorticityAt100m', relativeVorticityAt100m) + call mpas_pool_get_array(highFrequencyOutputAMPool, 'tracersAtSurface', tracersAtSurface) + + ! + ! note for future build out + ! HERE: interpolate vertically to target z-level or isopycnal or some other surface + ! + + ! for now, just get close enough + iLevelTarget = 1 + do iLevel=2,nVertLevels + if(refBottomDepth(iLevel) > 100.0_RKIND) then + iLevelTarget = iLevel-1 + exit + endif + enddo + + ! copy data into high frequency output fields + ! tracer data will be converted to new tracer infrastructure (and this line removed) before June 23 2015. 
+ kineticEnergyAt100m(:) = kineticEnergyCell(iLevelTarget,:) + relativeVorticityAt100m(:) = relativeVorticityCell(iLevelTarget,:) + tracersAtSurface(1,:) = tracers(1,1,:) + tracersAtSurface(2,:) = tracers(2,1,:) + + block => block % next + end do + + end subroutine ocn_compute_high_frequency_output!}}} + +!*********************************************************************** +! +! routine ocn_restart_high_frequency_output +! +!> \brief Save restart for MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date 2015/06/12 +!> \details +!> This routine conducts computation required to save a restart state +!> for the MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_restart_high_frequency_output(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_restart_high_frequency_output!}}} + +!*********************************************************************** +! +! routine ocn_finalize_high_frequency_output +! +!> \brief Finalize MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date 2015/06/12 +!> \details +!> This routine conducts all finalizations required for this +!> MPAS-Ocean analysis member. +! 
+!----------------------------------------------------------------------- + + subroutine ocn_finalize_high_frequency_output(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_finalize_high_frequency_output!}}} + +end module ocn_high_frequency_output + +! vim: foldmethod=marker diff --git a/src/core_ocean/analysis_members/mpas_ocn_layer_volume_weighted_averages.F b/src/core_ocean/analysis_members/mpas_ocn_layer_volume_weighted_averages.F new file mode 100644 index 0000000000..0d77586c53 --- /dev/null +++ b/src/core_ocean/analysis_members/mpas_ocn_layer_volume_weighted_averages.F @@ -0,0 +1,626 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +! +! ocn_layer_volume_weighted_averages +! 
+!> \brief MPAS ocean analysis member: horizontal layer volume weighted averages at each vertical level +!> \author Todd Ringler +!> \date April 24, 2015 +!> \details +!> MPAS ocean analysis member: layer_volume_weighted_averages +! +!----------------------------------------------------------------------- + +module ocn_layer_volume_weighted_averages + + use mpas_derived_types + use mpas_pool_routines + use mpas_dmpar + use mpas_timekeeping + use mpas_stream_manager + + use ocn_constants + use ocn_diagnostics_routines + + implicit none + private + save + + !-------------------------------------------------------------------- + ! + ! Public parameters + ! + !-------------------------------------------------------------------- + + !-------------------------------------------------------------------- + ! + ! Public member functions + ! + !-------------------------------------------------------------------- + + public :: ocn_init_layer_volume_weighted_averages, & + ocn_compute_layer_volume_weighted_averages, & + ocn_restart_layer_volume_weighted_averages, & + ocn_finalize_layer_volume_weighted_averages + + !-------------------------------------------------------------------- + ! + ! Private module variables + ! + !-------------------------------------------------------------------- + +!*********************************************************************** + +contains + +!*********************************************************************** +! +! routine ocn_init_layer_volume_weighted_averages +! +!> \brief Initialize MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date April 24, 2015 +!> \details +!> This routine conducts all initializations required for the +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_init_layer_volume_weighted_averages(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! 
+ !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_init_layer_volume_weighted_averages!}}} + +!*********************************************************************** +! +! routine ocn_compute_layer_volume_weighted_averages +! +!> \brief Compute MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date April 24, 2015 +!> \details +!> This routine conducts all computation required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_compute_layer_volume_weighted_averages(domain, timeLevel, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, intent(in) :: timeLevel + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! 
local variables + ! + !----------------------------------------------------------------- + + type (dm_info) :: dminfo + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: layerVolumeWeightedAverageAMPool + type (mpas_pool_type), pointer :: statePool + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: scratchPool + type (mpas_pool_type), pointer :: diagnosticsPool + type (mpas_pool_type), pointer :: forcingPool + + real (kind=RKIND), dimension(:,:,:), pointer :: minValueWithinOceanLayerRegion + real (kind=RKIND), dimension(:,:,:), pointer :: maxValueWithinOceanLayerRegion + real (kind=RKIND), dimension(:,:,:), pointer :: avgValueWithinOceanLayerRegion + real (kind=RKIND), dimension(:,:), pointer :: minValueWithinOceanVolumeRegion + real (kind=RKIND), dimension(:,:), pointer :: maxValueWithinOceanVolumeRegion + real (kind=RKIND), dimension(:,:), pointer :: avgValueWithinOceanVolumeRegion + + ! pointers to data in pools to be analyzed + real (kind=RKIND), dimension(:,:), pointer :: layerThickness + real (kind=RKIND), dimension(:,:), pointer :: density + real (kind=RKIND), dimension(:,:), pointer :: potentialDensity + real (kind=RKIND), dimension(:,:), pointer :: BruntVaisalaFreqTop + real (kind=RKIND), dimension(:,:), pointer :: velocityZonal + real (kind=RKIND), dimension(:,:), pointer :: velocityMeridional + real (kind=RKIND), dimension(:,:), pointer :: vertVelocityTop + real (kind=RKIND), dimension(:,:,:), pointer :: tracers + real (kind=RKIND), dimension(:,:), pointer :: kineticEnergyCell + real (kind=RKIND), dimension(:,:), pointer :: relativeVorticityCell + real (kind=RKIND), dimension(:,:), pointer :: divergence + + ! 
pointers to data in mesh pool + integer, pointer :: nVertLevels, nCells, nCellsSolve, nLayerVolWeightedAvgFields, nOceanRegionsTmp + integer, pointer :: indexTemperature, indexSalinity + integer, dimension(:), pointer :: maxLevelCell + real (kind=RKIND), dimension(:), pointer :: areaCell, lonCell, latCell + + ! scratch space + type(field2DReal), pointer :: workArrayField + real (kind=RKIND), dimension(:,:), pointer :: workArray + type(field1DReal), pointer :: workMaskField, workMinField, workMaxField, workSumField + real (kind=RKIND), dimension(:), pointer :: workMask, workMin, workMax, workSum + + ! local variables + integer :: iDataField, nDefinedDataFields + integer :: iCell, iLevel, iRegion, iTracer, err_tmp + + ! package flag + logical, pointer :: layerVolumeWeightedAverageAMPKGActive + + ! buffers data for message passaging + integer :: kBuffer, kBufferLength + real (kind=RKIND), dimension(:), allocatable :: workBufferSum, workBufferSumReduced + real (kind=RKIND), dimension(:), allocatable :: workBufferMin, workBufferMinReduced + real (kind=RKIND), dimension(:), allocatable :: workBufferMax, workBufferMaxReduced + + ! assume no error + err = 0 + + ! set highest level pointer + dminfo = domain % dminfo + + ! find the number of regions, number of data fields and number of vertical levels + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nOceanRegionsTmp', nOceanRegionsTmp) + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nLayerVolWeightedAvgFields', nLayerVolWeightedAvgFields) + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nVertLevels', nVertLevels) + + ! 
allocate buffer for message passing + kBuffer=0 + kBufferLength=nOceanRegionsTmp*nLayerVolWeightedAvgFields*nVertLevels + allocate(workBufferSum(kBufferLength)) + allocate(workBufferMin(kBufferLength)) + allocate(workBufferMax(kBufferLength)) + allocate(workBufferSumReduced(kBufferLength)) + allocate(workBufferMinReduced(kBufferLength)) + allocate(workBufferMaxReduced(kBufferLength)) + workBufferSum=0.0_RKIND + workBufferMin=0.0_RKIND + workBufferMax=0.0_RKIND + workBufferSumReduced=0.0_RKIND + workBufferMinReduced=0.0_RKIND + workBufferMaxReduced=0.0_RKIND + + ! get pointers to analysis member arrays + call mpas_pool_get_subpool(domain % blocklist % structs, 'layerVolumeWeightedAverageAM', layerVolumeWeightedAverageAMPool) + call mpas_pool_get_array(layerVolumeWeightedAverageAMPool, 'minValueWithinOceanLayerRegion', minValueWithinOceanLayerRegion) + call mpas_pool_get_array(layerVolumeWeightedAverageAMPool, 'maxValueWithinOceanLayerRegion', maxValueWithinOceanLayerRegion) + call mpas_pool_get_array(layerVolumeWeightedAverageAMPool, 'avgValueWithinOceanLayerRegion', avgValueWithinOceanLayerRegion) + call mpas_pool_get_array(layerVolumeWeightedAverageAMPool, 'minValueWithinOceanVolumeRegion', minValueWithinOceanVolumeRegion) + call mpas_pool_get_array(layerVolumeWeightedAverageAMPool, 'maxValueWithinOceanVolumeRegion', maxValueWithinOceanVolumeRegion) + call mpas_pool_get_array(layerVolumeWeightedAverageAMPool, 'avgValueWithinOceanVolumeRegion', avgValueWithinOceanVolumeRegion) + + ! loop over blocks + ! NOTE: code is not valid for multiple blocks ! + block => domain % blocklist + do while (associated(block)) + + ! 
get pointers to scratch variables + call mpas_pool_get_subpool(block % structs, 'layerVolumeWeightedAverageAMScratch', scratchPool) + call mpas_pool_get_field(scratchPool, 'workArrayLayerVolume', workArrayField) + call mpas_pool_get_field(scratchPool, 'workMaskLayerVolume', workMaskField) + call mpas_pool_get_field(scratchPool, 'workMinLayerVolume', workMinField) + call mpas_pool_get_field(scratchPool, 'workMaxLayerVolume', workMaxField) + call mpas_pool_get_field(scratchPool, 'workSumLayerVolume', workSumField) + call mpas_allocate_scratch_field(workArrayField, .true.) + call mpas_allocate_scratch_field(workMaskField, .true.) + call mpas_allocate_scratch_field(workMinField, .true.) + call mpas_allocate_scratch_field(workMaxField, .true.) + call mpas_allocate_scratch_field(workSumField, .true.) + workArray => workArrayField % array + workMask => workMaskField % array + workMin => workMinField % array + workMax => workMaxField % array + workSum => workSumField % array + + ! get pointers to pools + call mpas_pool_get_subpool(block % structs, 'state', statePool) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) + call mpas_pool_get_subpool(block % structs, 'forcing', forcingPool) + + ! 
get pointers to mesh + call mpas_pool_get_dimension(block % dimensions, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) + call mpas_pool_get_dimension(block % dimensions, 'nVertLevels', nVertLevels) + call mpas_pool_get_dimension(block % dimensions, 'nLayerVolWeightedAvgFields', nLayerVolWeightedAvgFields) + call mpas_pool_get_dimension(block % dimensions, 'nOceanRegionsTmp', nOceanRegionsTmp) + call mpas_pool_get_dimension(statePool, 'index_temperature', indexTemperature) + call mpas_pool_get_dimension(statePool, 'index_salinity', indexSalinity) + call mpas_pool_get_array(meshPool, 'areaCell', areaCell) + call mpas_pool_get_array(meshPool, 'lonCell', lonCell) + call mpas_pool_get_array(meshPool, 'latCell', latCell) + call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) + + ! test to make sure the arrays are big enough + nDefinedDataFields = size(avgValueWithinOceanLayerRegion,dim=1) + if (nDefinedDataFields > nLayerVolWeightedAvgFields) then + write (stderrUnit,*) 'Abort: nDefinedDataFields > nLayerVolWeightedAvgFields' + write (stderrUnit,*) ' : increase size of ocn_layer_volume_weighted_averages scratch space' + call mpas_dmpar_abort(dminfo) + endif + + ! get pointers to data that will be analyzed + ! 
listed in the order in which the fields appear in {avg,min,max}ValueWithinOceanLayerRegion + call mpas_pool_get_array(statePool, 'layerThickness', layerThickness, 1) + call mpas_pool_get_array(diagnosticsPool, 'density', density) + call mpas_pool_get_array(diagnosticsPool, 'potentialDensity', potentialDensity) + call mpas_pool_get_array(diagnosticsPool, 'BruntVaisalaFreqTop', BruntVaisalaFreqTop) + call mpas_pool_get_array(diagnosticsPool, 'velocityZonal', velocityZonal) + call mpas_pool_get_array(diagnosticsPool, 'velocityMeridional', velocityMeridional) + call mpas_pool_get_array(diagnosticsPool, 'vertVelocityTop', vertVelocityTop) + call mpas_pool_get_array(statePool, 'tracers', tracers, 1) + call mpas_pool_get_array(diagnosticsPool, 'kineticEnergyCell', kineticEnergyCell) + call mpas_pool_get_array(diagnosticsPool, 'relativeVorticityCell', relativeVorticityCell) + call mpas_pool_get_array(diagnosticsPool, 'divergence', divergence) + + ! initialize buffers + workBufferSum(:) = 0.0_RKIND + workBufferMin(:) = +1.0e20_RKIND + workBufferMax(:) = -1.0e20_RKIND + + ! loop over all ocean regions + do iRegion=1,nOceanRegionsTmp + + ! loop over the vertical + do iLevel=1,nVertLevels + + ! compute mask + call compute_mask(iLevel, maxLevelCell, nCells, nCellsSolve, iRegion, lonCell, latCell, workMask) + + ! 
copy data into work array
+            ! (field order must match {avg,min,max}ValueWithinOceanLayerRegion)
+            workArray( :,:) = 0.0_RKIND
+            workArray( 1,:) = workMask(:)
+            workArray( 2,:) = areaCell(:)
+            workArray( 3,:) = layerThickness(iLevel,:)
+            workArray( 4,:) = density(iLevel,:)
+            workArray( 5,:) = potentialDensity(iLevel,:)
+            workArray( 6,:) = BruntVaisalaFreqTop(iLevel,:)
+            workArray( 7,:) = velocityZonal(iLevel,:)
+            workArray( 8,:) = velocityMeridional(iLevel,:)
+            workArray( 9,:) = vertVelocityTop(iLevel,:)
+            workArray(10,:) = tracers(indexTemperature,iLevel,:)
+            workArray(11,:) = tracers(indexSalinity,iLevel,:)
+            workArray(12,:) = kineticEnergyCell(iLevel,:)
+            workArray(13,:) = relativeVorticityCell(iLevel,:)
+            workArray(14,:) = divergence(iLevel,:)
+            workArray(15,:) = relativeVorticityCell(iLevel,:)*relativeVorticityCell(iLevel,:)
+
+            call compute_statistics(nDefinedDataFields, nCellsSolve, workArray, workMask, workMin, workMax, workSum)
+
+            ! store data in buffer in order to allow only three dmpar calls
+            do iDataField=1,nDefinedDataFields
+              kBuffer = kBuffer+1
+              workBufferSum(kBuffer) = workBufferSum(kBuffer) + workSum(iDataField)
+              workBufferMin(kBuffer) = min(workBufferMin(kBuffer), workMin(iDataField))
+              workBufferMax(kBuffer) = max(workBufferMax(kBuffer), workMax(iDataField))
+            enddo
+
+         enddo ! iLevel
+
+      end do ! iRegion
+      kBuffer = 0
+
+      ! deallocate scratch fields
+      call mpas_deallocate_scratch_field(workArrayField, .true.)
+      call mpas_deallocate_scratch_field(workMaskField, .true.)
+      call mpas_deallocate_scratch_field(workMinField, .true.)
+      call mpas_deallocate_scratch_field(workMaxField, .true.)
+      call mpas_deallocate_scratch_field(workSumField, .true.)
+
+      block => block % next
+      end do
+
+      ! communication
+      call mpas_dmpar_sum_real_array(dminfo, kBufferLength, workBufferSum, workBufferSumReduced )
+      call mpas_dmpar_min_real_array(dminfo, kBufferLength, workBufferMin, workBufferMinReduced )
+      call mpas_dmpar_max_real_array(dminfo, kBufferLength, workBufferMax, workBufferMaxReduced )
+
+      ! 
unpack the buffer into intent(out) of this analysis member
+      kBuffer=0
+      do iRegion=1,nOceanRegionsTmp
+       do iLevel=1,nVertLevels
+        do iDataField=1,nDefinedDataFields
+          kBuffer = kBuffer+1
+          avgValueWithinOceanLayerRegion(iDataField,iLevel,iRegion)=workBufferSumReduced(kBuffer)
+          minValueWithinOceanLayerRegion(iDataField,iLevel,iRegion)=workBufferMinReduced(kBuffer)
+          maxValueWithinOceanLayerRegion(iDataField,iLevel,iRegion)=workBufferMaxReduced(kBuffer)
+        enddo
+       enddo
+      enddo
+
+      ! compute vertical sum before layer-by-layer normalization
+      minValueWithinOceanVolumeRegion = 0.0_RKIND
+      maxValueWithinOceanVolumeRegion = 0.0_RKIND
+      avgValueWithinOceanVolumeRegion = 0.0_RKIND
+      do iRegion=1,nOceanRegionsTmp
+        do iDataField=1,nDefinedDataFields
+          do iLevel=1,nVertLevels
+            avgValueWithinOceanVolumeRegion(iDataField, iRegion) = avgValueWithinOceanVolumeRegion(iDataField, iRegion) + avgValueWithinOceanLayerRegion(iDataField,iLevel,iRegion)
+          enddo
+        enddo
+        do iDataField=4,nDefinedDataFields
+          avgValueWithinOceanVolumeRegion(iDataField, iRegion) = avgValueWithinOceanVolumeRegion(iDataField, iRegion) / max(avgValueWithinOceanVolumeRegion(3,iRegion),1.0e-8_RKIND)
+        enddo
+        ! normalize total region volume by total volume cell area
+        avgValueWithinOceanVolumeRegion(3,iRegion) = avgValueWithinOceanVolumeRegion(3,iRegion) / max(avgValueWithinOceanVolumeRegion(2,iRegion),1.0e-8_RKIND)
+        ! normalize total volume cell area by total number of cells
+        avgValueWithinOceanVolumeRegion(2,iRegion) = avgValueWithinOceanVolumeRegion(2,iRegion) / max(avgValueWithinOceanVolumeRegion(1,iRegion),1.0e-8_RKIND)
+      enddo
+
+      ! find min/max with region volume
+      do iRegion=1,nOceanRegionsTmp
+        do iDataField=1,nDefinedDataFields
+          minValueWithinOceanVolumeRegion(iDataField, iRegion) = minval(minValueWithinOceanLayerRegion(iDataField,:,iRegion))
+          maxValueWithinOceanVolumeRegion(iDataField, iRegion) = maxval(maxValueWithinOceanLayerRegion(iDataField,:,iRegion)) ! fix: was maxval of the min array
+        enddo
+      enddo
+
+      ! 
normalize averages layer-by-layer + do iRegion=1,nOceanRegionsTmp + do iLevel=1,nVertLevels + ! normalize all field by total volume in each layer + do iDataField=4,nDefinedDataFields + avgValueWithinOceanLayerRegion(iDataField,iLevel,iRegion) = avgValueWithinOceanLayerRegion(iDataField,iLevel,iRegion) / max(avgValueWithinOceanLayerRegion(3,iLevel,iRegion),1.0e-8_RKIND) + enddo + ! normalize total layer volume by layer area + avgValueWithinOceanLayerRegion(3,iLevel,iRegion) = avgValueWithinOceanLayerRegion(3,iLevel,iRegion) / max(avgValueWithinOceanLayerRegion(2,iLevel,iRegion),1.0e-8_RKIND) + ! normalize total layer area by number of cells in region + avgValueWithinOceanLayerRegion(2,iLevel,iRegion) = avgValueWithinOceanLayerRegion(2,iLevel,iRegion) / max(avgValueWithinOceanLayerRegion(1,iLevel,iRegion),1.0e-8_RKIND) + enddo + enddo + + ! deallocate buffers + deallocate(workBufferSumReduced) + deallocate(workBufferMinReduced) + deallocate(workBufferMaxReduced) + + contains + + subroutine compute_mask(iLevel, maxLevelCell, nCells, nCellsSolve, iRegion, lonCell, latCell, workMask) + ! this subroutines produces a 0/1 mask that is multiplied with workArray to + ! allow for min/max/avg to represent specific regions of the ocean domain + ! + ! NOTE: computes_mask is temporary. workMask should be intent(in) to this entire module ! + ! + integer, intent(in) :: iLevel, nCells, nCellsSolve, iRegion + integer, intent(in), dimension(:) :: maxLevelCell + real(kind=RKIND), dimension(:), intent(in) :: lonCell, latCell + real(kind=RKIND), dimension(:), intent(out) :: workMask + integer :: iCell + real(kind=RKIND) :: dtr + + dtr = 4.0_RKIND*atan(1.0_RKIND) / 180.0_RKIND + workMask(:) = 0.0_RKIND + do iCell=1,nCellsSolve + if(iLevel.le.maxLevelCell(iCell)) workMask(iCell) = 1.0_RKIND + enddo + + if (iRegion.eq.1) then + ! Arctic + do iCell=1,nCellsSolve + if(latCell(iCell).lt. 60.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + elseif (iRegion.eq.2) then + ! 
Equatorial + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 15.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt.-15.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + elseif (iRegion.eq.3) then + ! Southern Ocean + do iCell=1,nCellsSolve + if(latCell(iCell).gt.-50.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + elseif (iRegion.eq.4) then + ! Nino 3 + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt. -5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).lt.210.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).gt.270.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + elseif (iRegion.eq.5) then + ! Nino 4 + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt. -5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).lt.160.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).gt.210.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + elseif (iRegion.eq.6) then + ! Nino 3.4 + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt. -5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).lt.190.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).gt.240.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + else + ! global (do nothing!) + endif + + end subroutine compute_mask + + + subroutine compute_statistics(nDefinedDataFields, nCellsSolve, workArray, workMask, workMin, workMax, workSum) + ! this subroutines does the actual summing, min, max, masking ect + ! 
this hides the messy code from the high-level subroutine + + integer, intent(in) :: nDefinedDataFields, nCellsSolve + real(kind=RKIND), dimension(:,:), intent(in) :: workArray + real(kind=RKIND), dimension(:), intent(in) :: workMask + real(kind=RKIND), dimension(:), intent(out) :: workMin, workMax, workSum + integer :: iCell, iDataField + real(kind=RKIND) :: cellMask, cellArea, cellVolume + + workSum = 0.0 + do iCell=1,nCellsSolve + cellMask = workMask(iCell) ! mask + cellArea = cellMask * workArray(2,iCell) ! area + cellVolume = cellArea * workArray(3,iCell) ! volume + workSum(1) = workSum(1) + cellMask ! 0/1 mask sum + workSum(2) = workSum(2) + cellArea ! area sum + workSum(3) = workSum(3) + cellVolume ! volume sum + do iDataField=4,nDefinedDataFields + workSum(iDataField) = workSum(iDataField) + cellVolume*workArray(iDataField,iCell) ! volume-weighted sum + enddo + enddo + + do iDataField=1,nDefinedDataFields + workMin(iDataField) = minval(workArray(iDataField,1:nCellsSolve),workMask(1:nCellsSolve)>0.5_RKIND) + workMax(iDataField) = maxval(workArray(iDataField,1:nCellsSolve),workMask(1:nCellsSolve)>0.5_RKIND) + enddo + + end subroutine compute_statistics + + end subroutine ocn_compute_layer_volume_weighted_averages!}}} + +!*********************************************************************** +! +! routine ocn_restart_layer_volume_weighted_averages +! +!> \brief Save restart for MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date April 24, 2015 +!> \details +!> This routine conducts computation required to save a restart state +!> for the MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_restart_layer_volume_weighted_averages(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! 
+ !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_restart_layer_volume_weighted_averages!}}} + +!*********************************************************************** +! +! routine ocn_finalize_layer_volume_weighted_averages +! +!> \brief Finalize MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date April 24, 2015 +!> \details +!> This routine conducts all finalizations required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_finalize_layer_volume_weighted_averages(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_finalize_layer_volume_weighted_averages!}}} + +end module ocn_layer_volume_weighted_averages + +! vim: foldmethod=marker diff --git a/src/core_ocean/analysis_members/mpas_ocn_meridional_heat_transport.F b/src/core_ocean/analysis_members/mpas_ocn_meridional_heat_transport.F new file mode 100644 index 0000000000..64bc16b483 --- /dev/null +++ b/src/core_ocean/analysis_members/mpas_ocn_meridional_heat_transport.F @@ -0,0 +1,503 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +! +! ocn_meridional_heat_transport +! +!> \brief MPAS ocean analysis core member: meridional_heat_transport +!> \author Mark Petersen +!> \date March 2014 +!> \details +!> MPAS ocean analysis core member: meridional_heat_transport +!> Compute zonal means of selected variables +! +!----------------------------------------------------------------------- + +module ocn_meridional_heat_transport + + use mpas_derived_types + use mpas_pool_routines + use mpas_dmpar + use mpas_timekeeping + use mpas_stream_manager + + use ocn_constants + use ocn_diagnostics_routines + + implicit none + private + save + + !-------------------------------------------------------------------- + ! + ! Public parameters + ! + !-------------------------------------------------------------------- + + !-------------------------------------------------------------------- + ! + ! Public member functions + ! 
+ !-------------------------------------------------------------------- + + public :: ocn_init_meridional_heat_transport, & + ocn_compute_meridional_heat_transport, & + ocn_restart_meridional_heat_transport, & + ocn_finalize_meridional_heat_transport + + !-------------------------------------------------------------------- + ! + ! Private module variables + ! + !-------------------------------------------------------------------- + + integer :: nMerHeatTransBinsUsed + +!*********************************************************************** + +contains + +!*********************************************************************** +! +! routine ocn_init_meridional_heat_transport +! +!> \brief Initialize MPAS-Ocean analysis member +!> \author Mark Petersen +!> \date March 2014 +!> \details +!> This routine conducts all initializations required for the +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_init_meridional_heat_transport(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + type (dm_info) :: dminfo + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: meridionalHeatTransportAMPool + type (mpas_pool_type), pointer :: meshPool + + integer :: iBin + integer, pointer :: nMerHeatTransBins + + real (kind=RKIND) :: binWidth + ! These are array size 1 because mpas_dmpar_min_real_array calls require arrays. + real (kind=RKIND), dimension(1) :: minBin, maxBin, minBinDomain, maxBinDomain + real (kind=RKIND), dimension(:), pointer :: binBoundaryMerHeatTrans, binVariable + + integer, pointer :: config_AM_meridionalHeatTransport_num_bins + real (kind=RKIND), pointer :: config_AM_meridionalHeatTransport_min_bin, config_AM_meridionalHeatTransport_max_bin + + logical, pointer :: on_a_sphere + + dminfo = domain % dminfo + + err = 0 + + minBin = 1.0e34_RKIND + maxBin = -1.0e34_RKIND + + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nMerHeatTransBins', nMerHeatTransBins) + call mpas_pool_get_subpool(domain % blocklist % structs, 'meridionalHeatTransportAM', meridionalHeatTransportAMPool) + + call mpas_pool_get_config(domain % configs, 'config_AM_meridionalHeatTransport_num_bins', config_AM_meridionalHeatTransport_num_bins) + call mpas_pool_get_config(domain % configs, 'config_AM_meridionalHeatTransport_min_bin', config_AM_meridionalHeatTransport_min_bin) + call mpas_pool_get_config(domain % configs, 'config_AM_meridionalHeatTransport_max_bin', config_AM_meridionalHeatTransport_max_bin) + + nMerHeatTransBinsUsed = config_AM_meridionalHeatTransport_num_bins + + call mpas_pool_get_array(meridionalHeatTransportAMPool, 'binBoundaryMerHeatTrans', binBoundaryMerHeatTrans) + + ! Find min and max values of binning variable. + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_config(meshPool, 'on_a_sphere', on_a_sphere) + + ! 
Bin by latitude on a sphere, by yCell otherwise. + if (on_a_sphere) then + call mpas_pool_get_array(meshPool, 'latCell', binVariable) + else + call mpas_pool_get_array(meshPool, 'yCell', binVariable) + end if + + minBin = min(minBin, minval(binVariable) ) + maxBin = max(maxBin, maxval(binVariable) ) + + block => block % next + end do + + call mpas_dmpar_min_real_array(dminfo, 1, minBin, minBinDomain) + call mpas_dmpar_max_real_array(dminfo, 1, maxBin, maxBinDomain) + + ! Set up bins. + binBoundaryMerHeatTrans = -1.0e34_RKIND + + ! Change min and max bin bounds to configuration settings, if applicable. + if (config_AM_meridionalHeatTransport_min_bin > -1.0e33_RKIND) then + minBinDomain(1) = config_AM_meridionalHeatTransport_min_bin + else + ! use measured min value, but decrease slightly to include least value. + minBinDomain(1) = minBinDomain(1) - 1.0e-10_RKIND * abs(minBinDomain(1)) + end if + + if (config_AM_meridionalHeatTransport_max_bin > -1.0e33_RKIND) then + maxBinDomain(1) = config_AM_meridionalHeatTransport_max_bin + else + ! use measured max value, but increase slightly to include max value. + maxBinDomain(1) = maxBinDomain(1) + 1.0e-10_RKIND * abs(maxBinDomain(1)) + end if + + binBoundaryMerHeatTrans(1) = minBinDomain(1) + binWidth = (maxBinDomain(1) - minBinDomain(1)) / nMerHeatTransBinsUsed + + do iBin = 2, nMerHeatTransBinsUsed + binBoundaryMerHeatTrans(iBin) = binBoundaryMerHeatTrans(iBin-1) + binWidth + end do + binBoundaryMerHeatTrans(nMerHeatTransBinsUsed+1) = binBoundaryMerHeatTrans(nMerHeatTransBinsUsed) + binWidth + + end subroutine ocn_init_meridional_heat_transport!}}} + +!*********************************************************************** +! +! routine ocn_compute_meridional_heat_transport +! +!> \brief Compute MPAS-Ocean analysis member +!> \author Mark Petersen +!> \date March 2014 +!> \details +!> This routine conducts all computation required for this +!> MPAS-Ocean analysis member. +! 
+!----------------------------------------------------------------------- + + subroutine ocn_compute_meridional_heat_transport(domain, timeLevel, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, intent(in) :: timeLevel + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + type (dm_info) :: dminfo + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: meridionalHeatTransportAMPool + type (mpas_pool_type), pointer :: statePool + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: scratchPool + type (mpas_pool_type), pointer :: diagnosticsPool + + integer :: iTracer, k, iCell, kMax, i, iEdge + integer :: iBin, iField, nMerHeatTransVariables + integer, pointer :: nCellsSolve, nVertLevels, nMerHeatTransBins, indexTemperature + integer, dimension(:), pointer :: maxLevelCell, nEdgesOnCell + integer, dimension(:,:), pointer :: edgeSignOnCell, cellsOnEdge, edgesOnCell + + real (kind=RKIND) :: div_huT + real (kind=RKIND), dimension(:), pointer :: areaCell, binVariable, binBoundaryMerHeatTrans, dvEdge + real (kind=RKIND), dimension(:), pointer :: meridionalHeatTransportLat + real (kind=RKIND), dimension(:,:), pointer :: layerThicknessEdge, normalTransportVelocity + real (kind=RKIND), dimension(:,:), pointer :: meridionalHeatTransportLatZ + 
real (kind=RKIND), dimension(:,:,:), pointer :: tracers + real (kind=RKIND), dimension(:,:), allocatable :: mht_meridional_integral + real (kind=RKIND), dimension(:,:,:), allocatable :: sumMerHeatTrans, totalSumMerHeatTrans + + logical, pointer :: on_a_sphere + + err = 0 + dminfo = domain % dminfo + + call mpas_pool_get_subpool(domain % blocklist % structs, 'meridionalHeatTransportAM', meridionalHeatTransportAMPool) + call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) + call mpas_pool_get_subpool(domain % blocklist % structs, 'mesh', meshPool) + + nMerHeatTransVariables = 1 + + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nMerHeatTransBins', nMerHeatTransBins) + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nVertLevels', nVertLevels) + + call mpas_pool_get_array(meridionalHeatTransportAMPool, 'binBoundaryMerHeatTrans', binBoundaryMerHeatTrans) + + allocate(sumMerHeatTrans(nMerHeatTransVariables,nVertLevels,nMerHeatTransBinsUsed)) + allocate(totalSumMerHeatTrans(nMerHeatTransVariables,nVertLevels,nMerHeatTransBinsUsed)) + allocate(mht_meridional_integral(nVertLevels,nMerHeatTransBinsUsed)) + + sumMerHeatTrans = 0.0_RKIND + + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'state', statePool) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block % structs, 'scratch', scratchPool) + call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) + + call mpas_pool_get_dimension(block % dimensions, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(statePool, 'index_temperature', indexTemperature) + + call mpas_pool_get_config(meshPool, 'on_a_sphere', on_a_sphere) + + call mpas_pool_get_array(meshPool, 'areaCell', areaCell) + call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) + call mpas_pool_get_array(meshPool, 'nEdgesOnCell', nEdgesOnCell) + call 
mpas_pool_get_array(meshPool, 'edgesOnCell', edgesOnCell) + call mpas_pool_get_array(meshPool, 'dvEdge', dvEdge) + call mpas_pool_get_array(meshPool, 'edgeSignOnCell', edgeSignOnCell) + call mpas_pool_get_array(meshPool, 'cellsOnEdge', cellsOnEdge) + call mpas_pool_get_array(statePool, 'tracers', tracers, timeLevel) + call mpas_pool_get_array(diagnosticsPool, 'layerThicknessEdge', layerThicknessEdge) + call mpas_pool_get_array(diagnosticsPool, 'normalTransportVelocity', normalTransportVelocity) + + ! Bin by latitude on a sphere, by yCell otherwise. + if (on_a_sphere) then + call mpas_pool_get_array(meshPool, 'latCell', binVariable) + else + call mpas_pool_get_array(meshPool, 'yCell', binVariable) + end if + + do iCell = 1, nCellsSolve + kMax = maxLevelCell(iCell) + + if (binVariable(iCell) .lt. binBoundaryMerHeatTrans(1)) cycle + + do iBin = 1, nMerHeatTransBinsUsed + if (binVariable(iCell) .lt. binBoundaryMerHeatTrans(iBin+1) ) then + + do k = 1, kMax + + ! Compute divergence of huT, i.e. layerThicknessEdge * normalVelocity * temperature, at an edge + ! for meridional heat transport. Here we use a centered difference to compute the temperature at + ! the edge, which is an approximation to the actual edge temperature used in the horizontal + ! advection scheme (for example, FCT). We expect that the error in this approximation is small. + ! Here we do not divide by the area, as one normally does in a divergence calculation, so that + ! div_huT is weighted by area here. 
+ iField = 1 + div_huT = 0.0_RKIND + do i = 1, nEdgesOnCell(iCell) + iEdge = edgesOnCell(i, iCell) + div_huT = div_huT - layerThicknessEdge(k, iEdge) * normalTransportVelocity(k, iEdge) & + * 0.5_RKIND * (tracers(indexTemperature,k,cellsOnEdge(1,iEdge)) + tracers(indexTemperature,k,cellsOnEdge(2,iEdge))) & + * edgeSignOnCell(i, iCell) * dvEdge(iEdge) + end do + sumMerHeatTrans(iField,k,iBin) = sumMerHeatTrans(iField,k,iBin) + div_huT + + end do + exit + + endif + end do + + end do + + block => block % next + end do + + ! mpi summation over all processors + ! Note the input and output arrays are of the same dimension, so summation is + ! over the domain decompositon (by processor), not over an array index. + call mpas_dmpar_sum_real_array(dminfo, nVertLevels*nMerHeatTransBinsUsed*nMerHeatTransVariables, sumMerHeatTrans, totalSumMerHeatTrans) + + ! Even though these variables do not include an index that is decomposed amongst + ! domain partitions, we assign them within a block loop so that all blocks have the + ! correct values for writing output. + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_dimension(block % dimensions, 'nMerHeatTransBins', nMerHeatTransBins) + call mpas_pool_get_dimension(block % dimensions, 'nVertLevels', nVertLevels) + + call mpas_pool_get_subpool(block % structs, 'meridionalHeatTransportAM', meridionalHeatTransportAMPool) + call mpas_pool_get_subpool(block % structs, 'state', statePool) + + call mpas_pool_get_array(meridionalHeatTransportAMPool, 'meridionalHeatTransportLat', meridionalHeatTransportLat) + call mpas_pool_get_array(meridionalHeatTransportAMPool, 'meridionalHeatTransportLatZ', meridionalHeatTransportLatZ) + + do iBin = 1, nMerHeatTransBinsUsed + do k = 1, nVertLevels + + ! MHT = sum ( div(huT) A ) * rho c_p, in PW + ! where the sum is over each latitude bin + ! 
Here we simply multiply by (rho c_p) and convert to PW: + iField = 1 + mht_meridional_integral(k,iBin) = totalSumMerHeatTrans(iField,k,iBin)*rho_sw*cp_sw*1.0e-15_RKIND + + end do + end do + + ! Compute integral of ( sum ( div(huT) A ) * rho c_p ) from southernmost latitude to bin boundary. + ! Note that mht_meridional_integral is indexed by bin, spanning 1:nMerHeatTransBinsUsed, while + ! meridionalHeatTransportLatZ (second dimension) is indexed by bin boundary, spanning 1:nMerHeatTransBinsUsed+1 + meridionalHeatTransportLatZ(:,1) = 0.0_RKIND + do iBin = 2, nMerHeatTransBinsUsed+1 + meridionalHeatTransportLatZ(:,iBin) = meridionalHeatTransportLatZ(:,iBin-1) + mht_meridional_integral(:,iBin-1) + end do + + ! meridionalHeatTransportLatZ is a function of depth. Sum in vertical to get + ! meridionalHeatTransportLat, a single value for each latitude bin boundary. + ! meridionalHeatTransportLat is indexed by bin boundary, spanning 1:nMerHeatTransBinsUsed+1 + do iBin = 1, nMerHeatTransBinsUsed+1 + meridionalHeatTransportLat(iBin) = sum(meridionalHeatTransportLatZ(:,iBin)) + end do + + + block => block % next + end do + + deallocate(sumMerHeatTrans) + deallocate(totalSumMerHeatTrans) + deallocate(mht_meridional_integral) + + end subroutine ocn_compute_meridional_heat_transport!}}} + +!*********************************************************************** +! +! routine ocn_restart_meridional_heat_transport +! +!> \brief Save restart for MPAS-Ocean analysis member +!> \author Mark Petersen +!> \date March 2014 +!> \details +!> This routine conducts computation required to save a restart state +!> for the MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_restart_meridional_heat_transport(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! 
+ !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_restart_meridional_heat_transport!}}} + +!*********************************************************************** +! +! routine ocn_finalize_meridional_heat_transport +! +!> \brief Finalize MPAS-Ocean analysis member +!> \author Mark Petersen +!> \date March 2014 +!> \details +!> This routine conducts all finalizations required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_finalize_meridional_heat_transport(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_finalize_meridional_heat_transport!}}} + +end module ocn_meridional_heat_transport + +! vim: foldmethod=marker diff --git a/src/core_ocean/analysis_members/mpas_ocn_okubo_weiss.F b/src/core_ocean/analysis_members/mpas_ocn_okubo_weiss.F new file mode 100644 index 0000000000..adf6a8b483 --- /dev/null +++ b/src/core_ocean/analysis_members/mpas_ocn_okubo_weiss.F @@ -0,0 +1,1703 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +! +! ocn_okubo_weiss +! +!> \brief MPAS ocean analysis core member: okubo_weiss +!> \author Andre Schmeisser +!> \date August 2014 +!> \details +!> MPAS ocean analysis core member: okubo_weiss +! +!----------------------------------------------------------------------- + +module ocn_okubo_weiss + + use mpas_derived_types + use mpas_pool_routines + use mpas_timer + use mpas_dmpar + use mpas_timekeeping + use mpas_constants + + use ocn_constants + use ocn_diagnostics_routines + use mpas_tensor_operations + use mpas_matrix_operations + use iso_c_binding + + implicit none + private + save + +#ifdef SINGLE_PRECISION + integer, parameter :: C_REAL = C_FLOAT + integer, parameter :: SIZE_REAL = 4 +#else + integer, parameter :: C_REAL = C_DOUBLE + integer, parameter :: SIZE_REAL = 8 +#endif + + !-------------------------------------------------------------------- + ! + ! Public parameters + ! + !-------------------------------------------------------------------- + + !-------------------------------------------------------------------- + ! + ! 
Public member functions + ! + !-------------------------------------------------------------------- + + public :: ocn_init_okubo_weiss, & + ocn_compute_okubo_weiss, & + ocn_write_okubo_weiss, & + ocn_restart_okubo_weiss, & + ocn_finalize_okubo_weiss + + !-------------------------------------------------------------------- + ! + ! Private module variables + ! + !-------------------------------------------------------------------- + + interface + subroutine qsort(array, elem_count, elem_size, compare) bind(C, name="qsort")!{{{ + import + type(c_ptr), value :: array + integer(c_size_t), value :: elem_count + integer(c_size_t), value :: elem_size + type(c_funptr), value :: compare ! int (*compare)(const void*, const void*) + end subroutine qsort!}}} + end interface + + interface + subroutine compute_ev_2(A, wr, wi)!{{{ + use iso_c_binding, only: c_double + real (c_double), dimension(2,2) :: A + real (c_double), dimension(2) :: wr + real (c_double), dimension(2) :: wi + end subroutine compute_ev_2!}}} + end interface + + interface + subroutine compute_ev_3(A, wr, wi)!{{{ + use iso_c_binding, only: c_double + real (c_double), dimension(3,3) :: A + real (c_double), dimension(3) :: wr + real (c_double), dimension(3) :: wi + end subroutine compute_ev_3!}}} + end interface + + integer :: nCellsGlobal + integer :: nTotalCellsGlobal + character (len=StrKIND), pointer :: config_AM_okuboWeiss_directory + +!*********************************************************************** + +contains + +!*********************************************************************** +! +! routine ocn_init_okubo_weiss +! +!> \brief Initialize MPAS-Ocean analysis member +!> \author Andre Schmeisser +!> \date August 2014 +!> \details +!> This routine conducts all initializations required for the +!> MPAS-Ocean analysis member. +! 
+!----------------------------------------------------------------------- + + subroutine ocn_init_okubo_weiss(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + type (dm_info) :: dminfo + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: meshPool + integer, pointer :: nCellsSolve, nVertLevels + integer :: cells, cellsSum + + !----------------------------------------------------------------- + ! + ! Compute the global number of cells by doing an MPI sum over all + ! local number of cells per domain. + ! + ! TO DO: Replace this with a global constant to avoid this unnecessary + ! computation + ! 
+      !-----------------------------------------------------------------
+
+      cells = 0
+
+      call mpas_pool_get_config(ocnConfigs, 'config_AM_okuboWeiss_directory',config_AM_okuboWeiss_directory)
+
+      dminfo = domain % dminfo
+      block => domain % blocklist
+      do while (associated(block))
+         call mpas_pool_get_subpool(block % structs, 'mesh', meshPool)
+         call mpas_pool_get_dimension(block % dimensions, 'nCellsSolve', nCellsSolve)
+         call mpas_pool_get_dimension(block % dimensions, 'nVertLevels', nVertLevels)
+         cells = cells + nCellsSolve
+
+         block => block % next
+      end do
+
+      call mpas_dmpar_sum_int(dminfo, cells, cellsSum)
+      nCellsGlobal = cellsSum
+      nTotalCellsGlobal = nCellsGlobal*nVertLevels
+
+#ifdef SINGLE_PRECISION
+      ! The MPI communication represents eddy IDs as reals instead
+      ! of integers so the IDs don't have to be send separately from
+      ! the real-valued variables.
+      ! Check that the cell IDs are actually exactly representable in
+      ! floating point format.
+      if (nTotalCellsGlobal >= 16777216) then
+         write(stderrUnit,*) "Error: Maximum number of cells is not exactly representable " // &
+            "as float. Compile in double precision or change OW eddy " // &
+            "tracking method to use integers"
+         err = 1; return   ! abort init: the unconditional err = 0 below must not clobber this failure
+      end if
+#endif
+
+      err = 0
+
+   end subroutine ocn_init_okubo_weiss!}}}
+
+
+!***********************************************************************
+!
+!  routine ocn_compute_OW_values
+!
+!> \brief   Compute values of Okubo-Weiss and Lambda_2 field
+!> \author  Andre Schmeisser
+!> \date    August 2014
+!> \details
+!>  Computes the values for Okubo-Weiss field OW, as well as the second
+!>  eigenvalue of the strain rate tensor Lambda_2.
+>  The OW and Lam2 values use the x/y components of the velocity gradient
+!>  only. On the sphere, this is only correct if the velocity gradient is
+!>  rotated to the local tangential plane first.
+!>  Lam2_R3 uses the full three dimensional tensor.
+!> +!> Note that the lambda_2 values are multiplied by 4 to be on the same +!> scale as the OW value! +! +!----------------------------------------------------------------------- + + subroutine ocn_compute_OW_values(dvel, nVertLevels, nCells, maxLevelCell, &!{{{ + OW, OW_norm, Lam2, Lam2_R3, Lam2_norm, & + S, om, Lam1) + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + real (kind=RKIND), dimension(:,:,:,:), intent(in) :: dvel + real (kind=RKIND), intent(in) :: OW_norm, Lam2_norm + integer, intent(in) :: nVertLevels, nCells + integer, dimension(:), intent(in) :: maxLevelCell + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + real (kind=RKIND), dimension(:,:), intent(out) :: S, om, OW, Lam1, Lam2, Lam2_R3 + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + integer k, iCell, kmax + real (kind=RKIND) :: sn, ss + real (kind=RKIND), dimension(2,2) :: sym2, asym2 + real (kind=RKIND), dimension(2,2) :: T2 + real (kind=RKIND), dimension(3,3) :: sym3, asym3 + real (kind=RKIND), dimension(3,3) :: T3 + real (kind=RKIND), dimension(2) :: lambda, lambdaI + real (kind=RKIND), dimension(3) :: lambda3, lambdaI3 + + ! Compute OW according to (12): OW = s_n^2 + s_s^2 - \omega^2 + ! This only considers the x/y component of the velocity gradient. + ! TO DO: + ! To get correct values on a sphere, the velocity gradient needs to + ! be rotated such that the local tangential plane is the x/y plane. 
+ do iCell = 1, nCells + kmax = maxLevelCell(iCell) + do k = 1, kmax + sn = dvel(1, 1, k, iCell) - dvel(2, 2, k, iCell) + ss = dvel(1, 2, k, iCell) + dvel(2, 1, k, iCell) + S(k, iCell) = sn*sn + ss*ss + om(k, iCell) = dvel(1, 2, k, iCell) - dvel(2, 1, k, iCell) + OW(k, iCell) = S(k, iCell) - om(k, iCell)*om(k, iCell) + end do + do k = kmax+1, nVertLevels + S(k, iCell) = 0.0_RKIND + om(k, iCell) = 0.0_RKIND + OW(k, iCell) = 0.0_RKIND + end do + end do + S(:, nCells+1) = 0.0_RKIND + om(:, nCells+1) = 0.0_RKIND + OW(:, nCells+1) = 0.0_RKIND + OW = OW / OW_norm; + + ! Compute Lambda_2 parameter + ! Lam2 only considers the x/y components of the velocity gradient, + ! analogously to the OW computation above. + ! Lam2_R3 considers the full 3-dimensional velocity gradient. + do iCell = 1, nCells + kmax = maxLevelCell(iCell) + do k = 1, kmax + !sym = 0.5_RKIND * (dvel(1:2, 1:2, k, iCell) + Transpose(dvel(1:2, 1:2, k, iCell))) + !asym = 0.5_RKIND * (dvel(1:2, 1:2, k, iCell) - Transpose(dvel(1:2, 1:2, k, iCell))) + sym3 = 0.5_RKIND * (dvel(:, :, k, iCell) + Transpose(dvel(:, :, k, iCell))) + asym3 = 0.5_RKIND * (dvel(:, :, k, iCell) - Transpose(dvel(:, :, k, iCell))) + sym2 = sym3(1:2, 1:2) + asym2 = asym3(1:2, 1:2) + T2 = matmul(sym2, sym2) + matmul(asym2, asym2) + T3 = matmul(sym3, sym3) + matmul(asym3, asym3) + + ! Compute eigen-values of 2x2 matrix + call compute_ev_2(T2, lambda, lambdaI) + ! Take the second eigen-value, multiply by 4 to be on the same scale as OW + Lam1(k, iCell) = 4*lambda(1) + Lam2(k, iCell) = 4*lambda(2) + + ! Compute eigen-values of 3x3 matrix + call compute_ev_3(T3, lambda3, lambdaI3) + ! 
Take the second eigen-value, multiply by 4 to be on the same scale as OW
+            Lam2_R3(k, iCell) = 4*lambda3(2)
+         end do
+         do k = kmax+1, nVertLevels
+            Lam1(k, iCell) = 0.0_RKIND
+            Lam2(k, iCell) = 0.0_RKIND
+            Lam2_R3(k, iCell) = 0.0_RKIND
+         end do
+      end do
+      Lam1(:, nCells+1) = 0.0_RKIND
+      Lam2(:, nCells+1) = 0.0_RKIND
+      Lam2_R3(:, nCells+1) = 0.0_RKIND
+
+      Lam2 = Lam2 / Lam2_norm;
+      Lam2_R3 = Lam2_R3 / Lam2_norm;
+
+   end subroutine ocn_compute_OW_values!}}}
+
+
+!***********************************************************************
+!
+!  routine ocn_threshold_OW
+!
+!> \brief   Threshold OW values
+!> \author  Andre Schmeisser
+!> \date    August 2014
+!
+!-----------------------------------------------------------------------
+
+   subroutine ocn_threshold_OW(nVertLevels, nCells, threshold, OW, OW_thresh)!{{{
+
+      !-----------------------------------------------------------------
+      !
+      ! input variables
+      !
+      !-----------------------------------------------------------------
+
+      real (kind=RKIND), dimension(:,:), intent(in) :: OW
+      real (kind=RKIND), intent(in) :: threshold
+      integer, intent(in) :: nVertLevels, nCells
+
+      !-----------------------------------------------------------------
+      !
+      ! output variables
+      !
+      !-----------------------------------------------------------------
+
+      integer, dimension(:,:), intent(out) :: OW_thresh
+
+      !-----------------------------------------------------------------
+      !
+      ! local variables
+      !
+      !-----------------------------------------------------------------
+
+      integer k, iCell
+
+      do iCell = 1, nCells   ! iCell outermost: first index k is the unit-stride one
+         do k = 1, nVertLevels
+            if (OW(k, iCell) < threshold) then
+               OW_thresh(k,iCell) = 1
+            else
+               OW_thresh(k,iCell) = 0   ! integer mask array: use 0, not the real literal 0.0_RKIND
+            end if
+         end do
+      end do
+
+   end subroutine ocn_threshold_OW!}}}
+
+
+
+!***********************************************************************
+!
+!  function find
+!
+!> \brief   The "find" part of a Union-Find data-structure
+!> \author  Andre Schmeisser
+!> \date    August 2014
+!
+!----------------------------------------------------------------------- + + recursive integer function find(unionMap, x) result(res)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, intent(in) :: x + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + integer, dimension(:), intent(inout) :: unionMap + + integer xEquiv + + xEquiv = unionMap(x) + if (xEquiv < 0 .or. xEquiv == x) then + res = x + return + end if + + ! follow chain to root and do path compression + unionMap(x) = find(unionMap, xEquiv) + res = unionMap(x) + + end function find!}}} + +!*********************************************************************** +! +! routine union +! +!> \brief The "union" part of a Union-Find data-structure +!> \author Andre Schmeisser +!> \date August 2014 +! +!----------------------------------------------------------------------- + + subroutine union(unionMap, x, y)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, intent(in) :: x, y + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + integer, dimension(:), intent(inout) :: unionMap + + integer xRoot, yRoot + + ! find canonical elements for sets x and y + xRoot = find(unionMap, x) + yRoot = find(unionMap, y) + + if (xRoot == yRoot) then + return + end if + + ! join sets + if (xRoot < yRoot) then ! 
this check enforces a monotony in the indices + unionMap(yRoot) = xRoot + else + unionMap(xRoot) = yRoot + end if + + end subroutine union!}}} + +!*********************************************************************** +! +! routine ocn_compute_OW_component_IDs +! +!> \brief Compute connected components IDs of the thresholded OW field +!> \author Andre Schmeisser +!> \date August 2014 +! +!----------------------------------------------------------------------- + + subroutine ocn_compute_OW_component_IDs(dminfo, block, meshPool, processorId, nVertLevels, &!{{{ + nCells, OW_thresh, OW_cc_id) + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + type (dm_info), intent(in) :: dminfo + type (block_type), intent(in) :: block + integer, intent(in) :: nVertLevels, nCells, processorId + type (mpas_pool_type), intent(in) :: meshPool + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + integer, dimension(:,:), intent(inout) :: OW_thresh + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, dimension(:,:), intent(out) :: OW_cc_id + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + type (mpas_pool_type), pointer :: okuboWeissAMPool + + integer, dimension(:), pointer :: nEdgesOnCell, indexToCellID + integer, dimension(:,:), pointer :: cellsOnCell + + + integer, pointer :: nCellsSolve + integer k, iCell, i, iOtherCell, cellIdx, otherCellIdx + integer origK, origCell, nLocalCCs + integer, dimension(nVertLevels*nCells) :: unionMap + integer communcationLoop + integer :: update, globalUpdate + + call mpas_timer_start("CC local") + + call mpas_pool_get_subpool(block % structs, 'okuboWeissAM', okuboWeissAMPool) + + call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsSolve) + + call mpas_pool_get_array(meshPool, 'nEdgesOnCell', nEdgesOnCell) + call mpas_pool_get_array(meshPool, 'cellsOnCell', cellsOnCell) + call mpas_pool_get_array(meshPool, 'indexToCellID', indexToCellID) + + unionMap(:) = -1 + + ! Compute local connected components for this domain + ! + ! TODO: To be completely correct, this should go to maxVertLevels only + ! and be in the inner loop. But then, the check for adjacent cells needs + ! to check if these are valid for that vert level, too. + ! It is ok to do it like this because for land cells the OW is zero anyway, + ! thus OW_thresh is not set. + do k = 1, nVertLevels + do iCell = 1, nCells + cellIdx = iCell + (k-1)*nCells + + if (OW_thresh(k, iCell) > 0) then + unionMap(cellIdx) = cellIdx + ! Check all neighboring cells, if any of them is "set", mark + ! them as belonging to the same component + + ! Check adjacent cells on same height level + do i = 1, nEdgesOnCell(iCell) + iOtherCell = cellsOnCell(i, iCell) + if (iOtherCell > 0 .and. iOtherCell <= nCells) then + if (OW_thresh(k, iOtherCell) > 0) then + otherCellIdx = iOtherCell + (k-1)*nCells + call union(unionMap, cellIdx, otherCellIdx) + end if + end if + end do + + ! 
Check cell with lower k-level (higher level has not yet been processed) + if (k > 1) then + if (OW_thresh(k-1, iCell) > 0) then + otherCellIdx = cellIdx - nCells + call union(unionMap, cellIdx, otherCellIdx) + end if + end if + end if + end do + end do + + ! The number of local connected components is an upper bound for the + ! number of globally connected components in this block. + ! (Two separate local CCs might be globally connected through other blocks) + nLocalCCs = ocn_count_local_connected_components(unionMap) + + do k = 1, nVertLevels + do iCell = 1, nCells + cellIdx = iCell + (k-1)*nCells + if (unionMap(cellIdx) >= 0) then + OW_cc_id(k, iCell) = find(unionMap, cellIdx) + ! Convert local ID in the range [1, nVertLevels*nCells] to + ! global ID in the range [1, nVertLevels*nCellsGlobal] + origK = OW_cc_id(k, iCell) / nCells + 1 + origCell = mod(OW_cc_id(k, iCell)-1, nCells)+1 + OW_cc_id(k, iCell) = (nCellsGlobal*(origK-1))+indexToCellID(origCell) + else + OW_cc_id(k, iCell) = -1 + end if + end do + end do + + call mpas_timer_stop("CC local") + call mpas_timer_start("CC communication loop") + + ! Merge components that are connected over domain boundaries + ! Communicate the CC IDs over the halo field. + ! If a component is connected over domains, the ID assigned on this domain + ! and the one from a neighboring domain differ. Use the lower ID as the + ! canonical one and update the affected IDs appropriately. + ! Iterate this process until no more changes occur to propagate the correct + ! results. + ! This is not the most efficient way to globally merge components, but + ! uses the existing infrastructure to communicate with neighboring domains. + ! By the nature of eddies being local, components should not stretch over + ! many components and thus this quickly converges. + do communcationLoop = 1, 20 + call mpas_dmpar_field_halo_exch(block % domain, 'eddyID') + + ! Loop over the ghost cells updated with data from adjacent blocks + ! 
and merge
+         update = 0   ! integer flag: use 0, not the real literal 0.0_RKIND
+         do k = 1, nVertLevels
+            do iCell = nCellsSolve+1, nCells
+               ! Check if ghost cell is part of an eddy
+               if (OW_cc_id(k, iCell) > 0) then
+                  ! Loop over neighbors of ghost cell
+                  do i = 1, nEdgesOnCell(iCell)
+                     iOtherCell = cellsOnCell(i, iCell)
+                     if (iOtherCell > 0 .and. iOtherCell <= nCellsSolve) then
+                        ! Check if neighbor is part of an eddy
+                        if (OW_cc_id(k, iOtherCell) > 0) then
+                           if (OW_cc_id(k, iCell) < OW_cc_id(k, iOtherCell)) then
+                              ! Ghost cell has lower ID, it takes precedence
+                              ! over this block.
+                              update = 1
+                              call ocn_update_components(OW_cc_id, k, iCell, iOtherCell, nCells, nVertLevels)
+                           else if (OW_cc_id(k, iCell) > OW_cc_id(k, iOtherCell)) then
+                              ! Ghost cell has higher ID, this block takes precedence.
+                              OW_cc_id(k, iCell) = OW_cc_id(k, iOtherCell)
+                              update = 1
+                           end if
+                        end if
+                     end if
+                  end do
+               end if
+            end do
+         end do
+
+         if (communcationLoop > 2) then
+            call mpas_dmpar_max_int(dminfo, update, globalUpdate)
+            if (globalUpdate == 0) then
+               exit
+            end if
+         end if
+      end do
+      call mpas_timer_stop("CC communication loop")
+
+      call mpas_timer_start("CC eddy stats")
+      call ocn_compute_eddy_stats(dminfo, block, nVertLevels, nCells, nCellsSolve, nLocalCCs, &
+           nEdgesOnCell, cellsOnCell, OW_cc_id, OW_thresh)
+      call mpas_timer_stop("CC eddy stats")   ! was "CC local": mismatched name left that timer running and corrupted timing stats
+
+   end subroutine ocn_compute_OW_component_IDs!}}}
+
+!***********************************************************************
+!
+!  function ocn_count_local_connected_components
+!
+!> \brief   Count the number of local connected components
+!> \author  Andre Schmeisser
+!> \date    August 2014
+!> \details
+!>  Local connected components are defined by being mapped to the same ID.
+!>  For each component, there is one entry in the Union-Find data structure
+!>  that is representative of the union, by mapping to itself. Thus this
+!>  function simply counts the entries "unionMap(i) == i" that map to
+!>  themselves.
+!
+!----------------------------------------------------------------------- + + integer function ocn_count_local_connected_components(unionMap) !{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, dimension(:), intent(in) :: unionMap + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + integer i, count + + count = 0 + do i = 1, size(unionMap) + if (unionMap(i) == i) then + count = count+1 + end if + end do + + ocn_count_local_connected_components = count + + end function ocn_count_local_connected_components!}}} + + +!*********************************************************************** +! +! routine ocn_update_components +! +!> \brief Update CC ID for all cells with a given ID to that of another ID +!> \author Andre Schmeisser +!> \date August 2014 +! +!----------------------------------------------------------------------- + + subroutine ocn_update_components(OW_cc_id, srcK, iCell, iOtherCell, nCells, nVertLevels)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, intent(in) :: srcK, iCell, iOtherCell, nVertLevels, nCells + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + integer, dimension(:,:), intent(inout) :: OW_cc_id + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + integer k, i + integer fromId, replaceId + + fromId = OW_cc_id(srcK, iOtherCell) + replaceId = OW_cc_id(srcK, iCell) + + do k = 1, nVertLevels + do i = 1, nCells + if (OW_cc_id(k, i) == fromId) then + OW_cc_id(k, i) = replaceId + end if + end do + end do + + end subroutine ocn_update_components!}}} + +!*********************************************************************** +! +! routine ocn_compute_eddy_stats +! +!> \brief Aggregate statistics of partial eddies +!> \author Andre Schmeisser +!> \date August 2014 +!> \details +!> Each (partial) eddy has an ID used for sorting and several statistics +!> associated with it. For this, statistics are stored in an "array of structs" +!> fashion, where the first value represents the ID. +!> QSort is used to sort these blocks of size elemSize. +! +!----------------------------------------------------------------------- + + subroutine ocn_compute_eddy_stats(dminfo, block, nVertLevels, nCells, nCellsSolve, &!{{{ + nLocalCCs, nEdgesOnCell, cellsOnCell, OW_cc_id, OW_thresh) + + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + type (dm_info), intent(in) :: dminfo + type (block_type), intent(in) :: block + integer, intent(in) :: nVertLevels, nCells, nCellsSolve, nLocalCCs + integer, dimension(:), intent(in) :: nEdgesOnCell + integer, dimension(:,:), intent(in) :: cellsOnCell + integer, dimension(:,:), intent(in) :: OW_thresh + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + integer, dimension(:,:), intent(inout) :: OW_cc_id + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + type (mpas_pool_type), pointer :: statePool + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: diagnosticsPool + + real (kind=RKIND), dimension(:), pointer :: areaCell + real (kind=RKIND), dimension(:,:), pointer :: layerThickness, zMid + real (kind=RKIND), dimension(:), pointer :: posX, posY + real (kind=RKIND), dimension(:,:), pointer :: velX, velY, velZ + logical, pointer :: on_a_sphere, use_lat_lon_coords + integer, pointer :: min_cells + + integer, dimension(nVertLevels*nCells) :: nextCellIdxK, nextCellIdxI + real (kind=RKIND), dimension(nLocalCCs) :: sumVol, numCells, origCCId, wsVelX, wsVelY, wsVelZ, & + wsPosX, wsPosY, wsPosZ + real (kind=RKIND) :: vol + integer k, iCell, i, curCC, iIdx, curI, curK, iOtherCell, maxNumCCs + + integer, dimension(size(OW_thresh,1), size(OW_thresh,2)) :: OW_thresh_local + character (len=StrKIND), pointer :: xtime + + call mpas_pool_get_subpool(block % structs, 'state', statePool) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) + + call mpas_pool_get_config(meshPool, 'on_a_sphere', on_a_sphere) + call mpas_pool_get_config(ocnConfigs, 'config_AM_okuboWeiss_use_lat_lon_coords', use_lat_lon_coords) + call mpas_pool_get_config(ocnConfigs, 'config_AM_okuboWeiss_eddy_min_cells', min_cells) + + call mpas_pool_get_array(statePool, 'layerThickness', layerThickness, 1) + call mpas_pool_get_array(meshPool, 'areaCell', areaCell) + + if (.not. on_a_sphere) then + use_lat_lon_coords = .false. 
+ end if
+
+ if (use_lat_lon_coords) then
+ call mpas_pool_get_array(meshPool, 'lonCell', posX)
+ call mpas_pool_get_array(meshPool, 'latCell', posY)
+ call mpas_pool_get_array(diagnosticsPool, 'velocityZonal', velX)
+ call mpas_pool_get_array(diagnosticsPool, 'velocityMeridional', velY)
+ else
+ call mpas_pool_get_array(meshPool, 'xCell', posX)
+ call mpas_pool_get_array(meshPool, 'yCell', posY)
+ call mpas_pool_get_array(diagnosticsPool, 'velocityX', velX)
+ call mpas_pool_get_array(diagnosticsPool, 'velocityY', velY)
+ end if
+ call mpas_pool_get_array(diagnosticsPool, 'zMid', zMid)
+ call mpas_pool_get_array(diagnosticsPool, 'vertVelocityTop', velZ)
+ call mpas_pool_get_array(diagnosticsPool, 'xtime', xtime)
+
+ call mpas_timer_start("stats per proc")
+
+ OW_thresh_local = OW_thresh
+
+ ! Compute stats on each connected component of an eddy
+ !
+ ! Loop through cells, marking cells as processed on the go
+ ! by resetting the OW_thresh_local variable
+ ! If a cell belongs to a component (and has not been marked as
+ ! processed), give it a new eddy ID and start doing statistics on
+ ! all connected cells. This is done by doing a "flood fill" of
+ ! the connected cells, marking each neighboring cell as processed
+ ! first (so that it's processed only once) and then putting it into
+ ! a list of cells to be processed (nextCellIdxI, nextCellIdxK).
+ !
+ ! Ghost cells are not processed in the statistics so that they are not
+ ! counted multiple times. They are processed for finding all the neighbors
+ ! in the flood fill process, though. Otherwise, more local eddy parts
+ ! might be created than there is memory allocated for statistics arrays,
+ ! as the counting of connected components further up also includes these
+ ! cells.
+ curCC = 0
+ do k = 1, nVertLevels
+ do iCell = 1, nCellsSolve
+ if (OW_thresh_local(k, iCell) > 0) then
+ ! Found a new eddy.
+ curCC = curCC + 1 + if (curCC > nLocalCCs) then + write (stderrUnit,*) "ERROR: curCC > nLocalCCs", curCC, nLocalCCs + write (stderrUnit,*) "THIS IS A BUG!" + end if + ! Put this cell into the list of cells to be processed as + ! seed for the flood fill of this eddy + OW_thresh_local(k, iCell) = 0.0_RKIND + iIdx = 1 + nextCellIdxK(1) = k + nextCellIdxI(1) = iCell + sumVol(curCC) = 0.0_RKIND + numCells(curCC) = 0.0_RKIND + wsPosX(curCC) = 0.0_RKIND + wsPosY(curCC) = 0.0_RKIND + wsPosZ(curCC) = 0.0_RKIND + wsVelX(curCC) = 0.0_RKIND + wsVelY(curCC) = 0.0_RKIND + wsVelZ(curCC) = 0.0_RKIND + origCCId(curCC) = OW_cc_id(k, iCell) + ! Flood fill: loop over list of cells to be processed + ! iIdx is the position in the list, while curI/curK are the + ! actual indices for the cell being processed + do while (iIdx > 0) + curI = nextCellIdxI(iIdx) + curK = nextCellIdxK(iIdx) + iIdx = iIdx - 1 + if (curI <= nCellsSolve) then + ! Compute stats on cell + vol = areaCell(curI)*layerThickness(curK, curI) + sumVol(curCC) = sumVol(curCC) + vol + numCells(curCC) = numCells(curCC) + 1 + wsPosX(curCC) = wsPosX(curCC) + vol * posX(curI) + wsPosY(curCC) = wsPosY(curCC) + vol * posY(curI) + wsPosZ(curCC) = wsPosZ(curCC) + vol * zMid(curK, curI) + wsVelX(curCC) = wsVelX(curCC) + vol * velX(curK, curI) + wsVelY(curCC) = wsVelY(curCC) + vol * velY(curK, curI) + ! velZ is at top of cell, so take average of vertical velocity at top and bottom. + wsVelZ(curCC) = wsVelZ(curCC) + vol * 0.5_RKIND*(velZ(curK, curI)+velZ(curK+1, curI)) + end if + + ! Check neighbors of cell and put them into list: + ! Adjacent cells on same height level + do i = 1, nEdgesOnCell(curI) + iOtherCell = cellsOnCell(i, curI) + if (iOtherCell > 0 .and. iOtherCell <= nCells) then + if (OW_thresh_local(curK, iOtherCell) > 0) then + iIdx = iIdx + 1 + nextCellIdxK(iIdx) = curK + nextCellIdxI(iIdx) = iOtherCell + OW_thresh_local(curK, iOtherCell) = 0.0_RKIND + end if + end if + end do + ! 
Cell on lower k-level + if (curK > 1) then + if (OW_thresh_local(curK-1, curI) > 0) then + iIdx = iIdx + 1 + nextCellIdxK(iIdx) = curK-1 + nextCellIdxI(iIdx) = curI + OW_thresh_local(curK-1, curI) = 0.0_RKIND + end if + end if + ! Cell on higher k-level + if (curK < nVertLevels) then + if (OW_thresh_local(curK+1, curI) > 0) then + iIdx = iIdx + 1 + nextCellIdxK(iIdx) = curK+1 + nextCellIdxI(iIdx) = curI + OW_thresh_local(curK+1, curI) = 0.0_RKIND + end if + end if + end do + end if + end do + end do + + ! for lat/lon coordinates, convert from radians to degrees for output + if (use_lat_lon_coords) then + wsPosX = wsPosX *180.0_RKIND/pii + wsPosY = wsPosY *180.0_RKIND/pii + end if + + call mpas_timer_stop("stats per proc") + + call mpas_dmpar_max_int(dminfo, curCC, maxNumCCs) + + ! this merges eddy stats AND outputs the results + call mpas_timer_start("aggregate stats") + call ocn_aggregate_eddy_stats(dmInfo, use_lat_lon_coords, min_cells, & + curCC, maxNumCCs, & + origCCId, sumVol, numCells, & + wsPosX, wsPosY, wsPosZ, & + wsVelX, wsVelY, wsVelZ, & + OW_cc_id, nVertLevels, nCellsSolve,xtime) + call mpas_timer_stop("aggregate stats") + + end subroutine ocn_compute_eddy_stats!}}} + + +!*********************************************************************** +! +! routine ocn_aggregate_eddy_stats +! +!> \brief Aggregate statistics of partial eddies +!> \author Andre Schmeisser +!> \date August 2014 +!> \details +!> Send all partial eddy statistics to IO node and merge them into +!> statistics of complete eddies. +! +!----------------------------------------------------------------------- + + subroutine ocn_aggregate_eddy_stats(dmInfo, use_lat_lon_coords, min_cells, &!{{{ + numCCs, maxCCsPerDomain, & + origCCId, sumVol, numCells, & + wsPosX, wsPosY, wsPosZ, & + wsVelX, wsVelY, wsVelZ, & + OW_cc_id, nVertLevels, nCellsSolve,xtime) + + + !----------------------------------------------------------------- + ! + ! input variables + ! 
+ !-----------------------------------------------------------------
+
+ type (dm_info), intent(in) :: dminfo
+ logical, intent(in) :: use_lat_lon_coords
+ integer, intent(in) :: numCCs, maxCCsPerDomain, min_cells
+ real (kind=RKIND), dimension(:), intent(in) :: origCCId, sumVol, numCells, &
+ wsPosX, wsPosY, wsPosZ, wsVelX, wsVelY, wsVelZ
+ integer, intent(in) :: nVertLevels, nCellsSolve
+ character (len=StrKIND), intent(in) :: xtime
+
+ !-----------------------------------------------------------------
+ !
+ ! input/output variables
+ !
+ !-----------------------------------------------------------------
+
+ integer, dimension(:,:), intent(inout) :: OW_cc_id
+
+ !-----------------------------------------------------------------
+ !
+ ! local variables
+ !
+ !-----------------------------------------------------------------
+
+ real (kind=RKIND), dimension(9 * maxCCsPerDomain * (dmInfo % nprocs)) :: globalStats, aggregated
+
+ integer i, j, k, ierr, curId, listIdx, totalCCs, cullId, offset
+
+ ! Multiplex all eddy statistics into one array so it can be sent with
+ ! one MPI communication call
+ ! TODO this could possibly be made more memory and network
+ ! efficient with lists, because a large array is allocated
+ ! and communicated across all processors to merge eddies
+ call mpas_timer_start("all gather")
+
+ offset = 9 * maxCCsPerDomain * (dmInfo % my_proc_id)
+
+ aggregated = -1.0e34
+ do i = 1, numCCs
+ aggregated(9*(i-1) + 1 + offset) = origCCId(i)
+ aggregated(9*(i-1) + 2 + offset) = sumVol(i)
+ aggregated(9*(i-1) + 3 + offset) = wsPosX(i)
+ aggregated(9*(i-1) + 4 + offset) = wsPosY(i)
+ aggregated(9*(i-1) + 5 + offset) = wsPosZ(i)
+ aggregated(9*(i-1) + 6 + offset) = wsVelX(i)
+ aggregated(9*(i-1) + 7 + offset) = wsVelY(i)
+ aggregated(9*(i-1) + 8 + offset) = wsVelZ(i)
+ aggregated(9*(i-1) + 9 + offset) = numCells(i)
+ end do
+
+ ! Send all local statistics
+ ! TODO if there is an mpas_dmpar_allgather, that should be used instead
+ !
of max -- in theory, that should be more efficient because + ! data just has to be copied to all procs, rather than compared + ! to make the max + call mpas_dmpar_max_real_array(dmInfo, & + 9 * maxCCsPerDomain * (dmInfo % nprocs), & + aggregated, & + globalStats) + + call mpas_timer_stop("all gather") + + ! Sort partial eddy stats according to eddy ID for efficient merging + call mpas_timer_start("sort/reduce") + + totalCCs = maxCCsPerDomain * dmInfo % nprocs + if (totalCCs > 0) then + call ocn_sort_eddy_stats(globalStats, totalCCs, 9) + end if + + ! Aggregate the results + listIdx = 0 + aggregated = 0.0_RKIND + curId = -1 + do i = 1, totalCCs + ! if our current id is not the same (new eddy) + if (globalStats(9*(i-1)+1) /= curId) then + ! get ID + if (globalStats(9*(i-1)+1) .lt. 0) then + curId = 0 + else + curId = globalStats(9*(i-1)+1) + end if + + ! check to see if the last one was big enough + if (listIdx .gt. 0) then + ! cull it if it wasn't + if (aggregated(9*(listIdx-1)+9) .lt. min_cells) then + cullId = aggregated(9*(listIdx-1)+1) + + do j = 1, 9 + aggregated(9*(listIdx-1)+j) = 0.0_RKIND + end do + listIdx = listIdx - 1 + + ! remove the CC from the local domain + do k = 1, nCellsSolve + do j = 1, nVertLevels + if (OW_cc_id(j, k) == cullId) then + OW_cc_id(j, k) = -1 + end if + end do + end do + end if + end if + + ! if 0, then end of the list + if (curId == 0) then + exit + end if + + ! new eddy + listIdx = listIdx + 1 + aggregated(9*(listIdx-1)+1) = curId + end if + + ! Merge stats about weighted position, velocity, etc. by summing + do j = 2, 9 + aggregated(9*(listIdx-1)+j) = aggregated(9*(listIdx-1)+j) & + + globalStats(9*(i-1)+j) + end do + end do + call mpas_timer_stop("sort/reduce") + + ! only output if IO node + if (dminfo % my_proc_id == IO_NODE) then + ! 
Output aggregated + call mpas_timer_start("output") + call ocn_output_eddy_stats(listIdx, aggregated, use_lat_lon_coords,xtime) + call mpas_timer_stop("output") + end if + + end subroutine ocn_aggregate_eddy_stats!}}} + + +!*********************************************************************** +! +! routine ocn_sort_eddy_stats +! +!> \brief Sorts eddy statistics according to eddy ID. +!> \author Andre Schmeisser +!> \date August 2014 +!> \details +!> Each (partial) eddy has an ID used for sorting and several statistics +!> associated with it. For this, statistics are stored in an "array of structs" +!> fashion (blocks of size elemSize), where the first value represents the ID. +!> QSort is used to sort these blocks. +!> Eddies split over domains will be represented by several partial eddies +!> with the same ID, which after sorting will be adjacent in memory, +!> allowing for efficient aggregation. +! +!----------------------------------------------------------------------- + + subroutine ocn_sort_eddy_stats(stats, numRecords, recordLength)!{{{ + use iso_c_binding + + integer, intent(in) :: numRecords, recordLength + real (kind=RKIND), dimension(numRecords*recordLength), intent(inout), target :: stats + integer(c_size_t) elemCount, elemSize + + elemCount = numRecords + elemSize = recordLength * SIZE_REAL + + call qsort(c_loc(stats(1)), elemCount, elemSize, c_funloc(compareRealDescending)) + + end subroutine ocn_sort_eddy_stats!}}} + + + integer(c_int) function compareIntDescending(a, b) bind(C)!{{{ + integer(c_int), intent(in) :: a, b + + if (a < b) then + compareIntDescending = 1 + else if (a == b) then + compareIntDescending = 0 + else + compareIntDescending = -1 + end if + end function compareIntDescending!}}} + + integer(c_int) function compareRealDescending( a, b ) bind(C)!{{{ + real (C_REAL), intent(in) :: a, b + + if (a < b) then + compareRealDescending = 1 + else if (a == b) then + compareRealDescending = 0 + else + compareRealDescending = -1 + end if + 
end function compareRealDescending!}}} + + +!*********************************************************************** +! +! routine ocn_output_eddy_stats +! +!> \brief Outputs eddy statistics +!> \author Andre Schmeisser +!> \date August 2014 +! +!----------------------------------------------------------------------- + + subroutine ocn_output_eddy_stats(numEddies, aggData, use_lat_lon_coords,xtime)!{{{ + + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, intent(in) :: numEddies + real (kind=RKIND), dimension(:), intent(in) :: aggData + logical, intent(in) :: use_lat_lon_coords + character (len=StrKIND), intent(in) :: xtime + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + real (kind=RKIND) :: v + integer i, fileID + + fileID = mpas_get_free_unit() + + ! Create output file. + open(fileID, file=trim(config_AM_okuboWeiss_directory)//'/eddy_census_'//trim(xtime)//'.txt', STATUS='UNKNOWN', POSITION='rewind') + + if (use_lat_lon_coords) then + write (fileID, '(10A)') '"eddy ID", "number of cells", "volume sum, m^3", "average longitude, degrees", "average latitude, degrees", "average depth, m", "average zonal velocity, m/s", "average meridional velocity, m/s", "average vertical velocity, m/s"' + else + write (fileID, '(10A)') '"eddy ID", "number of cells", "volume sum, m^3", "average x-position, m", "average y-position, m", "average depth, m", "average x-velocity, m/s", "average y-velocity, m/s", "average z-velocity, m/s"' + end if + + ! 
Output number of eddies and statistics for each eddy + do i = 1, numEddies + write (fileID, '(I10, A)', advance='no') int(aggData(9*(i-1)+1)), ', ' + v = aggData(9*(i-1)+2) + write (fileID, '(I10, A)', advance='no') int(aggData(9*(i-1)+9)), ', ' + write (fileID, '(ES12.5, A)', advance='no') v, ', ' + write (fileID, '(ES12.5, A, ES12.5, A, ES12.5, A)', advance='no') aggData(9*(i-1)+3)/v, ', ', aggData(9*(i-1)+4)/v, ', ', aggData(9*(i-1)+5)/v, ', ' + write (fileID, '(ES12.5, A, ES12.5, A, ES12.5)') aggData(9*(i-1)+6)/v, ', ', aggData(9*(i-1)+7)/v, ', ', aggData(9*(i-1)+8)/v + end do + + close(fileID) + + end subroutine ocn_output_eddy_stats!}}} + +!*********************************************************************** +! +! function mpas_get_free_unit +! +!----------------------------------------------------------------------- + + integer function mpas_get_free_unit()!{{{ + implicit none + + integer :: index + logical :: isOpened + + mpas_get_free_unit = 0 + do index = 1,99 + if((index /= 5) .and. (index /= 6)) then + inquire(unit = index, opened = isOpened) + if( .not. isOpened) then + mpas_get_free_unit = index + return + end if + end if + end do + end function mpas_get_free_unit!}}} + +!*********************************************************************** +! +! routine mpas_velocity_gradient_R3Cell +! +!> \brief Computes the velocity gradient at cell centers, in R3 +!> \author Andre Schmeisser +!> \date August 2014 +!> \details +!> This routine computes the velocity gradient at cell centers using the weak +!> derivative. Output is an R3 velocity gradient tensor in 3x3 format. +! +!----------------------------------------------------------------------- + + subroutine mpas_velocity_gradient_R3Cell(normalVelocity, tangentialVelocity, &!{{{ + meshPool, edgeSignOnCell, edgeTangentVectors, includeHalo, & + nVertLevels, nEdges, & + velocityGradient) + + !----------------------------------------------------------------- + ! + ! input variables + ! 
+ !----------------------------------------------------------------- + + real (kind=RKIND), dimension(:,:), intent(in) :: & + edgeTangentVectors, &!< Input: unit vector tangent to an edge + normalVelocity, &!< Input: Horizontal velocity normal to edge + tangentialVelocity !< Input: Horizontal velocity tangent to edge + + integer, dimension(:,:), intent(in) :: & + edgeSignOnCell !< Input: Direction of vector connecting cells + + integer, intent(in) :: & + nVertLevels, nEdges + + type (mpas_pool_type), intent(in) :: & + meshPool !< Input: mesh information + + logical, intent(in) :: & + includeHalo !< Input: If true, halo cells and edges are included in computation + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + real (kind=RKIND), dimension(:,:,:,:), intent(out) :: & + velocityGradient !< Output: strain rate tensor at cell center, R3, in symmetric 6-index form + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + integer :: iEdge, iCell, i, j, k + integer, pointer :: nCellsCompute + + integer, dimension(:), pointer :: nEdgesOnCell, maxLevelCell + integer, dimension(:,:), pointer :: edgesOnCell + + + real (kind=RKIND) :: invAreaCell + real (kind=RKIND), dimension(3,3) :: outerProductEdge3x3 + real (kind=RKIND), dimension(:), pointer :: dvEdge, areaCell + real (kind=RKIND), dimension(:,:), pointer :: edgeNormalVectors + + real (kind=RKIND), dimension(3,3, nVertLevels,nEdges) :: outerProductEdgeFull + + if (includeHalo) then + call mpas_pool_get_dimension(meshPool, 'nCells', nCellsCompute) + else + call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsCompute) + end if + + !call mpas_pool_get_dimension(meshPool, 'nVertLevels', nVertLevels) + + call mpas_pool_get_array(meshPool, 'nEdgesOnCell', nEdgesOnCell) + call mpas_pool_get_array(meshPool, 'edgesOnCell', edgesOnCell) + call mpas_pool_get_array(meshPool, 'dvEdge', dvEdge) + call mpas_pool_get_array(meshPool, 'areaCell', areaCell) + call mpas_pool_get_array(meshPool, 'edgeNormalVectors', edgeNormalVectors) + call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) + + do iEdge=1,nEdges + do k=1,nVertLevels + do i=1,3 + do j=1,3 + ! outer produce at each edge: + ! u_e n_e n_e* + v_e n_e \tilde{n}_e* + outerProductEdge3x3(i,j) = edgeNormalVectors(i,iEdge) & + *( normalVelocity(k,iEdge) *edgeNormalVectors(j,iEdge) & + + tangentialVelocity(k,iEdge)*edgeTangentVectors(j,iEdge) & + ) + enddo + enddo + outerProductEdgeFull(:,:,k,iEdge) = outerProductEdge3x3(:,:) + enddo + enddo + + velocityGradient = 0.0 + do iCell = 1, nCellsCompute + invAreaCell = 1.0 / areaCell(iCell) + do i = 1, nEdgesOnCell(iCell) + iEdge = edgesOnCell(i, iCell) + do k = 1, maxLevelCell(iCell) + ! edgeSignOnCell is to get outward unit normal on edgeNormalVectors + ! 
minus sign in front is to match form on divergence operator + velocityGradient(:,:,k,iCell) = velocityGradient(:,:,k,iCell) & + - edgeSignOnCell(i,iCell)*outerProductEdgeFull(:,:,k,iEdge)*invAreaCell*dvEdge(iEdge) + end do + end do + end do + + end subroutine mpas_velocity_gradient_R3Cell!}}} + + +!*********************************************************************** +! +! routine ocn_compute_okubo_weiss +! +!> \brief Compute MPAS-Ocean analysis member +!> \author Andre Schmeisser +!> \date August 2014 +!> \details +!> This routine conducts all computation required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_compute_okubo_weiss(domain, timeLevel, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, intent(in) :: timeLevel + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + type (mpas_pool_type), pointer :: okuboWeissAMPool + type (dm_info) :: dminfo + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: statePool + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: scratchPool + type (mpas_pool_type), pointer :: diagnosticsPool + + integer, pointer :: nVertLevels, nCells, nEdges + integer, dimension(:), pointer :: maxLevelCell + integer, dimension(:,:), pointer :: edgeSignOnCell + + real (kind=RKIND), dimension(:,:), pointer :: normalVelocity + real (kind=RKIND), dimension(:,:), pointer :: tangentialVelocity + real (kind=RKIND), dimension(:,:), pointer :: edgeTangentVectors + + real (kind=RKIND), dimension(:,:), pointer :: om, OW + integer, dimension(:,:), pointer :: OW_cc_id + + type(field2DReal), pointer :: SField, Lam1Field, Lam2Field, Lam2_R3Field + type(field4DReal), pointer :: velocityGradientField + type(field2DInteger), pointer :: OW_threshField + + real (kind=RKIND), dimension(:,:), pointer :: S, Lam1, Lam2, Lam2_R3 + real (kind=RKIND), dimension(:,:,:,:), pointer :: velocityGradient + integer, dimension(:,:), pointer :: OW_thresh + + logical, pointer :: config_AM_okuboWeiss_compute_eddy_census + real (kind=RKIND), pointer :: OW_normalization, Lam2_normalization, threshold + + err = 0 + dminfo = domain % dminfo + + call mpas_pool_get_config(ocnConfigs, 'config_AM_okuboWeiss_normalization', OW_normalization) + call mpas_pool_get_config(ocnConfigs, 'config_AM_okuboWeiss_lambda2_normalization', Lam2_normalization) + call mpas_pool_get_config(ocnConfigs, 'config_AM_okuboWeiss_compute_eddy_census', config_AM_okuboWeiss_compute_eddy_census) + call mpas_pool_get_config(ocnConfigs, 'config_AM_okuboWeiss_threshold_value', threshold) + + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'state', statePool) + call mpas_pool_get_subpool(block % structs, 'mesh', 
meshPool) + call mpas_pool_get_subpool(block % structs, 'okuboWeissScratch', scratchPool) + call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) + call mpas_pool_get_subpool(block % structs, 'okuboWeissAM', okuboWeissAMPool) + + call mpas_pool_get_dimension(block % dimensions, 'nVertLevels', nVertLevels) + call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) + call mpas_pool_get_dimension(meshPool, 'nEdges', nEdges) + + call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) + call mpas_pool_get_array(meshPool, 'edgeSignOnCell', edgeSignOnCell) + call mpas_pool_get_array(meshPool, 'edgeTangentVectors', edgeTangentVectors) + + call mpas_pool_get_array(statePool, 'normalVelocity', normalVelocity, timeLevel) + call mpas_pool_get_array(diagnosticsPool, 'tangentialVelocity', tangentialVelocity) + + call mpas_pool_get_array(okuboWeissAMPool, 'okuboWeiss', OW) + call mpas_pool_get_array(okuboWeissAMPool, 'eddyID', OW_cc_id) + call mpas_pool_get_array(okuboWeissAMPool, 'vorticity', om) + + call mpas_pool_get_field(scratchPool, 'velocityGradient', velocityGradientField) + call mpas_pool_get_field(scratchPool, 'thresholdedOkuboWeiss', OW_threshField) + call mpas_pool_get_field(scratchPool, 'shearAndStrain', SField) + call mpas_pool_get_field(scratchPool, 'lambda1', Lam1Field) + call mpas_pool_get_field(scratchPool, 'lambda2', Lam2Field) + call mpas_pool_get_field(scratchPool, 'lambda2R3', Lam2_R3Field) + + call mpas_allocate_scratch_field(velocityGradientField, .true.) + call mpas_allocate_scratch_field(OW_threshField, .true.) + call mpas_allocate_scratch_field(SField, .true.) + call mpas_allocate_scratch_field(Lam1Field, .true.) + call mpas_allocate_scratch_field(Lam2Field, .true.) + call mpas_allocate_scratch_field(Lam2_R3Field, .true.) 
+ + velocityGradient => velocityGradientField % array + OW_thresh => OW_threshField % array + S => SField % array + Lam1 => Lam1Field % array + Lam2 => Lam2Field % array + Lam2_R3 => Lam2_R3Field % array + + ! Compute velocity gradient + call mpas_velocity_gradient_R3Cell(normalVelocity, tangentialVelocity, & + meshPool, edgeSignOnCell, edgeTangentVectors, .true., & + nVertLevels, nEdges, velocityGradient) + + ! Compute Okubo-Weiss and Lambda 2 values + call ocn_compute_OW_values(velocityGradient, nVertLevels, nCells, maxLevelCell, OW, OW_normalization, & + Lam2, Lam2_R3, Lam2_normalization, S, om, Lam1) + + ! Threshold field OW + call ocn_threshold_OW(nVertLevels, nCells, threshold, OW, OW_thresh) + + ! Compute connected components of thresholded field + if (config_AM_okuboWeiss_compute_eddy_census) then + call mpas_timer_start("OW connected components") + call ocn_compute_OW_component_IDs(dminfo, block, meshPool, dminfo % my_proc_id, nVertLevels, & + nCells, OW_thresh, OW_cc_id) + call mpas_timer_stop("OW connected components") + end if + + call mpas_deallocate_scratch_field(velocityGradientField, .true.) + call mpas_deallocate_scratch_field(OW_threshField, .true.) + call mpas_deallocate_scratch_field(SField, .true.) + call mpas_deallocate_scratch_field(Lam1Field, .true.) + call mpas_deallocate_scratch_field(Lam2Field, .true.) + call mpas_deallocate_scratch_field(Lam2_R3Field, .true.) + + block => block % next + end do + + end subroutine ocn_compute_okubo_weiss!}}} + +!*********************************************************************** +! +! routine ocn_restart_okubo_weiss +! +!> \brief Save restart for MPAS-Ocean analysis member +!> \author Andre Schmeisser +!> \date August 2014 +!> \details +!> This routine conducts computation required to save a restart state +!> for the MPAS-Ocean analysis member. +! 
+!----------------------------------------------------------------------- + + subroutine ocn_restart_okubo_weiss(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_restart_okubo_weiss!}}} + +!*********************************************************************** +! +! routine ocn_write_okubo_weiss +! +!> \brief MPAS-Ocean analysis output +!> \author Andre Schmeisser +!> \date August 2014 +!> \details +!> This routine writes all output for this MPAS-Ocean analysis member. +!> At this time this is just a stub, and all analysis output is written +!> to the output file specified by config_output_name. +! +!----------------------------------------------------------------------- + + subroutine ocn_write_okubo_weiss(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(in) :: domain + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! 
output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_write_okubo_weiss!}}} + +!*********************************************************************** +! +! routine ocn_finalize_okubo_weiss +! +!> \brief Finalize MPAS-Ocean analysis member +!> \author Andre Schmeisser +!> \date August 2014 +!> \details +!> This routine conducts all finalizations required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_finalize_okubo_weiss(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_finalize_okubo_weiss!}}} + +end module ocn_okubo_weiss + +! 
vim: foldmethod=marker diff --git a/src/core_ocean/analysis_members/mpas_ocn_okubo_weiss_eigenvalues.c b/src/core_ocean/analysis_members/mpas_ocn_okubo_weiss_eigenvalues.c new file mode 100644 index 0000000000..8b6783c77b --- /dev/null +++ b/src/core_ocean/analysis_members/mpas_ocn_okubo_weiss_eigenvalues.c @@ -0,0 +1,220 @@ +/* +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +! +! ocn_okubo_weiss_eigenvalues +! +!> \brief MPAS ocean analysis core member: okubo_weiss +!> \author Andre Schmeisser +!> \date August 2014 +!> \details +!> MPAS ocean analysis core member: okubo_weiss +! +!----------------------------------------------------------------------- +*/ + +#include + +#ifdef UNDERSCORE +#define compute_ev_2 compute_ev_2_ +#define compute_ev_3 compute_ev_3_ +#else +#ifdef DOUBLEUNDERSCORE +#define compute_ev_2 compute_ev_2__ +#define compute_ev_3 compute_ev_3__ +#endif +#endif + +#ifdef SINGLE_PRECISION + typedef float real; +#else + typedef double real; +#endif + +#define swap(A, B) tmp = A; A = B; B = tmp; + +static inline void sort_descending_complex_3(real* wr, real* wi) +{ + real tmp; + if (wr[0] < wr[1]) + { + swap(wr[0], wr[1]); + swap(wi[0], wi[1]); + } + if (wr[1] < wr[2]) + { + swap(wr[1], wr[2]); + swap(wi[1], wi[2]); + } + if (wr[0] < wr[1]) + { + swap(wr[0], wr[1]); + swap(wi[0], wi[1]); + } +} + +/* +!*********************************************************************** +! +! routine compute_ev_2 +! 
/*
!> \brief Compute the eigenvalues of real 2x2 matrix
!> \author Andre Schmeisser
!> \date August 2014
!> \details
!> Compute the eigenvalues of real 2x2 matrix
!
!-----------------------------------------------------------------------
*/
void compute_ev_2(real A[4], real wr[2], real wi[2])
{
    /* Eigenvalues of [[a,b],[c,d]] from the characteristic quadratic:
       lambda = (tr(A) +/- sqrt(tr(A)^2 - 4 det(A))) / 2.
       Real parts are returned in wr[0..1], imaginary parts in wi[0..1]. */
    real a = A[0];
    real b = A[1];
    real c = A[2];
    real d = A[3];
    real trA = a+d;
    real detA = a*d - b*c;
    real discr = trA*trA - 4*detA;
    real root;
    if (discr >= 0)
    {
        /* non-negative discriminant: two real eigenvalues */
        root = sqrt(discr);
        wr[0] = (trA + root) / 2;
        wr[1] = (trA - root) / 2;
        wi[0] = wi[1] = 0;
    }
    else
    {
        /* negative discriminant: complex conjugate pair, equal real parts */
        wr[0] = wr[1] = trA / 2;
        /* NOTE(review): absolute tolerance on discr; unlike compute_ev_3,
           this routine does not normalize its input, so 1e-10 is
           scale-dependent — confirm callers pass O(1)-magnitude data. */
        if (fabs(discr) < 1e-10)
        {
            wi[0] = wi[1] = 0;
        }
        else
        {
            root = sqrt(-discr);
            wi[0] = root / 2;
            wi[1] = - root / 2;
        }
    }

}

/*
!***********************************************************************
!
! routine compute_ev_3
!
!> \brief Compute the eigenvalues of real 3x3 matrix
!> \author Andre Schmeisser
!> \date August 2014
!> \details
!> Compute the eigenvalues of real 3x3 matrix
!> characteristic polynomial: det(A-x*I) = 0 for eigenvalues x
!> x^3 + x^2 (- Tr(A)) + x (-1/2*(Tr(A^2)-Tr(A)^2)) - det(A) = 0
!> x^3 + b*x^2 + c*x + d = 0
!>
!> reduce to depressed cubic t^3 + p*t + q = 0
!> x = t - b/3
!> p = (3c - b^2)/3 = c - b^2/3
!> q = (2b^3 - 9bc + 27d)/27 = (2b^3 - 9bc)/27 + d
!>
!
!-----------------------------------------------------------------------
*/
void compute_ev_3(real* mat, real* wr, real* wi )
{
    /* find value to normalize with, to protect against over-/underflow */
    double maxAbsVal = 0.;
    int i;

    for (i = 0; i < 9; i++)
        if (fabs(mat[i]) > maxAbsVal)
            maxAbsVal = fabs(mat[i]);

    /* zero matrix: all eigenvalues are exactly zero */
    if (maxAbsVal == 0.)
    {
        wr[0] = wr[1] = wr[2] = 0.;
        wi[0] = wi[1] = wi[2] = 0.;
        return;
    }

    /* work on a scaled copy A = mat/normVal; eigenvalues scale back
       linearly at the end */
    double normVal = maxAbsVal;
    double A[9];
    for (i = 0; i < 9; i++)
        A[i] = mat[i] / normVal;

    /*
    characteristic polynomial: det(A-x*I) = 0 for eigenvalues x
    x^3 + x^2 (- Tr(A)) + x (-1/2*(Tr(A^2)-Tr(A)^2)) - det(A) = 0
    x^3 + b*x^2 + c*x + d = 0
    */

    double b = -A[0] -A[4] -A[8];
    double c = A[0]*A[4] + A[0]*A[8] + A[4]*A[8] - A[2]*A[6] - A[1]*A[3] - A[5]*A[7];
    double det = A[0] * A[4] * A[8]
               + A[1] * A[5] * A[6]
               + A[2] * A[3] * A[7]
               - A[2] * A[4] * A[6]
               - A[1] * A[3] * A[8]
               - A[0] * A[5] * A[7];
    double d = -det;

    /*
    reduce to depressed cubic t^3 + p*t + q = 0
    x = t - b/3
    p = (3c - b^2)/3 = c - b^2/3
    q = (2b^3 - 9bc + 27d)/27 = (2b^3 - 9bc)/27 + d
    */

    /* Q,R formulation of the cubic (cf. Numerical Recipes, sec. 5.6):
       three real roots when R^2 < Q^3, otherwise one real root plus a
       complex-conjugate pair */
    double Q = (b*b - 3*c)/9;
    double R = (b*(2*b*b - 9*c) + 27*d) / 54;
    double RR = R*R;
    double QQQ = Q*Q*Q;
    double b3 = b / 3;
    if (RR < QQQ)
    {
        // three real roots (trigonometric form)
        double theta = acos(R/sqrt(QQQ));
        double f = -2 * sqrt(Q);
        wr[0] = f * cos(theta/3) -b3;
        wr[1] = f * cos((theta + 2*M_PI)/3) -b3;
        wr[2] = f * cos((theta - 2*M_PI)/3) -b3;
        wi[0] = wi[1] = wi[2] = 0;
    }
    else
    {
        // one real root, two complex conjugates
        double sign = R >= 0 ? 1 : -1;
        /* NOTE(review): |R| - sqrt(RR - QQQ) suffers cancellation when
           Q ~ 0; Numerical Recipes uses |R| + sqrt(...) (with B = Q/A
           compensating) to avoid it — verify accuracy is acceptable here. */
        double rad = fabs(R) - sqrt(RR - QQQ);
        double root3;
        if (rad >= 0)
            root3 = pow(rad, 1./3.);
        else
            root3 = -pow(-rad, 1./3.);
        /* this local scalar A intentionally shadows the array A[9] above */
        double A = -sign*root3;
        double B = A == 0 ?
                   0 : Q / A;
        wr[0] = (A+B) - b3;
        wi[0] = 0;

        /* conjugate pair: real part -(A+B)/2 - b/3,
           imaginary part +/- (sqrt(3)/2)*(A-B) */
        wr[1] = -0.5*(A+B)-b3;
        wi[1] = 0.5*sqrt(3)*(A-B);

        wr[2] = wr[1];
        wi[2] = -wi[1];
    }
    sort_descending_complex_3(wr, wi);

    /* undo the initial normalization: eigenvalues scale linearly with the matrix */
    for (i = 0; i < 3; i++)
    {
        wr[i] *= normVal;
        wi[i] *= normVal;
    }
}

diff --git a/src/core_ocean/analysis_members/mpas_ocn_surface_area_weighted_averages.F b/src/core_ocean/analysis_members/mpas_ocn_surface_area_weighted_averages.F
new file mode 100644
index 0000000000..086d762159
--- /dev/null
+++ b/src/core_ocean/analysis_members/mpas_ocn_surface_area_weighted_averages.F
@@ -0,0 +1,638 @@
! Copyright (c) 2013, Los Alamos National Security, LLC (LANS)
! and the University Corporation for Atmospheric Research (UCAR).
!
! Unless noted otherwise source code is licensed under the BSD license.
! Additional copyright and license information can be found in the LICENSE file
! distributed with this code, or at http://mpas-dev.github.com/license.html
!
!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
!
! ocn_surface_area_weighted_averages
!
!> \brief MPAS ocean analysis member: areal-{min,max,avg} of 2D fields
!> \author Todd Ringler
!> \date April 24, 2015
!> \details
!> MPAS ocean analysis member: surface-area-weighted averages
!
!-----------------------------------------------------------------------

module ocn_surface_area_weighted_averages

   use mpas_derived_types
   use mpas_pool_routines
   use mpas_dmpar
   use mpas_timekeeping
   use mpas_stream_manager

   use ocn_constants
   use ocn_diagnostics_routines

   implicit none
   private
   save

   !--------------------------------------------------------------------
   !
   ! Public parameters
   !
   !--------------------------------------------------------------------

   !--------------------------------------------------------------------
   !
   ! Public member functions
   !
+ !-------------------------------------------------------------------- + + public :: ocn_init_surface_area_weighted_averages, & + ocn_compute_surface_area_weighted_averages, & + ocn_restart_surface_area_weighted_averages, & + ocn_finalize_surface_area_weighted_averages + + !-------------------------------------------------------------------- + ! + ! Private module variables + ! + !-------------------------------------------------------------------- + +!*********************************************************************** + +contains + + +!*********************************************************************** +! +! routine ocn_init_surface_area_weighted_averages +! +!> \brief Initialize MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date April 24, 2015 +!> \details +!> This routine conducts all initializations required for the +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_init_surface_area_weighted_averages(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_init_surface_area_weighted_averages!}}} + +!*********************************************************************** +! +! 
routine ocn_compute_surface_area_weighted_averages +! +!> \brief Compute MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date April 24, 2015 +!> \details +!> This routine conducts all computation required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_compute_surface_area_weighted_averages(domain, timeLevel, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, intent(in) :: timeLevel + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + type (dm_info) :: dminfo + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: surfaceAreaWeightedAveragesAMPool + type (mpas_pool_type), pointer :: statePool + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: scratchPool + type (mpas_pool_type), pointer :: diagnosticsPool + type (mpas_pool_type), pointer :: forcingPool + + real (kind=RKIND), dimension(:,:), pointer :: minValueWithinOceanRegion + real (kind=RKIND), dimension(:,:), pointer :: maxValueWithinOceanRegion + real (kind=RKIND), dimension(:,:), pointer :: avgValueWithinOceanRegion + + ! pointers to data in pools to be analyzed + ! 
bulkForcing Pkg + real (kind=RKIND), dimension(:), pointer :: latentHeatFlux + real (kind=RKIND), dimension(:), pointer :: sensibleHeatFlux + real (kind=RKIND), dimension(:), pointer :: longWaveHeatFluxUp + real (kind=RKIND), dimension(:), pointer :: longWaveHeatFluxDown + real (kind=RKIND), dimension(:), pointer :: seaIceHeatFlux + real (kind=RKIND), dimension(:), pointer :: shortWaveHeatFlux + real (kind=RKIND), dimension(:), pointer :: evaporationFlux + real (kind=RKIND), dimension(:), pointer :: seaIceFreshWaterFlux + real (kind=RKIND), dimension(:), pointer :: riverRunoffFlux + real (kind=RKIND), dimension(:), pointer :: iceRunoffFlux + real (kind=RKIND), dimension(:), pointer :: rainFlux + real (kind=RKIND), dimension(:), pointer :: snowFlux + + ! frazilIce Pkg + real (kind=RKIND), dimension(:), pointer :: seaIceEnergy + + real (kind=RKIND), dimension(:), pointer :: surfaceThicknessFlux + real (kind=RKIND), dimension(:,:), pointer :: surfaceTracerFlux + real (kind=RKIND), dimension(:), pointer :: penetrativeTemperatureFlux + + real (kind=RKIND), dimension(:), pointer :: seaIceSalinityFlux + real (kind=RKIND), dimension(:), pointer :: surfaceWindStressMagnitude + real (kind=RKIND), dimension(:), pointer :: windStressZonal + real (kind=RKIND), dimension(:), pointer :: windStressMeridional + real (kind=RKIND), dimension(:), pointer :: seaSurfacePressure + real (kind=RKIND), dimension(:), pointer :: ssh + real (kind=RKIND), dimension(:), pointer :: boundaryLayerDepth + real (kind=RKIND), dimension(:,:,:), pointer :: tracers + + ! pointers to data in mesh pool + integer, pointer :: nCells, nCellsSolve, nSfcAreaWeightedAvgFields, nOceanRegions + integer, pointer :: indexTemperature, indexSalinity + real (kind=RKIND), dimension(:), pointer :: areaCell, lonCell, latCell + + ! 
scratch space + type(field2DReal), pointer :: workArrayField + real (kind=RKIND), dimension(:,:), pointer :: workArray + type(field1DReal), pointer :: workMaskField, workMinField, workMaxField, workSumField + real (kind=RKIND), dimension(:), pointer :: workMask, workMin, workMax, workSum + + ! local variables + integer :: iDataField, nDefinedDataFields + integer :: iCell, iRegion, iTracer, err_tmp + + ! package flag + logical, pointer :: surfaceAreaWeightedAveragesAMPKGActive + logical, pointer :: bulkForcingPkgActive + logical, pointer :: frazilIcePkgActive + + ! buffers data for message passaging + integer :: kBuffer, kBufferLength + real (kind=RKIND), dimension(:), allocatable :: workBufferSum, workBufferSumReduced + real (kind=RKIND), dimension(:), allocatable :: workBufferMin, workBufferMinReduced + real (kind=RKIND), dimension(:), allocatable :: workBufferMax, workBufferMaxReduced + + ! assume no error + err = 0 + + ! get status of other packages + call mpas_pool_get_package(ocnPackages, 'bulkForcingActive', bulkForcingPkgActive) + call mpas_pool_get_package(ocnPackages, 'frazilIceActive', frazilIcePkgActive) + + ! set highest level pointer + dminfo = domain % dminfo + + ! find the number of regions + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nOceanRegions', nOceanRegions) + + ! find the number of data fields + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nSfcAreaWeightedAvgFields', nSfcAreaWeightedAvgFields) + + ! 
allocate buffer for message passing + kBuffer=0 + kBufferLength=nOceanRegions*nSfcAreaWeightedAvgFields + allocate(workBufferSum(kBufferLength)) + allocate(workBufferMin(kBufferLength)) + allocate(workBufferMax(kBufferLength)) + allocate(workBufferSumReduced(kBufferLength)) + allocate(workBufferMinReduced(kBufferLength)) + allocate(workBufferMaxReduced(kBufferLength)) + workBufferSum=0.0 + workBufferMin=0.0 + workBufferMax=0.0 + workBufferSumReduced=0.0 + workBufferMinReduced=0.0 + workBufferMaxReduced=0.0 + + ! loop over all ocean regions + do iRegion=1,nOceanRegions + + ! get pointers to analysis member arrays + call mpas_pool_get_subpool(domain % blocklist % structs, 'surfaceAreaWeightedAveragesAM', surfaceAreaWeightedAveragesAMPool) + call mpas_pool_get_array(surfaceAreaWeightedAveragesAMPool, 'minValueWithinOceanRegion', minValueWithinOceanRegion) + call mpas_pool_get_array(surfaceAreaWeightedAveragesAMPool, 'maxValueWithinOceanRegion', maxValueWithinOceanRegion) + call mpas_pool_get_array(surfaceAreaWeightedAveragesAMPool, 'avgValueWithinOceanRegion', avgValueWithinOceanRegion) + + ! get pointers to scratch variables + call mpas_pool_get_subpool(domain % blocklist % structs, 'surfaceAreaWeightedAveragesAMScratch', scratchPool) + call mpas_pool_get_field(scratchPool, 'workArray', workArrayField) + call mpas_pool_get_field(scratchPool, 'workMask', workMaskField) + call mpas_pool_get_field(scratchPool, 'workMin', workMinField) + call mpas_pool_get_field(scratchPool, 'workMax', workMaxField) + call mpas_pool_get_field(scratchPool, 'workSum', workSumField) + call mpas_allocate_scratch_field(workArrayField, .true.) + call mpas_allocate_scratch_field(workMaskField, .true.) + call mpas_allocate_scratch_field(workMinField, .true.) + call mpas_allocate_scratch_field(workMaxField, .true.) + call mpas_allocate_scratch_field(workSumField, .true.) 
+ workArray => workArrayField % array + workMask => workMaskField % array + workMin => workMinField % array + workMax => workMaxField % array + workSum => workSumField % array + + ! loop over blocks + ! NOTE: code is not valid for multiple blocks ! + block => domain % blocklist + do while (associated(block)) + ! get pointers to pools + call mpas_pool_get_subpool(block % structs, 'state', statePool) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) + call mpas_pool_get_subpool(block % structs, 'forcing', forcingPool) + + ! get pointers to mesh + call mpas_pool_get_dimension(block % dimensions, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) + call mpas_pool_get_dimension(block % dimensions, 'nSfcAreaWeightedAvgFields', nSfcAreaWeightedAvgFields) + call mpas_pool_get_dimension(block % dimensions, 'nOceanRegions', nOceanRegions) + call mpas_pool_get_dimension(statePool, 'index_temperature', indexTemperature) + call mpas_pool_get_dimension(statePool, 'index_salinity', indexSalinity) + call mpas_pool_get_array(meshPool, 'areaCell', areaCell) + call mpas_pool_get_array(meshPool, 'lonCell', lonCell) + call mpas_pool_get_array(meshPool, 'latCell', latCell) + + ! test to make sure the arrays are big enough + nDefinedDataFields = size(avgValueWithinOceanRegion,dim=1) + if (nDefinedDataFields > nSfcAreaWeightedAvgFields) then + write (stderrUnit,*) 'Abort: nDefinedDataFields > nSfcAreaWeightedAvgFields' + write (stderrUnit,*) ' : increase size of ocn_surface_area_weighted_averages scratch space' + call mpas_dmpar_abort(dminfo) + endif + + ! get pointers to data that will be analyzed + ! 
listed in the order in which the fields appear in {avg,min,max}SurfaceStatistics + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'latentHeatFlux', latentHeatFlux) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'sensibleHeatFlux', sensibleHeatFlux) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'longWaveHeatFluxUp', longWaveHeatFluxUp) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'longWaveHeatFluxDown', longWaveHeatFluxDown) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'seaIceHeatFlux', seaIceHeatFlux) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'shortWaveHeatFlux', shortWaveHeatFlux) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'evaporationFlux', evaporationFlux) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'seaIceFreshWaterFlux', seaIceFreshWaterFlux) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'riverRunoffFlux', riverRunoffFlux) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'iceRunoffFlux', iceRunoffFlux) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'rainFlux', rainFlux) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'snowFlux', snowFlux) + if (frazilIcePkgActive) call mpas_pool_get_array(forcingPool, 'seaIceEnergy', seaIceEnergy) + call mpas_pool_get_array(forcingPool, 'surfaceThicknessFlux', surfaceThicknessFlux) + call mpas_pool_get_array(forcingPool, 'surfaceTracerFlux', surfaceTracerFlux) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'seaIceSalinityFlux', seaIceSalinityFlux) + call mpas_pool_get_array(forcingPool, 'surfaceWindStressMagnitude', surfaceWindStressMagnitude) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'windStressZonal', windStressZonal) + if (bulkForcingPkgActive) call mpas_pool_get_array(forcingPool, 'windStressMeridional', windStressMeridional) + call 
mpas_pool_get_array(forcingPool, 'seaSurfacePressure', seaSurfacePressure) + call mpas_pool_get_array(statePool, 'ssh', ssh, 1) + call mpas_pool_get_array(statePool, 'tracers', tracers, 1) + call mpas_pool_get_array(diagnosticsPool, 'boundaryLayerDepth',boundaryLayerDepth) + + ! compute mask + call compute_mask(nCells, nCellsSolve, iRegion, lonCell, latCell, workMask) + + ! copy data into work array + workArray( :,:) = 0.0 + workArray( 1,:) = workMask(:) + workArray( 2,:) = areaCell(:) + if (bulkForcingPkgActive) workArray( 3,:) = latentHeatFlux(:) + if (bulkForcingPkgActive) workArray( 4,:) = sensibleHeatFlux(:) + if (bulkForcingPkgActive) workArray( 5,:) = longWaveHeatFluxUp(:) + if (bulkForcingPkgActive) workArray( 6,:) = longWaveHeatFluxDown(:) + if (bulkForcingPkgActive) workArray( 7,:) = seaIceHeatFlux(:) + if (bulkForcingPkgActive) workArray( 8,:) = shortWaveHeatFlux(:) + if (bulkForcingPkgActive) workArray( 9,:) = evaporationFlux(:) + if (bulkForcingPkgActive) workArray(10,:) = seaIceFreshWaterFlux(:) + if (bulkForcingPkgActive) workArray(11,:) = riverRunoffFlux(:) + if (bulkForcingPkgActive) workArray(12,:) = iceRunoffFlux(:) + if (bulkForcingPkgActive) workArray(13,:) = rainFlux(:) + if (bulkForcingPkgActive) workArray(14,:) = snowFlux(:) + if (frazilIcePkgActive) workArray(15,:) = seaIceEnergy(:) + workArray(16,:) = surfaceThicknessFlux(:) + workArray(17,:) = surfaceTracerFlux(indexTemperature,:) + workArray(18,:) = surfaceTracerFlux(indexSalinity,:) + if (bulkForcingPkgActive) workArray(19,:) = seaIceSalinityFlux(:) + workArray(20,:) = surfaceWindStressMagnitude(:) + if (bulkForcingPkgActive) workArray(21,:) = windStressZonal(:) + if (bulkForcingPkgActive) workArray(22,:) = windStressMeridional(:) + workArray(23,:) = seaSurfacePressure(:) + workArray(24,:) = ssh(:) + workArray(25,:) = tracers(indexTemperature,1,:) + workArray(26,:) = tracers(indexSalinity,1,:) + workArray(27,:) = boundaryLayerDepth(:) + + ! 
build net heat, salinity and fresh water budget + ! net heat into ocean = latentHeatFlux+sensibleHeatFlux+longWaveHeatFluxUp+longWaveHeatFluxDown+shortWaveHeatFlux+seaIceHeatFlux+(?seaIceEnergy?) + ! net salinity into ocean = seaIceSalinityFlux + ! net freshwater into ocean = evaporationFlux+seaIceFreshWaterFlux+riverRunoffFlux+iceRunoffFlux+rainFlux+snowFlux+(?seaIceEnergy?) + if (bulkForcingPkgActive) then + workArray(28,:) = latentHeatFlux(:) & + + sensibleHeatFlux(:) & + + longWaveHeatFluxUp(:) & + + longWaveHeatFluxDown(:) & + + shortWaveHeatFlux(:) & + + seaIceHeatFlux(:) + ! + seaIceEnergy + workArray(29,:) = seaIceSalinityFlux(:) + workArray(30,:) = evaporationFlux(:) & + + seaIceFreshWaterFlux(:) & + + riverRunoffFlux(:) & + + iceRunoffFlux(:) & + + rainFlux(:) & + + snowFlux(:) + ! + seaIceEnergy(:) + end if + + call compute_statistics(nDefinedDataFields, nCellsSolve, workArray, workMask, workMin, workMax, workSum) + + ! store data in buffer in order to allow only three dmpar calls + do iDataField=1,nDefinedDataFields + kBuffer = kBuffer+1 + workBufferSum(kBuffer) = workSum(iDataField) + workBufferMin(kBuffer) = workMin(iDataField) + workBufferMax(kBuffer) = workMax(iDataField) + enddo + + block => block % next + end do + + end do ! iRegion + + ! communication + call mpas_dmpar_sum_real_array(dminfo, kBufferLength, workBufferSum, workBufferSumReduced ) + call mpas_dmpar_min_real_array(dminfo, kBufferLength, workBufferMin, workBufferMinReduced ) + call mpas_dmpar_max_real_array(dminfo, kBufferLength, workBufferMax, workBufferMaxReduced ) + + ! unpack the buffer into intent(out) of this analysis member + kBuffer=0 + do iRegion=1,nOceanRegions + do iDataField=1,nDefinedDataFields + kBuffer = kBuffer+1 + avgValueWithinOceanRegion(iDataField,iRegion)=workBufferSumReduced(kBuffer) + minValueWithinOceanRegion(iDataField,iRegion)=workBufferMinReduced(kBuffer) + maxValueWithinOceanRegion(iDataField,iRegion)=workBufferMaxReduced(kBuffer) + enddo + enddo + + ! 
normalize averages + do iRegion=1,nOceanRegions + ! normalize all field by total area + do iDataField=3,nDefinedDataFields + avgValueWithinOceanRegion(iDataField,iRegion) = avgValueWithinOceanRegion(iDataField,iRegion) / max(avgValueWithinOceanRegion(2,iRegion),1.0e-8_RKIND) + enddo + ! normalize total area by number of cells in region + avgValueWithinOceanRegion(2,iRegion) = avgValueWithinOceanRegion(2,iRegion) / max(avgValueWithinOceanRegion(1,iRegion),1.0e-8_RKIND) + enddo + + ! deallocate scratch fields + call mpas_deallocate_scratch_field(workArrayField, .true.) + call mpas_deallocate_scratch_field(workMaskField, .true.) + call mpas_deallocate_scratch_field(workMinField, .true.) + call mpas_deallocate_scratch_field(workMaxField, .true.) + call mpas_deallocate_scratch_field(workSumField, .true.) + + ! deallocate buffers + deallocate(workBufferSumReduced) + deallocate(workBufferMinReduced) + deallocate(workBufferMaxReduced) + + contains + + subroutine compute_mask(nCells, nCellsSolve, iRegion, lonCell, latCell, workMask)!{{{ + ! this subroutines produces a 0/1 mask that is multiplied with workArray to + ! allow for min/max/avg to represent specific regions of the ocean domain + ! + ! NOTE: computes_mask is temporary. workMask should be intent(in) to this entire module ! + ! + integer, intent(in) :: nCells, nCellsSolve, iRegion + real(kind=RKIND), dimension(:), intent(in) :: lonCell, latCell + real(kind=RKIND), dimension(:), intent(out) :: workMask + integer :: iCell + real(kind=RKIND) :: dtr + + dtr = 4.0*atan(1.0) / 180.0_RKIND + workMask(:) = 0.0_RKIND + do iCell=1,nCellsSolve + workMask(iCell) = 1.0_RKIND + enddo + + if (iRegion.eq.1) then + ! Arctic + do iCell=1,nCellsSolve + if(latCell(iCell).lt. 60.0*dtr) workMask(iCell) = 0.0_RKIND + enddo + elseif (iRegion.eq.2) then + ! Equatorial + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 
15.0*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt.-15.0*dtr) workMask(iCell) = 0.0_RKIND + enddo + elseif (iRegion.eq.3) then + ! Southern Ocean + do iCell=1,nCellsSolve + if(latCell(iCell).gt.-50.0*dtr) workMask(iCell) = 0.0_RKIND + enddo + elseif (iRegion.eq.4) then + ! Nino 3 + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 5.0*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt. -5.0*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).lt.210.0*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).gt.270.0*dtr) workMask(iCell) = 0.0_RKIND + enddo + elseif (iRegion.eq.5) then + ! Nino 4 + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 5.0*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt. -5.0*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).lt.160.0*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).gt.210.0*dtr) workMask(iCell) = 0.0_RKIND + enddo + elseif (iRegion.eq.6) then + ! Nino 3.4 + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 5.0*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt. -5.0*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).lt.190.0*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).gt.240.0*dtr) workMask(iCell) = 0.0_RKIND + enddo + else + ! global (do nothing!) + endif + + end subroutine compute_mask!}}} + + + subroutine compute_statistics(nDefinedDataFields, nCellsSolve, workArray, workMask, workMin, workMax, workSum)!{{{ + ! this subroutines does the actual summing, min, max, masking ect + ! 
this hides the messy code from the high-level subroutine + + integer, intent(in) :: nDefinedDataFields, nCellsSolve + real(kind=RKIND), dimension(:,:), intent(in) :: workArray + real(kind=RKIND), dimension(:), intent(in) :: workMask + real(kind=RKIND), dimension(:), intent(out) :: workMin, workMax, workSum + integer :: iCell, iData + + workSum = 0.0 + do iCell=1,nCellsSolve + workSum(1) = workSum(1) + workMask(iCell) + workSum(2) = workSum(2) + workArray(2,iCell)*workMask(iCell) + do iData=3,nDefinedDataFields + workSum(iData) = workSum(iData) + workArray(2,iCell)*workArray(iData,iCell)*workMask(iCell) + enddo + enddo + + do iData=1,nDefinedDataFields + workMin(iData) = minval(workArray(iData,1:nCellsSolve),workMask(1:nCellsSolve)>0.5_RKIND) + workMax(iData) = maxval(workArray(iData,1:nCellsSolve),workMask(1:nCellsSolve)>0.5_RKIND) + enddo + + end subroutine compute_statistics!}}} + + end subroutine ocn_compute_surface_area_weighted_averages!}}} + +!*********************************************************************** +! +! routine ocn_restart_surface_area_weighted_averages +! +!> \brief Save restart for MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date April 24, 2015 +!> \details +!> This routine conducts computation required to save a restart state +!> for the MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_restart_surface_area_weighted_averages(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! 
+ !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_restart_surface_area_weighted_averages!}}} + +!*********************************************************************** +! +! routine ocn_finalize_surface_area_weighted_averages +! +!> \brief Finalize MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date April 24, 2015 +!> \details +!> This routine conducts all finalizations required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_finalize_surface_area_weighted_averages(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_finalize_surface_area_weighted_averages!}}} + +end module ocn_surface_area_weighted_averages + +! 
vim: foldmethod=marker diff --git a/src/core_ocean/analysis_members/mpas_ocn_test_compute_interval.F b/src/core_ocean/analysis_members/mpas_ocn_test_compute_interval.F new file mode 100644 index 0000000000..e215f315d1 --- /dev/null +++ b/src/core_ocean/analysis_members/mpas_ocn_test_compute_interval.F @@ -0,0 +1,301 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +! +! ocn_test_compute_interval +! +!> \brief MPAS ocean analysis core member: test_compute_interval +!> \author Mark Petersen +!> \date May 2015 +!> \details +!> MPAS ocean analysis core member: test_compute_interval +!> +! +!----------------------------------------------------------------------- + +module ocn_test_compute_interval + + use mpas_derived_types + use mpas_pool_routines + use mpas_dmpar + use mpas_timekeeping + use mpas_stream_manager + + use ocn_constants + use ocn_diagnostics_routines + + implicit none + private + save + + !-------------------------------------------------------------------- + ! + ! Public parameters + ! + !-------------------------------------------------------------------- + + !-------------------------------------------------------------------- + ! + ! Public member functions + ! + !-------------------------------------------------------------------- + + public :: ocn_init_test_compute_interval, & + ocn_compute_test_compute_interval, & + ocn_restart_test_compute_interval, & + ocn_finalize_test_compute_interval + + !-------------------------------------------------------------------- + ! + ! Private module variables + ! 
+ !-------------------------------------------------------------------- + +!*********************************************************************** + +contains + +!*********************************************************************** +! +! routine ocn_init_test_compute_interval +! +!> \brief Initialize MPAS-Ocean analysis member +!> \author Mark Petersen +!> \date May 2015 +!> \details +!> This routine conducts all initializations required for the +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_init_test_compute_interval(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + type (mpas_pool_type), pointer :: testComputeIntervalAMPool + type (block_type), pointer :: block + real (kind=RKIND), pointer :: testComputeIntervalCounter + + err = 0 + + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'testComputeIntervalAM', testComputeIntervalAMPool) + call mpas_pool_get_array(testComputeIntervalAMPool, 'testComputeIntervalCounter',testComputeIntervalCounter) + + testComputeIntervalCounter = 0 + + block => block % next + end do + + end subroutine ocn_init_test_compute_interval!}}} + +!*********************************************************************** +! +! routine ocn_compute_test_compute_interval +! +!> \brief Compute MPAS-Ocean analysis member +!> \author Mark Petersen +!> \date May 2015 +!> \details +!> This routine conducts all computation required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_compute_test_compute_interval(domain, timeLevel, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, intent(in) :: timeLevel + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + type (dm_info) :: dminfo + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: statePool + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: scratchPool + type (mpas_pool_type), pointer :: diagnosticsPool + type (mpas_pool_type), pointer :: testComputeIntervalAMPool + character (len=StrKIND), pointer :: xtime + + ! Here are some example variables which may be needed for your analysis member + integer, pointer :: nVertLevels, nCellsSolve, nEdgesSolve, nVerticesSolve, num_tracers + integer :: iTracer, k, iCell + integer, dimension(:), pointer :: maxLevelCell, maxLevelEdgeTop, maxLevelVertexBot + + real (kind=RKIND), pointer :: testComputeIntervalCounter + + err = 0 + + dminfo = domain % dminfo + + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'testComputeIntervalAM', testComputeIntervalAMPool) + call mpas_pool_get_array(testComputeIntervalAMPool, 'testComputeIntervalCounter',testComputeIntervalCounter) + + testComputeIntervalCounter = testComputeIntervalCounter + 1 + call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) + call mpas_pool_get_array(diagnosticsPool, 'xtime', xtime) + block => block % next + end do + + write(stderrUnit,'(3A,F10.1)') 'in ocn_compute_test_compute_interval, time = ', trim(xtime), & + ', testComputeIntervalCounter = ',testComputeIntervalCounter + + end subroutine ocn_compute_test_compute_interval!}}} + +!*********************************************************************** +! +! routine ocn_restart_test_compute_interval +! +!> \brief Save restart for MPAS-Ocean analysis member +!> \author Mark Petersen +!> \date May 2015 +!> \details +!> This routine conducts computation required to save a restart state +!> for the MPAS-Ocean analysis member. +! 
+!----------------------------------------------------------------------- + + subroutine ocn_restart_test_compute_interval(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_restart_test_compute_interval!}}} + +!*********************************************************************** +! +! routine ocn_finalize_test_compute_interval +! +!> \brief Finalize MPAS-Ocean analysis member +!> \author Mark Petersen +!> \date May 2015 +!> \details +!> This routine conducts all finalizations required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_finalize_test_compute_interval(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! 
+ !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_finalize_test_compute_interval!}}} + +end module ocn_test_compute_interval + +! vim: foldmethod=marker diff --git a/src/core_ocean/analysis_members/mpas_ocn_water_mass_census.F b/src/core_ocean/analysis_members/mpas_ocn_water_mass_census.F new file mode 100644 index 0000000000..423b4b8a8c --- /dev/null +++ b/src/core_ocean/analysis_members/mpas_ocn_water_mass_census.F @@ -0,0 +1,578 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +! +! ocn_water_mass_census +! +!> \brief MPAS ocean analysis member: water mass census +!> \author Todd Ringler +!> \date May 12, 2015 +!> \details +!> MPAS ocean analysis member: water_mass_census +!> This analysis member sorts the ocean water volume based on it +!> temperature and salinity. +! +!----------------------------------------------------------------------- + +module ocn_water_mass_census + + use mpas_derived_types + use mpas_pool_routines + use mpas_dmpar + use mpas_timekeeping + use mpas_stream_manager + + use ocn_constants + use ocn_diagnostics_routines + + implicit none + private + save + + !-------------------------------------------------------------------- + ! + ! Public parameters + ! 
+ !-------------------------------------------------------------------- + + !-------------------------------------------------------------------- + ! + ! Public member functions + ! + !-------------------------------------------------------------------- + + public :: ocn_init_water_mass_census, & + ocn_compute_water_mass_census, & + ocn_restart_water_mass_census, & + ocn_finalize_water_mass_census + + !-------------------------------------------------------------------- + ! + ! Private module variables + ! + !-------------------------------------------------------------------- + +!*********************************************************************** + +contains + +!*********************************************************************** +! +! routine ocn_init_water_mass_census +! +!> \brief Initialize MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date May 10, 2015 +!> \details +!> This routine conducts all initializations required for the +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_init_water_mass_census(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_init_water_mass_census!}}} + +!*********************************************************************** +! +! routine ocn_compute_water_mass_census +! +!> \brief Compute MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date May 10, 2015 +!> \details +!> This routine conducts all computation required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_compute_water_mass_census(domain, timeLevel, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + integer, intent(in) :: timeLevel + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! 
+ !----------------------------------------------------------------- + + type (dm_info) :: dminfo + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: waterMassCensusAMPool + type (mpas_pool_type), pointer :: statePool + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: diagnosticsPool + + real (kind=RKIND), dimension(:,:,:), pointer :: waterMassFractionalDistribution + real (kind=RKIND), dimension(:,:,:), pointer :: potentialDensityOfTSDiagram + real (kind=RKIND), dimension(:,:,:), pointer :: zPositionOfTSDiagram + real (kind=RKIND), dimension(:,:), pointer :: waterMassCensusTemperatureValues + real (kind=RKIND), dimension(:,:), pointer :: waterMassCensusSalinityValues + + ! pointers to data in pools required for T/S water mass census + real (kind=RKIND), dimension(:,:), pointer :: layerThickness + real (kind=RKIND), dimension(:,:,:), pointer :: tracers + real (kind=RKIND), dimension(:,:), pointer :: potentialDensity + real (kind=RKIND), dimension(:,:), pointer :: zMid + + ! pointers to data in mesh pool + ! (note: nOceanRegionsTmpCensus, lonCell, latCell to be removed when region mask is intent(in)) + integer, pointer :: nCells, nCellsSolve, nOceanRegionsTmpCensus + integer, pointer :: index_temperature, index_salinity + integer, pointer :: nTemperatureBins, nSalinityBins + integer, pointer :: nTemperatureBinsP1, nSalinityBinsP1 + integer, dimension(:), pointer :: maxLevelCell + real (kind=RKIND), dimension(:), pointer :: areaCell, lonCell, latCell + + ! local variables + integer :: iCell, iRegion, iLevel, iTracer, iTemperatureBin, iSalinityBin, err_tmp + real (kind=RKIND), pointer :: minTemperature, maxTemperature + real (kind=RKIND), pointer :: minSalinity, maxSalinity + real (kind=RKIND) :: deltaTemperature, deltaSalinity, temperature, salinity, density, zPosition, volume + + ! package flag + logical, pointer :: waterMassCensusAMPKGActive + + ! 
buffers data for message passing + integer :: kBuffer, kBufferLength + real (kind=RKIND), dimension(:), allocatable :: workBufferSum, workBufferSumReduced + ! (note: regionMask will (soon) be intent(in) and workMask can then be eliminated) + real (kind=RKIND), dimension(:), allocatable :: workMask + real (kind=RKIND), dimension(:,:), allocatable :: regionMask + + ! assume no error + err = 0 + + ! set highest level pointer + dminfo = domain % dminfo + + ! find the number of regions, number of data fields and number of vertical levels + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nOceanRegionsTmpCensus', nOceanRegionsTmpCensus) + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nTemperatureBins', nTemperatureBins) + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nSalinityBins', nSalinityBins) + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nTemperatureBinsP1', nTemperatureBinsP1) + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nSalinityBinsP1', nSalinityBinsP1) + + ! allocate buffer for message passing + kBuffer=0 + kBufferLength=3*nOceanRegionsTmpCensus*nTemperatureBins*nSalinityBins + allocate(workBufferSum(kBufferLength)) + allocate(workBufferSumReduced(kBufferLength)) + workBufferSum=0.0_RKIND + workBufferSumReduced=0.0_RKIND + + ! all code below will go away when regionMask is intent(in) + ! allocate region mask and fill array + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nCells', nCells) + allocate(workMask(nCells)) + allocate(regionMask(nOceanRegionsTmpCensus,nCells)) + block => domain % blocklist + do while (associated(block)) + ! 
get pointers to pools + call mpas_pool_get_dimension(block % dimensions, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) + call mpas_pool_get_array(meshPool, 'lonCell', lonCell) + call mpas_pool_get_array(meshPool, 'latCell', latCell) + do iRegion=1,nOceanRegionsTmpCensus + call compute_mask(maxLevelCell, nCells, nCellsSolve, iRegion, lonCell, latCell, workMask) + regionMask(iRegion,:) = workMask(:) + enddo + block => block % next + enddo + ! all code above will go away when regionMask is intent(in) + + ! get pointers to analysis member arrays + call mpas_pool_get_subpool(domain % blocklist % structs, 'waterMassCensusAM', waterMassCensusAMPool) + call mpas_pool_get_array(waterMassCensusAMPool, 'waterMassCensusTemperatureValues', waterMassCensusTemperatureValues) + call mpas_pool_get_array(waterMassCensusAMPool, 'waterMassCensusSalinityValues', waterMassCensusSalinityValues) + call mpas_pool_get_array(waterMassCensusAMPool, 'waterMassFractionalDistribution', waterMassFractionalDistribution) + call mpas_pool_get_array(waterMassCensusAMPool, 'potentialDensityOfTSDiagram', potentialDensityOfTSDiagram) + call mpas_pool_get_array(waterMassCensusAMPool, 'zPositionOfTSDiagram', zPositionOfTSDiagram) + + ! get run-time configure variables + call mpas_pool_get_config(domain % configs, 'config_AM_waterMassCensus_minTemperature', minTemperature) + call mpas_pool_get_config(domain % configs, 'config_AM_waterMassCensus_maxTemperature', maxTemperature) + call mpas_pool_get_config(domain % configs, 'config_AM_waterMassCensus_minSalinity', minSalinity) + call mpas_pool_get_config(domain % configs, 'config_AM_waterMassCensus_maxSalinity', maxSalinity) + + do iRegion=1,nOceanRegionsTmpCensus + ! compute temperature and salinity domains + ! 
(note: the ability to have different t/s domains for different regions is not yet built out) + deltaTemperature = (maxTemperature-minTemperature)/nTemperatureBins + do iTemperatureBin=1,nTemperatureBinsP1 + waterMassCensusTemperatureValues(iTemperatureBin,iRegion) = minTemperature + deltaTemperature*(iTemperatureBin-1) + enddo + deltaSalinity = (maxSalinity-minSalinity)/nSalinityBins + do iSalinityBin=1,nSalinityBinsP1 + waterMassCensusSalinityValues(iSalinityBin,iRegion) = minSalinity + deltaSalinity*(iSalinityBin-1) + enddo + enddo ! iRegion + + ! initialize intent(out) of this analysis member + waterMassFractionalDistribution(:,:,:)=0.0_RKIND + potentialDensityOfTSDiagram(:,:,:)=0.0_RKIND + zPositionOfTSDiagram(:,:,:)=0.0_RKIND + + ! loop over blocks + block => domain % blocklist + do while (associated(block)) + ! get pointers to pools + call mpas_pool_get_subpool(block % structs, 'state', statePool) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) + + ! get pointers to mesh + call mpas_pool_get_dimension(block % dimensions, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) + call mpas_pool_get_dimension(block % dimensions, 'nOceanRegionsTmpCensus', nOceanRegionsTmpCensus) + call mpas_pool_get_dimension(statePool, 'index_temperature', index_temperature) + call mpas_pool_get_dimension(statePool, 'index_salinity', index_salinity) + call mpas_pool_get_array(meshPool, 'areaCell', areaCell) + call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) + + ! get pointers to data needed for analysis + call mpas_pool_get_array(statePool, 'layerThickness', layerThickness, timeLevel) + call mpas_pool_get_array(diagnosticsPool, 'potentialDensity', potentialDensity) + call mpas_pool_get_array(diagnosticsPool, 'zMid', zMid) + call mpas_pool_get_array(statePool, 'tracers', tracers, timeLevel) + + ! 
loop over and bin all data + do iCell=1,nCellsSolve + do iLevel=1,maxLevelCell(iCell) + + ! make copies of data for convenience + temperature = tracers(index_temperature,iLevel,iCell) + salinity = tracers(index_salinity,iLevel,iCell) + density = potentialDensity(iLevel,iCell) + zPosition = zMid(iLevel,iCell) + volume = layerThickness(iLevel,iCell) * areaCell(iCell) + + ! find temperature bin, cycle if bin is out of range + iTemperatureBin = int((temperature-minTemperature)/deltaTemperature) + 1 + if (iTemperatureBin < 1) cycle + if (iTemperatureBin > nTemperatureBins) cycle + + ! find salinity bin, cycle if bin is out of range + iSalinityBin = int((salinity-minSalinity)/deltaSalinity) + 1 + if (iSalinityBin < 1) cycle + if (iSalinityBin > nSalinityBins) cycle + + do iRegion=1,nOceanRegionsTmpCensus + ! add volume into water mass census array for each region + waterMassFractionalDistribution(iTemperatureBin,iSalinityBin,iRegion) = & + waterMassFractionalDistribution(iTemperatureBin,iSalinityBin,iRegion) & + + volume * regionMask(iRegion,iCell) + potentialDensityOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) = & + potentialDensityOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) & + + density * volume * regionMask(iRegion,iCell) + zPositionOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) = & + zPositionOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) & + + zPosition * volume * regionMask(iRegion,iCell) + enddo + + enddo ! iLevel + enddo ! iCell + + block => block % next + end do ! block loop + + ! 
store data in buffer in order to allow only one dmpar call + kBuffer=0 + do iTemperatureBin=1,nTemperatureBins + do iSalinityBin=1,nSalinityBins + do iRegion=1,nOceanRegionsTmpCensus + kBuffer = kBuffer+1 + workBufferSum(kBuffer) = waterMassFractionalDistribution(iTemperatureBin,iSalinityBin,iRegion) + kBuffer = kBuffer+1 + workBufferSum(kBuffer) = potentialDensityOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) + kBuffer = kBuffer+1 + workBufferSum(kBuffer) = zPositionOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) + enddo + enddo + enddo + + ! communication + call mpas_dmpar_sum_real_array(dminfo, kBufferLength, workBufferSum, workBufferSumReduced ) + + ! unpack the buffer into intent(out) of this analysis member + kBuffer=0 + do iTemperatureBin=1,nTemperatureBins + do iSalinityBin=1,nSalinityBins + do iRegion=1,nOceanRegionsTmpCensus + kBuffer = kBuffer+1 + waterMassFractionalDistribution(iTemperatureBin,iSalinityBin,iRegion) = workBufferSumReduced(kBuffer) + kBuffer = kBuffer+1 + potentialDensityOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) = workBufferSumReduced(kBuffer) + kBuffer = kBuffer+1 + zPositionOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) = workBufferSumReduced(kBuffer) + enddo + enddo + enddo + + ! normalize potentialDensityOfTSDiagram by volume in each T,S bin + do iTemperatureBin=1,nTemperatureBins + do iSalinityBin=1,nSalinityBins + do iRegion=1,nOceanRegionsTmpCensus + potentialDensityOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) = & + potentialDensityOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) / & + max(waterMassFractionalDistribution(iTemperatureBin,iSalinityBin,iRegion), 1.0e-8_RKIND) + zPositionOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) = & + zPositionOfTSDiagram(iTemperatureBin,iSalinityBin,iRegion) / & + max(waterMassFractionalDistribution(iTemperatureBin,iSalinityBin,iRegion), 1.0e-8_RKIND) + enddo + enddo + enddo + + ! 
use workBufferSum as workspace to find total volume for each region + workBufferSum = 0.0_RKIND + do iTemperatureBin=1,nTemperatureBins + do iSalinityBin=1,nSalinityBins + do iRegion=1,nOceanRegionsTmpCensus + workBufferSum(iRegion) = workBufferSum(iRegion) + waterMassFractionalDistribution(iTemperatureBin,iSalinityBin,iRegion) + enddo + enddo + enddo + + ! use this sum to convert waterMassFractionalDistribution from total volume to fractional volume + do iRegion=1,nOceanRegionsTmpCensus + waterMassFractionalDistribution(:,:,iRegion) = waterMassFractionalDistribution(:,:,iRegion) / max(workBufferSum(iRegion),1.0e-8_RKIND) + enddo + + ! deallocate buffers + deallocate(workBufferSum) + deallocate(workBufferSumReduced) + deallocate(regionMask) + deallocate(workMask) + + contains + + subroutine compute_mask(maxLevelCell, nCells, nCellsSolve, iRegion, lonCell, latCell, workMask) + ! this subroutine produces a 0/1 mask that is multiplied with workArray to + ! allow for min/max/avg to represent specific regions of the ocean domain + ! + ! NOTE: compute_mask is temporary. workMask should be intent(in) to this entire module ! + ! + integer, intent(in) :: nCells, nCellsSolve, iRegion + integer, intent(in), dimension(:) :: maxLevelCell + real(kind=RKIND), dimension(:), intent(in) :: lonCell, latCell + real(kind=RKIND), dimension(:), intent(out) :: workMask + integer :: iCell + real(kind=RKIND) :: dtr + + dtr = 4.0*atan(1.0) / 180.0_RKIND + workMask(:) = 0.0_RKIND + do iCell=1,nCellsSolve + workMask(iCell) = 1.0_RKIND + enddo + + if (iRegion.eq.1) then + ! Arctic + do iCell=1,nCellsSolve + if(latCell(iCell).lt. 60.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + write(6,*) ' Arctic ', sum(workMask) + elseif (iRegion.eq.2) then + ! Equatorial + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 
15.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt.-15.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + write(6,*) ' Equatorial ', sum(workMask) + elseif (iRegion.eq.3) then + ! Southern Ocean + do iCell=1,nCellsSolve + if(latCell(iCell).gt.-50.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + write(6,*) ' Southern Ocean ', sum(workMask) + elseif (iRegion.eq.4) then + ! Nino 3 + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt. -5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).lt.210.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).gt.270.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + write(6,*) ' Nino 3 ', sum(workMask) + elseif (iRegion.eq.5) then + ! Nino 4 + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt. -5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).lt.160.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).gt.210.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + write(6,*) ' Nino 4 ', sum(workMask) + elseif (iRegion.eq.6) then + ! Nino 3.4 + do iCell=1,nCellsSolve + if(latCell(iCell).gt. 5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(latCell(iCell).lt. -5.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).lt.190.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + if(lonCell(iCell).gt.240.0_RKIND*dtr) workMask(iCell) = 0.0_RKIND + enddo + write(6,*) ' Nino 3.4 ', sum(workMask) + else + ! global (do nothing!) + write(6,*) ' Global ', sum(workMask) + endif + + end subroutine compute_mask + + end subroutine ocn_compute_water_mass_census!}}} + +!*********************************************************************** +! +! routine ocn_restart_water_mass_census +! 
+!> \brief Save restart for MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date May 10, 2015 +!> \details +!> This routine conducts computation required to save a restart state +!> for the MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_restart_water_mass_census(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! input/output variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_restart_water_mass_census!}}} + +!*********************************************************************** +! +! routine ocn_finalize_water_mass_census +! +!> \brief Finalize MPAS-Ocean analysis member +!> \author Todd Ringler +!> \date May 10, 2015 +!> \details +!> This routine conducts all finalizations required for this +!> MPAS-Ocean analysis member. +! +!----------------------------------------------------------------------- + + subroutine ocn_finalize_water_mass_census(domain, err)!{{{ + + !----------------------------------------------------------------- + ! + ! input variables + ! + !----------------------------------------------------------------- + + type (domain_type), intent(inout) :: domain + + !----------------------------------------------------------------- + ! + ! 
input/output variables + ! + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! + ! output variables + ! + !----------------------------------------------------------------- + + integer, intent(out) :: err !< Output: error flag + + !----------------------------------------------------------------- + ! + ! local variables + ! + !----------------------------------------------------------------- + + err = 0 + + end subroutine ocn_finalize_water_mass_census!}}} + +end module ocn_water_mass_census + +! vim: foldmethod=marker diff --git a/src/core_ocean/analysis_members/mpas_ocn_zonal_mean.F b/src/core_ocean/analysis_members/mpas_ocn_zonal_mean.F index 2960dfba03..47e101ee2f 100644 --- a/src/core_ocean/analysis_members/mpas_ocn_zonal_mean.F +++ b/src/core_ocean/analysis_members/mpas_ocn_zonal_mean.F @@ -22,7 +22,6 @@ module ocn_zonal_mean use mpas_derived_types use mpas_pool_routines - use mpas_timer use mpas_dmpar use mpas_timekeeping use mpas_stream_manager @@ -46,8 +45,7 @@ module ocn_zonal_mean ! !-------------------------------------------------------------------- - public :: ocn_setup_packages_zonal_mean, & - ocn_init_zonal_mean, & + public :: ocn_init_zonal_mean, & ocn_compute_zonal_mean, & ocn_restart_zonal_mean, & ocn_finalize_zonal_mean @@ -58,9 +56,6 @@ module ocn_zonal_mean ! !-------------------------------------------------------------------- - integer :: nZonalMeanBinsUsed - type (timer_node), pointer :: amZonalMeanTimer - !*********************************************************************** contains @@ -72,7 +67,7 @@ module ocn_zonal_mean !> \brief Set up packages for MPAS-Ocean analysis member !> \author Mark Petersen !> \date November 2013 -!> \details +!> \details !> This routine is intended to configure the packages for this MPAS !> ocean analysis member ! @@ -107,14 +102,14 @@ subroutine ocn_setup_packages_zonal_mean(configPool, packagePool, err)!{{{ ! 
local variables ! !----------------------------------------------------------------- - logical, pointer :: amZonalMeanActive + logical, pointer :: zonalMeanAMActive err = 0 - call mpas_pool_get_package(packagePool, 'amZonalMeanActive', amZonalMeanActive) + call mpas_pool_get_package(packagePool, 'zonalMeanAMActive', zonalMeanAMActive) ! turn on package for this analysis member - amZonalMeanActive = .true. + zonalMeanAMActive = .true. end subroutine ocn_setup_packages_zonal_mean!}}} @@ -125,8 +120,8 @@ end subroutine ocn_setup_packages_zonal_mean!}}} !> \brief Initialize MPAS-Ocean analysis member !> \author Mark Petersen !> \date March 2014 -!> \details -!> This routine conducts all initializations required for the +!> \details +!> This routine conducts all initializations required for the !> MPAS-Ocean analysis member. ! !----------------------------------------------------------------------- @@ -163,7 +158,7 @@ subroutine ocn_init_zonal_mean(domain, err)!{{{ type (dm_info) :: dminfo type (block_type), pointer :: block - type (mpas_pool_type), pointer :: amZonalMeanPool + type (mpas_pool_type), pointer :: zonalMeanAMPool type (mpas_pool_type), pointer :: meshPool integer :: iBin @@ -174,8 +169,7 @@ subroutine ocn_init_zonal_mean(domain, err)!{{{ real (kind=RKIND), dimension(1) :: minBin, maxBin, minBinDomain, maxBinDomain real (kind=RKIND), dimension(:), pointer :: binCenterZonalMean, binBoundaryZonalMean, binVariable - integer, pointer :: config_number_zonal_mean_bins - real (kind=RKIND), pointer :: config_min_zonal_mean_bin, config_max_zonal_mean_bin + real (kind=RKIND), pointer :: config_AM_zonalMean_min_bin, config_AM_zonalMean_max_bin logical, pointer :: on_a_sphere @@ -183,20 +177,17 @@ subroutine ocn_init_zonal_mean(domain, err)!{{{ err = 0 - minBin = 1.0e34 - maxBin = -1.0e34 + minBin = 1.0e34_RKIND + maxBin = -1.0e34_RKIND call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nZonalMeanBins', nZonalMeanBins) - call mpas_pool_get_subpool(domain % 
blocklist % structs, 'amZonalMean', amZonalMeanPool) + call mpas_pool_get_subpool(domain % blocklist % structs, 'zonalMeanAM', zonalMeanAMPool) - call mpas_pool_get_config(domain % configs, 'config_number_zonal_mean_bins', config_number_zonal_mean_bins) - call mpas_pool_get_config(domain % configs, 'config_min_zonal_mean_bin', config_min_zonal_mean_bin) - call mpas_pool_get_config(domain % configs, 'config_max_zonal_mean_bin', config_max_zonal_mean_bin) + call mpas_pool_get_config(domain % configs, 'config_AM_zonalMean_min_bin', config_AM_zonalMean_min_bin) + call mpas_pool_get_config(domain % configs, 'config_AM_zonalMean_max_bin', config_AM_zonalMean_max_bin) - nZonalMeanBinsUsed = min( config_number_zonal_mean_bins, nZonalMeanBins ) - - call mpas_pool_get_array(amZonalMeanPool, 'binCenterZonalMean', binCenterZonalMean) - call mpas_pool_get_array(amZonalMeanPool, 'binBoundaryZonalMean', binBoundaryZonalMean) + call mpas_pool_get_array(zonalMeanAMPool, 'binCenterZonalMean', binCenterZonalMean) + call mpas_pool_get_array(zonalMeanAMPool, 'binBoundaryZonalMean', binBoundaryZonalMean) ! Find min and max values of binning variable. block => domain % blocklist @@ -209,7 +200,7 @@ subroutine ocn_init_zonal_mean(domain, err)!{{{ call mpas_pool_get_array(meshPool, 'latCell', binVariable) else call mpas_pool_get_array(meshPool, 'yCell', binVariable) - end if + end if minBin = min(minBin, minval(binVariable) ) maxBin = max(maxBin, maxval(binVariable) ) @@ -221,33 +212,33 @@ subroutine ocn_init_zonal_mean(domain, err)!{{{ call mpas_dmpar_max_real_array(dminfo, 1, maxBin, maxBinDomain) ! Set up bins. - binBoundaryZonalMean = -1.0e34 - binCenterZonalMean = -1.0e34 + binBoundaryZonalMean = -1.0e34_RKIND + binCenterZonalMean = -1.0e34_RKIND ! Change min and max bin bounds to configuration settings, if applicable. 
- if (config_min_zonal_mean_bin > -1.0e33) then - minBinDomain(1) = config_min_zonal_mean_bin + if (config_AM_zonalMean_min_bin > -1.0e33_RKIND) then + minBinDomain(1) = config_AM_zonalMean_min_bin else ! use measured min value, but decrease slightly to include least value. - minBinDomain(1) = minBinDomain(1) - 1.0e-10 * abs(minBinDomain(1)) + minBinDomain(1) = minBinDomain(1) - 1.0e-10_RKIND * abs(minBinDomain(1)) end if - if (config_max_zonal_mean_bin > -1.0e33) then - maxBinDomain(1) = config_max_zonal_mean_bin + if (config_AM_zonalMean_max_bin > -1.0e33_RKIND) then + maxBinDomain(1) = config_AM_zonalMean_max_bin else ! use measured max value, but increase slightly to include max value. - maxBinDomain(1) = maxBinDomain(1) + 1.0e-10 * abs(maxBinDomain(1)) + maxBinDomain(1) = maxBinDomain(1) + 1.0e-10_RKIND * abs(maxBinDomain(1)) end if binBoundaryZonalMean(1) = minBinDomain(1) - binWidth = (maxBinDomain(1) - minBinDomain(1)) / nZonalMeanBinsUsed + binWidth = (maxBinDomain(1) - minBinDomain(1)) / nZonalMeanBins - binCenterZonalMean(1) = minBinDomain(1) + binWidth/2.0 - do iBin = 2, nZonalMeanBinsUsed + binCenterZonalMean(1) = minBinDomain(1) + binWidth/2.0_RKIND + do iBin = 2, nZonalMeanBins binBoundaryZonalMean(iBin) = binBoundaryZonalMean(iBin-1) + binWidth binCenterZonalMean(iBin) = binCenterZonalMean(iBin-1) + binWidth end do - binBoundaryZonalMean(nZonalMeanBinsUsed+1) = binBoundaryZonalMean(nZonalMeanBinsUsed) + binWidth + binBoundaryZonalMean(nZonalMeanBins+1) = binBoundaryZonalMean(nZonalMeanBins) + binWidth end subroutine ocn_init_zonal_mean!}}} @@ -258,7 +249,7 @@ end subroutine ocn_init_zonal_mean!}}} !> \brief Compute MPAS-Ocean analysis member !> \author Mark Petersen !> \date March 2014 -!> \details +!> \details !> This routine conducts all computation required for this !> MPAS-Ocean analysis member. ! 
@@ -298,7 +289,7 @@ subroutine ocn_compute_zonal_mean(domain, timeLevel, err)!{{{ type (dm_info) :: dminfo type (block_type), pointer :: block - type (mpas_pool_type), pointer :: amZonalMeanPool + type (mpas_pool_type), pointer :: zonalMeanAMPool type (mpas_pool_type), pointer :: statePool type (mpas_pool_type), pointer :: meshPool type (mpas_pool_type), pointer :: scratchPool @@ -321,9 +312,7 @@ subroutine ocn_compute_zonal_mean(domain, timeLevel, err)!{{{ err = 0 dminfo = domain % dminfo - call mpas_timer_start("compute_zonal_mean", .false., amZonalMeanTimer) - - call mpas_pool_get_subpool(domain % blocklist % structs, 'amZonalMean', amZonalMeanPool) + call mpas_pool_get_subpool(domain % blocklist % structs, 'zonalMeanAM', zonalMeanAMPool) call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) call mpas_pool_get_subpool(domain % blocklist % structs, 'mesh', meshPool) @@ -333,13 +322,13 @@ subroutine ocn_compute_zonal_mean(domain, timeLevel, err)!{{{ call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nZonalMeanBins', nZonalMeanBins) call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nVertLevels', nVertLevels) - call mpas_pool_get_array(amZonalMeanPool, 'binBoundaryZonalMean', binBoundaryZonalMean) + call mpas_pool_get_array(zonalMeanAMPool, 'binBoundaryZonalMean', binBoundaryZonalMean) - allocate(sumZonalMean(nZonalMeanVariables,nVertLevels,nZonalMeanBinsUsed), & - totalSumZonalMean(nZonalMeanVariables,nVertLevels,nZonalMeanBinsUsed), & + allocate(sumZonalMean(nZonalMeanVariables,nVertLevels,nZonalMeanBins), & + totalSumZonalMean(nZonalMeanVariables,nVertLevels,nZonalMeanBins), & normZonalMean(nZonalMeanVariables,nVertLevels,nZonalMeanBins)) - sumZonalMean = 0.0 + sumZonalMean = 0.0_RKIND block => domain % blocklist do while (associated(block)) @@ -358,11 +347,11 @@ subroutine ocn_compute_zonal_mean(domain, timeLevel, err)!{{{ call mpas_pool_get_dimension(block % dimensions, 'nCellsSolve', nCellsSolve) - call 
mpas_pool_get_config(meshPool, 'on_a_sphere', on_a_sphere) + call mpas_pool_get_config(meshPool, 'on_a_sphere', on_a_sphere) - call mpas_pool_get_array(meshPool, 'areaCell', areaCell) - call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) - call mpas_pool_get_array(statePool, 'tracers', tracers,timeLevel) + call mpas_pool_get_array(meshPool, 'areaCell', areaCell) + call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) + call mpas_pool_get_array(statePool, 'tracers', tracers, timeLevel) call mpas_pool_get_array(diagnosticsPool, 'velocityZonal', velocityZonal) call mpas_pool_get_array(diagnosticsPool, 'velocityMeridional', velocityMeridional) @@ -371,7 +360,7 @@ subroutine ocn_compute_zonal_mean(domain, timeLevel, err)!{{{ call mpas_pool_get_array(meshPool, 'latCell', binVariable) else call mpas_pool_get_array(meshPool, 'yCell', binVariable) - end if + end if ! note that sum is for each vertical index, which is a little wrong for z-star and very wrong for PBCs. do iCell = 1, nCellsSolve @@ -379,7 +368,7 @@ subroutine ocn_compute_zonal_mean(domain, timeLevel, err)!{{{ if (binVariable(iCell) .lt. binBoundaryZonalMean(1)) cycle - do iBin = 1, nZonalMeanBinsUsed + do iBin = 1, nZonalMeanBins if (binVariable(iCell) .lt. binBoundaryZonalMean(iBin+1) ) then do k = 1, kMax @@ -408,24 +397,24 @@ subroutine ocn_compute_zonal_mean(domain, timeLevel, err)!{{{ end do ! mpi summation over all processors - call mpas_dmpar_sum_real_array(dminfo, nVertLevels*nZonalMeanBinsUsed*nZonalMeanVariables, sumZonalMean, totalSumZonalMean) + call mpas_dmpar_sum_real_array(dminfo, nVertLevels*nZonalMeanBins*nZonalMeanVariables, sumZonalMean, totalSumZonalMean) ! normalize by area - do iBin = 1, nZonalMeanBinsUsed + do iBin = 1, nZonalMeanBins do k = 1, nVertLevels ! Check if there is any area accumulated. If so, normalize by the area. 
- if (totalSumZonalMean(1,k,iBin) > 1.0e-12) then + if (totalSumZonalMean(1,k,iBin) > 1.0e-12_RKIND) then normZonalMean(:,k,iBin) = totalSumZonalMean(:,k,iBin) / totalSumZonalMean(1,k,iBin) else - normZonalMean(:,k,iBin) = -1.0e34 + normZonalMean(:,k,iBin) = -1.0e34_RKIND end if end do end do - do iBin = nZonalMeanBinsUsed + 1, nZonalMeanBins - normZonalMean(:,:,iBin) = -1.0e34 + do iBin = nZonalMeanBins + 1, nZonalMeanBins + normZonalMean(:,:,iBin) = -1.0e34_RKIND end do - ! Even though these variables do not include an index that is decomposed amongst + ! Even though these variables do not include an index that is decomposed amongst ! domain partitions, we assign them within a block loop so that all blocks have the ! correct values for writing output. block => domain % blocklist @@ -433,14 +422,14 @@ subroutine ocn_compute_zonal_mean(domain, timeLevel, err)!{{{ call mpas_pool_get_dimension(block % dimensions, 'nZonalMeanBins', nZonalMeanBins) call mpas_pool_get_dimension(block % dimensions, 'nVertLevels', nVertLevels) - call mpas_pool_get_subpool(block % structs, 'amZonalMean', amZonalMeanPool) + call mpas_pool_get_subpool(block % structs, 'zonalMeanAM', zonalMeanAMPool) call mpas_pool_get_subpool(block % structs, 'state', statePool) call mpas_pool_get_dimension(statePool, 'num_tracers', num_tracers) - call mpas_pool_get_array(amZonalMeanPool, 'tracersZonalMean', tracersZonalMean) - call mpas_pool_get_array(amZonalMeanPool, 'velocityZonalZonalMean', velocityZonalZonalMean) - call mpas_pool_get_array(amZonalMeanPool, 'velocityMeridionalZonalMean', velocityMeridionalZonalMean) + call mpas_pool_get_array(zonalMeanAMPool, 'tracersZonalMean', tracersZonalMean) + call mpas_pool_get_array(zonalMeanAMPool, 'velocityZonalZonalMean', velocityZonalZonalMean) + call mpas_pool_get_array(zonalMeanAMPool, 'velocityMeridionalZonalMean', velocityMeridionalZonalMean) do iBin = 1, nZonalMeanBins do k = 1, nVertLevels @@ -462,8 +451,6 @@ subroutine ocn_compute_zonal_mean(domain, 
timeLevel, err)!{{{ deallocate(sumZonalMean,totalSumZonalMean,normZonalMean) - call mpas_timer_stop("zonal_mean", amZonalMeanTimer) - end subroutine ocn_compute_zonal_mean!}}} !*********************************************************************** @@ -473,7 +460,7 @@ end subroutine ocn_compute_zonal_mean!}}} !> \brief Save restart for MPAS-Ocean analysis member !> \author Mark Petersen !> \date March 2014 -!> \details +!> \details !> This routine conducts computation required to save a restart state !> for the MPAS-Ocean analysis member. ! @@ -520,7 +507,7 @@ end subroutine ocn_restart_zonal_mean!}}} !> \brief Finalize MPAS-Ocean analysis member !> \author Mark Petersen !> \date March 2014 -!> \details +!> \details !> This routine conducts all finalizations required for this !> MPAS-Ocean analysis member. ! diff --git a/src/core_ocean/build_options.mk b/src/core_ocean/build_options.mk index 652f74856d..8c6c25a7c7 100644 --- a/src/core_ocean/build_options.mk +++ b/src/core_ocean/build_options.mk @@ -1,18 +1,10 @@ -PWD=$(shell pwd) -ifeq "$(MODE)" "analysis" - EXE_NAME=ocean_analysis_model - NAMELIST_SUFFIX=ocean_analysis - FCINCLUDES += -I$(PWD)/src/core_ocean/mode_analysis -I$(PWD)/src/core_ocean/shared -I$(PWD)/src/core_ocean/analysis_members -I$(PWD)/src/core_ocean/cvmix -else ifeq "$(MODE)" "forward" - EXE_NAME=ocean_forward_model - NAMELIST_SUFFIX=ocean_forward - FCINCLUDES += -I$(PWD)/src/core_ocean/mode_forward -I$(PWD)/src/core_ocean/shared -I$(PWD)/src/core_ocean/analysis_members -I$(PWD)/src/core_ocean/cvmix -else - EXE_NAME=ocean*_model - NAMELIST_SUFFIX=ocean* +ifeq "$(ROOT_DIR)" "" + ROOT_DIR=$(shell pwd)/src endif +EXE_NAME=ocean_model +NAMELIST_SUFFIX=ocean +FCINCLUDES += -I$(ROOT_DIR)/core_ocean/driver -I$(ROOT_DIR)/core_ocean/mode_forward -I$(ROOT_DIR)/core_ocean/mode_analysis -I$(ROOT_DIR)/core_ocean/shared -I$(ROOT_DIR)/core_ocean/analysis_members -I$(ROOT_DIR)/core_ocean/cvmix override CPPFLAGS += -DCORE_OCEAN report_builds: - @echo "CORE=ocean 
MODE=analysis" - @echo "CORE=ocean MODE=forward" + @echo "CORE=ocean" diff --git a/src/core_ocean/driver/Makefile b/src/core_ocean/driver/Makefile new file mode 100644 index 0000000000..4d5ad04589 --- /dev/null +++ b/src/core_ocean/driver/Makefile @@ -0,0 +1,24 @@ +.SUFFIXES: .F .o + +OBJS = mpas_ocn_core.o \ + mpas_ocn_core_interface.o + +all: core_ocean + +core_ocean: $(OBJS) + +mpas_ocn_core.o: + +mpas_ocn_core_interface.o: mpas_ocn_core.o + +clean: + $(RM) *.o *.mod *.f90 + +.F.o: + $(RM) $@ $*.mod +ifeq "$(GEN_F90)" "true" + $(CPP) $(CPPFLAGS) $(CPPINCLUDES) $< > $*.f90 + $(FC) $(FFLAGS) -c $*.f90 $(FCINCLUDES) +else + $(FC) $(CPPFLAGS) $(FFLAGS) -c $*.F $(CPPINCLUDES) $(FCINCLUDES) +endif diff --git a/src/core_ocean/driver/mpas_ocn_core.F b/src/core_ocean/driver/mpas_ocn_core.F new file mode 100644 index 0000000000..eedc9734c5 --- /dev/null +++ b/src/core_ocean/driver/mpas_ocn_core.F @@ -0,0 +1,141 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +! +! ocn_core +! +!> \brief Main driver for MPAS ocean core +!> \author Doug Jacobsen, Mark Petersen, Todd Ringler +!> \date September 2011 +!> \details +!> This module contains initialization and timestep drivers for +!> the MPAS ocean core. +! 
+!----------------------------------------------------------------------- + +module ocn_core + + use mpas_kind_types + use mpas_derived_types + use mpas_pool_routines + use mpas_stream_manager + use mpas_timekeeping + use mpas_dmpar + use mpas_timer + use mpas_io_units + + use ocn_forward_mode + use ocn_analysis_mode + + implicit none + private + + public :: ocn_core_init, ocn_core_run, ocn_core_finalize + + contains + +!*********************************************************************** +! +! function ocn_core_init +! +!> \brief MPAS-Ocean Initialize Driver +!> \author Doug Jacobsen +!> \date 06/15/2015 +!> \details +!> This function is a driver function for the initialization of the different +!> run modes within MPAS-Ocean +! +!----------------------------------------------------------------------- + + function ocn_core_init(domain, startTimeStamp) result(ierr)!{{{ + + type (domain_type), intent(inout) :: domain + character(len=*), intent(out) :: startTimeStamp + integer :: ierr + + character (len=StrKIND), pointer :: config_ocean_run_mode + + ierr = 0 + + call mpas_pool_get_config(domain % configs, 'config_ocean_run_mode', config_ocean_run_mode) + + if ( trim(config_ocean_run_mode) == 'forward' ) then + ierr = ocn_forward_mode_init(domain, startTimeStamp) + else if ( trim(config_ocean_run_mode) == 'analysis' ) then + ierr = ocn_analysis_mode_init(domain, startTimeStamp) + end if + + end function ocn_core_init!}}} + +!*********************************************************************** +! +! function ocn_core_run +! +!> \brief MPAS-Ocean Run Driver +!> \author Doug Jacobsen +!> \date 06/15/2015 +!> \details +!> This function is a driver function for the run portion of the different +!> run modes within MPAS-Ocean. +! 
+!----------------------------------------------------------------------- + + function ocn_core_run(domain) result(iErr)!{{{ + + type (domain_type), intent(inout) :: domain + + integer :: iErr + + character(len=StrKIND), pointer :: config_ocean_run_mode + + iErr = 0 + + call mpas_pool_get_config(domain % configs, 'config_ocean_run_mode', config_ocean_run_mode) + + if ( trim(config_ocean_run_mode) == 'forward' ) then + ierr = ocn_forward_mode_run(domain) + else if ( trim(config_ocean_run_mode) == 'analysis' ) then + ierr = ocn_analysis_mode_run(domain) + end if + + end function ocn_core_run!}}} + +!*********************************************************************** +! +! function ocn_core_finalize +! +!> \brief MPAS-Ocean Finalize Driver +!> \author Doug Jacobsen +!> \date 06/15/2015 +!> \details +!> This function is a driver function for finalizing the different +!> run modes within MPAS-Ocean. +! +!----------------------------------------------------------------------- + function ocn_core_finalize(domain) result(ierr)!{{{ + + type (domain_type), intent(inout) :: domain + integer :: ierr + + character(len=StrKIND), pointer :: config_ocean_run_mode + + ierr = 0 + + call mpas_pool_get_config(domain % configs, 'config_ocean_run_mode', config_ocean_run_mode) + + if ( trim(config_ocean_run_mode) == 'forward' ) then + ierr = ocn_forward_mode_finalize(domain) + else if (trim(config_ocean_run_mode) == 'analysis' ) then + ierr = ocn_analysis_mode_finalize(domain) + end if + + end function ocn_core_finalize!}}} + +end module ocn_core + +! 
vim: foldmethod=marker diff --git a/src/core_ocean/mode_forward/mpas_ocn_core_interface.F b/src/core_ocean/driver/mpas_ocn_core_interface.F similarity index 72% rename from src/core_ocean/mode_forward/mpas_ocn_core_interface.F rename to src/core_ocean/driver/mpas_ocn_core_interface.F index 25c4c1fdeb..04e01260bb 100644 --- a/src/core_ocean/mode_forward/mpas_ocn_core_interface.F +++ b/src/core_ocean/driver/mpas_ocn_core_interface.F @@ -13,8 +13,14 @@ module ocn_core_interface use mpas_constants use mpas_io_units use ocn_core + use mpas_attlist - public + use ocn_forward_mode + use ocn_analysis_mode + + private + + public :: ocn_setup_core, ocn_setup_domain contains !*********************************************************************** @@ -42,6 +48,7 @@ subroutine ocn_setup_core(core)!{{{ core % get_mesh_stream => ocn_get_mesh_stream core % setup_immutable_streams => ocn_setup_immutable_streams core % setup_derived_dimensions => ocn_setup_derived_dimensions + core % setup_decomposed_dimensions => ocn_setup_decomposed_dimensions core % setup_block => ocn_setup_block core % setup_namelist => ocn_setup_namelists @@ -86,18 +93,19 @@ end subroutine ocn_setup_domain!}}} !> *not* allocated until after this routine is called. ! 
!----------------------------------------------------------------------- - function ocn_setup_packages(configPool, packagePool) result(ierr)!{{{ + function ocn_setup_packages(configPool, packagePool, iocontext) result(ierr)!{{{ - use mpas_derived_types use ocn_analysis_driver - implicit none - type (mpas_pool_type), intent(inout) :: configPool type (mpas_pool_type), intent(inout) :: packagePool + type (mpas_io_context_type), intent(inout) :: iocontext + integer :: ierr + integer :: err_tmp + logical, pointer :: forwardModeActive, analysisModeActive logical, pointer :: thicknessFilterActive logical, pointer :: splitTimeIntegratorActive logical, pointer :: surfaceRestoringActive @@ -107,10 +115,12 @@ function ocn_setup_packages(configPool, packagePool) result(ierr)!{{{ logical, pointer :: config_use_freq_filtered_thickness logical, pointer :: config_frazil_ice_formation - character (len=StrKIND), pointer :: config_time_integrator, config_forcing_type, config_pressure_gradient_type - - ierr = 0 + character (len=StrKIND), pointer :: config_time_integrator, config_forcing_type + character (len=StrKIND), pointer :: config_ocean_run_mode, config_pressure_gradient_type + ! 
Get Packages + call mpas_pool_get_package(packagePool, 'forwardModeActive', forwardModeActive) + call mpas_pool_get_package(packagePool, 'analysisModeActive', analysisModeActive) call mpas_pool_get_package(packagePool, 'thicknessFilterActive', thicknessFilterActive) call mpas_pool_get_package(packagePool, 'splitTimeIntegratorActive', splitTimeIntegratorActive) call mpas_pool_get_package(packagePool, 'surfaceRestoringActive', surfaceRestoringActive) @@ -118,39 +128,49 @@ function ocn_setup_packages(configPool, packagePool) result(ierr)!{{{ call mpas_pool_get_package(packagePool, 'frazilIceActive', frazilIceActive) call mpas_pool_get_package(packagePool, 'inSituEOSActive', inSituEOSActive) - call mpas_pool_get_config(configPool, 'config_use_freq_filtered_thickness', config_use_freq_filtered_thickness) - call mpas_pool_get_config(configPool, 'config_time_integrator', config_time_integrator) - call mpas_pool_get_config(configPool, 'config_forcing_type', config_forcing_type) - call mpas_pool_get_config(configPool, 'config_frazil_ice_formation', config_frazil_ice_formation) - call mpas_pool_get_config(configPool, 'config_pressure_gradient_type', config_pressure_gradient_type) + call mpas_pool_get_config(configPool, 'config_ocean_run_mode', config_ocean_run_mode) - if (config_use_freq_filtered_thickness) then - thicknessFilterActive = .true. - end if + ierr = 0 - if (config_time_integrator == trim('split_explicit') & - .or. config_time_integrator == trim('unsplit_explicit') ) then + if ( trim(config_ocean_run_mode) == 'forward' ) then + forwardModeActive = .true. - splitTimeIntegratorActive = .true. 
- end if + call mpas_pool_get_config(configPool, 'config_use_freq_filtered_thickness', config_use_freq_filtered_thickness) + call mpas_pool_get_config(configPool, 'config_time_integrator', config_time_integrator) + call mpas_pool_get_config(configPool, 'config_forcing_type', config_forcing_type) + call mpas_pool_get_config(configPool, 'config_frazil_ice_formation', config_frazil_ice_formation) + call mpas_pool_get_config(configPool, 'config_pressure_gradient_type', config_pressure_gradient_type) - if (config_forcing_type == trim('restoring')) then - surfaceRestoringActive = .true. - else if (config_forcing_type == trim('bulk')) then - bulkForcingActive = .true. - end if + if (config_use_freq_filtered_thickness) then + thicknessFilterActive = .true. + end if - if (config_frazil_ice_formation) then - frazilIceActive = .true. - end if + if (config_time_integrator == trim('split_explicit') & + .or. config_time_integrator == trim('unsplit_explicit') ) then - if (config_pressure_gradient_type.eq.'Jacobian_from_TS') then - inSituEOSActive = .true. - end if + splitTimeIntegratorActive = .true. + end if + + if (config_forcing_type == trim('restoring')) then + surfaceRestoringActive = .true. + else if (config_forcing_type == trim('bulk')) then + bulkForcingActive = .true. + end if - call ocn_analysis_setup_packages(configPool, packagePool, err_tmp) - ierr = ior(ierr, err_tmp) + if (config_frazil_ice_formation) then + frazilIceActive = .true. + end if + if (config_pressure_gradient_type.eq.'Jacobian_from_TS') then + inSituEOSActive = .true. + end if + + call ocn_analysis_setup_packages(configPool, packagePool, err_tmp) + ierr = ior(ierr, err_tmp) + else if (trim(config_ocean_run_mode) == 'analysis' ) then + analysisModeActive = .true. 
+ call ocn_analysis_setup_packages(configPool, packagePool, ierr) + end if end function ocn_setup_packages!}}} @@ -221,7 +241,16 @@ function ocn_setup_clock(core_clock, configs) result(ierr)!{{{ type (mpas_pool_type), intent(inout) :: configs integer :: ierr - call ocn_simulation_clock_init(core_clock, configs, ierr) + character(len=StrKIND), pointer :: config_ocean_run_mode + + call mpas_pool_get_config(configs, 'config_ocean_run_mode', config_ocean_run_mode) + + if ( trim(config_ocean_run_mode) == 'forward' ) then + ierr = ocn_forward_mode_setup_clock(core_clock, configs) + else if ( trim(config_ocean_run_mode) == 'analysis' ) then + ierr = ocn_analysis_mode_setup_clock(core_clock, configs) + end if + end function ocn_setup_clock!}}} @@ -252,17 +281,14 @@ function ocn_get_mesh_stream(configs, stream) result(ierr)!{{{ integer :: ierr logical, pointer :: config_do_restart + character(len=StrKIND), pointer :: config_ocean_run_mode ierr = 0 - call mpas_pool_get_config(configs, 'config_do_restart', config_do_restart) + call mpas_pool_get_config(configs, 'config_ocean_run_mode', config_ocean_run_mode) - if (.not. associated(config_do_restart)) then - write(stream,'(a)') '' - else if (config_do_restart) then - write(stream,'(a)') 'restart' - else - write(stream,'(a)') 'input' + if ( trim(config_ocean_run_mode) == 'forward' .or. 
trim(config_ocean_run_mode) == 'analysis' ) then + write(stream,'(a)') 'mesh' end if end function ocn_get_mesh_stream!}}} diff --git a/src/core_ocean/get_cvmix.sh b/src/core_ocean/get_cvmix.sh index 96c50e161b..736a43623c 100755 --- a/src/core_ocean/get_cvmix.sh +++ b/src/core_ocean/get_cvmix.sh @@ -1,7 +1,7 @@ #!/bin/bash ## CVMix Tag for build -CVMIX_TAG=v0.60-beta +CVMIX_TAG=v0.64-beta ## Subdirectory in CVMix repo to use CVMIX_SUBDIR=src/shared diff --git a/src/core_ocean/mode_analysis/Makefile b/src/core_ocean/mode_analysis/Makefile index 2407f26332..fbf54f8431 100644 --- a/src/core_ocean/mode_analysis/Makefile +++ b/src/core_ocean/mode_analysis/Makefile @@ -1,16 +1,15 @@ .SUFFIXES: .F .o -OBJS = mpas_ocn_analysis_core.o \ - mpas_ocn_core_interface.o +OBJS = mpas_ocn_analysis_mode.o -all: $(OBJS) +all: analysis_mode -mpas_ocn_core_interface.o: mpas_ocn_analysis_core.o +analysis_mode: $(OBJS) -mpas_ocn_analysis_core.o: +mpas_ocn_analysis_mode.o: clean: - $(RM) *.o *.i *.mod *.f90 libdycore.a + $(RM) *.o *.i *.mod *.f90 .F.o: $(RM) $@ $*.mod diff --git a/src/core_ocean/mode_analysis/mpas_ocn_analysis_core.F b/src/core_ocean/mode_analysis/mpas_ocn_analysis_core.F deleted file mode 100644 index 690ca33826..0000000000 --- a/src/core_ocean/mode_analysis/mpas_ocn_analysis_core.F +++ /dev/null @@ -1,460 +0,0 @@ -! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) -! and the University Corporation for Atmospheric Research (UCAR). -! -! Unless noted otherwise source code is licensed under the BSD license. -! Additional copyright and license information can be found in the LICENSE file -! distributed with this code, or at http://mpas-dev.github.com/license.html -! -!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| -! -! ocn_core -! 
-!> \brief Main driver for MPAS ocean analysis core in post-processing mode -!> \author Mark Petersen -!> \date November 2013 -!> \details -!> This module contains the drivers for the MPAS ocean analysis core in -!> post-processing mode. None of these routines are used in run-time mode -!> by the ocean core. -! -!----------------------------------------------------------------------- - -module ocn_core - - use mpas_framework - use mpas_timekeeping - use mpas_dmpar - use mpas_timer - use mpas_io_units - - use ocn_analysis_driver - use ocn_init - use ocn_diagnostics - use ocn_equation_of_state - use ocn_constants - use ocn_time_average - - type (MPAS_Clock_type), pointer :: clock - - character(len=*), parameter :: statsAlarmID = 'stats' - character(len=*), parameter :: coupleAlarmID = 'couple' - - type (timer_node), pointer :: globalDiagTimer, timeIntTimer, testSuiteTimer - type (timer_node), pointer :: initDiagSolveTimer - - contains - -!*********************************************************************** -! -! routine ocn_core_init -! -!> \brief Initialize MPAS-Ocean analysis post-processing core -!> \author Mark Petersen -!> \date November 2013 -!> \details -!> This routine calls all initializations required to begin a -!> simulation with MPAS-Ocean -! -!----------------------------------------------------------------------- - - function ocn_core_init(domain, startTimeStamp) result(err)!{{{ - - use mpas_derived_types - use mpas_pool_routines - use mpas_stream_manager - - implicit none - - type (domain_type), intent(inout) :: domain - character(len=*), intent(out) :: startTimeStamp - - type (block_type), pointer :: block - type (dm_info) :: dminfo - type (mpas_pool_type), pointer :: diagnosticsPool - - integer :: err, err_tmp - - type (MPAS_timeInterval_type) :: timeStep - - ! remove dt later - real (kind=RKIND) :: dt - character (len=StrKIND), pointer :: xtime - type (MPAS_Time_Type) :: startTime - - err = 0 - - ! - ! 
Set "local" clock to point to the clock contained in the domain type - ! - clock => domain % clock - - ! - ! Set startTimeStamp based on the start time of the simulation clock - ! - startTime = mpas_get_clock_time(clock, MPAS_START_TIME, err_tmp) - call mpas_get_time(startTime, dateTimeString=startTimeStamp) - err = ior(err, err_tmp) - - ! Setup ocean config pool - call ocn_constants_init(domain % configs, domain % packages) - - ! - ! Read input data for model - ! - call MPAS_stream_mgr_read(domain % streamManager, streamID='input', ierr=err) - call MPAS_stream_mgr_reset_alarms(domain % streamManager, streamID='input', ierr=err) - - dminfo = domain % dminfo - err = 0 - - ! Initialize submodules before initializing blocks. - call ocn_equation_of_state_init(err_tmp) - err = ior(err, err_tmp) - - call ocn_analysis_init(domain, err_tmp) - err = ior(err, err_tmp) - - call ocn_init_vert_coord(domain) - - call ocn_compute_max_level(domain) - - timeStep = mpas_get_clock_timestep(clock, ierr=err_tmp) - call mpas_get_timeInterval(timeStep, dt=dt) - - block => domain % blocklist - do while (associated(block)) - call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) - call mpas_pool_get_array(diagnosticsPool, 'xtime', xtime) - - call mpas_init_block(block, dt, err) - if(err.eq.1) then - call mpas_dmpar_abort(dminfo) - endif - - xtime = startTimeStamp - block => block % next - end do - - end function ocn_core_init!}}} - -!*********************************************************************** -! -! routine mpas_simulation_clock_init -! -!> \brief Initialize timer variables -!> \author Mark Petersen -!> \date November 2013 -!> \details -!> This routine initializes all timer variables -! 
-!----------------------------------------------------------------------- - - subroutine ocn_simulation_clock_init(core_clock, configs, ierr)!{{{ - - implicit none - - type (MPAS_Clock_type), intent(inout) :: core_clock - type (mpas_pool_type), intent(inout) :: configs - integer, intent(out) :: ierr - - type (MPAS_Time_Type) :: startTime, stopTime, alarmStartTime - type (MPAS_TimeInterval_type) :: runDuration, timeStep, alarmTimeStep - character(len=StrKIND) :: restartTimeStamp - integer :: err_tmp - character (len=StrKIND), pointer :: config_start_time, config_stop_time - character (len=StrKIND), pointer :: config_run_duration - character (len=StrKIND), pointer :: config_stats_interval, config_dt, config_restart_timestamp_name - - ierr = 0 - - call mpas_pool_get_config(configs, 'config_dt', config_dt) - call mpas_pool_get_config(configs, 'config_start_time', config_start_time) - call mpas_pool_get_config(configs, 'config_stop_time', config_stop_time) - call mpas_pool_get_config(configs, 'config_run_duration', config_run_duration) - call mpas_pool_get_config(configs, 'config_stats_interval', config_stats_interval) - call mpas_pool_get_config(configs, 'config_restart_timestamp_name', config_restart_timestamp_name) - - if ( trim(config_start_time) == "file" ) then - open(22,file=config_restart_timestamp_name,form='formatted',status='old') - read(22,*) restartTimeStamp - close(22) - call mpas_set_time(curr_time=startTime, dateTimeString=restartTimeStamp, ierr=err_tmp) - else - call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time, ierr=err_tmp) - end if - - call mpas_set_timeInterval(timeStep, timeString=config_dt, ierr=err_tmp) - if (trim(config_run_duration) /= "none") then - call mpas_set_timeInterval(runDuration, timeString=config_run_duration, ierr=err_tmp) - call mpas_create_clock(core_clock, startTime=startTime, timeStep=timeStep, runDuration=runDuration, ierr=err_tmp) - - if (trim(config_stop_time) /= "none") then - call 
mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=err_tmp) - if(startTime + runduration /= stopTime) then - write(stderrUnit,*) 'Warning: config_run_duration and config_stop_time are inconsitent: using config_run_duration.' - end if - end if - else if (trim(config_stop_time) /= "none") then - call mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=err_tmp) - call mpas_create_clock(core_clock, startTime=startTime, timeStep=timeStep, stopTime=stopTime, ierr=err_tmp) - else - write(stderrUnit,*) 'Error: Neither config_run_duration nor config_stop_time were specified.' - ierr = 1 - end if - - if (trim(config_stats_interval) /= "none") then - call mpas_set_timeInterval(alarmTimeStep, timeString=config_stats_interval, ierr=err_tmp) - alarmStartTime = startTime + alarmTimeStep - call mpas_add_clock_alarm(core_clock, statsAlarmID, alarmStartTime, alarmTimeStep, ierr=err_tmp) - end if - - end subroutine ocn_simulation_clock_init!}}} - -!*********************************************************************** -! -! routine mpas_init_block -! -!> \brief Initialize blocks within MPAS-Ocean analysis post-processing core -!> \author Mark Petersen -!> \date November 2013 -!> \details -!> This routine calls all block-level initializations required to begin a -!> simulation with MPAS-Ocean -! 
-!----------------------------------------------------------------------- - subroutine mpas_init_block(block, dt, err)!{{{ - - use mpas_derived_types - use mpas_pool_routines - use mpas_rbf_interpolation - use mpas_vector_operations - use mpas_vector_reconstruction - use mpas_tracer_advection_helpers - - implicit none - - type (block_type), intent(inout) :: block - real (kind=RKIND), intent(in) :: dt - integer, intent(out) :: err - - type (mpas_pool_type), pointer :: meshPool, averagePool, statePool - type (mpas_pool_type), pointer :: forcingPool, diagnosticsPool, scratchPool - integer :: i, iEdge, iCell, k - integer :: err1 - - integer, dimension(:), pointer :: nAdvCellsForEdge, maxLevelCell - integer, dimension(:), pointer :: maxLevelEdgeBot, maxLevelEdgeTop - integer, dimension(:,:), pointer :: advCellsForEdge, highOrderAdvectionMask, boundaryCell - real (kind=RKIND), dimension(:), pointer :: areaCell - real (kind=RKIND), dimension(:,:), pointer :: advCoefs, advCoefs3rd, normalTransportVelocity - real (kind=RKIND), dimension(:,:), pointer :: layerThickness - real (kind=RKIND), dimension(:,:), pointer :: normalVelocity, normalGMBolusVelocity, edgeTangentVectors - real (kind=RKIND), dimension(:,:), pointer :: velocityX, velocityY, velocityZ - real (kind=RKIND), dimension(:,:), pointer :: velocityZonal, velocityMeridional - real (kind=RKIND), dimension(:,:,:), pointer :: derivTwo - - real (kind=RKIND), dimension(:,:,:), pointer :: tracers - - integer, pointer :: nCells, nEdges, nVertices, nVertLevels - integer, pointer :: config_horiz_tracer_adv_order - logical, pointer :: config_hmix_scaleWithMesh - logical, pointer :: config_use_standardGM - real (kind=RKIND), pointer :: config_maxMeshDensity - - call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) - call mpas_pool_get_dimension(block % dimensions, 'nEdges', nEdges) - call mpas_pool_get_dimension(block % dimensions, 'nVertices', nVertices) - call mpas_pool_get_dimension(block % dimensions, 
'nVertLevels', nVertLevels) - - call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) - call mpas_pool_get_subpool(block % structs, 'state', statePool) - call mpas_pool_get_subpool(block % structs, 'forcing', forcingPool) - call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) - call mpas_pool_get_subpool(block % structs, 'scratch', scratchPool) - call mpas_pool_get_subpool(block % structs, 'average', averagePool) - - call mpas_pool_get_array(meshPool, 'derivTwo', derivTwo) - call mpas_pool_get_array(meshPool, 'advCoefs', advCoefs) - call mpas_pool_get_array(meshPool, 'advCoefs3rd', advCoefs3rd) - call mpas_pool_get_array(meshPool, 'nAdvCellsForEdge', nAdvCellsForEdge) - call mpas_pool_get_array(meshPool, 'advCellsForEdge', advCellsForEdge) - call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) - call mpas_pool_get_array(meshPool, 'highOrderAdvectionMask', highOrderAdvectionMask) - call mpas_pool_get_array(meshPool, 'boundaryCell', boundaryCell) - call mpas_pool_get_array(meshPool, 'edgeTangentVectors', edgeTangentVectors) - call mpas_pool_get_array(meshPool, 'areaCell', areaCell) - call mpas_pool_get_array(meshPool, 'boundaryCell', boundaryCell) - call mpas_pool_get_array(meshPool, 'maxLevelEdgeBot', maxLevelEdgeBot) - call mpas_pool_get_array(meshPool, 'maxLevelEdgeTop', maxLevelEdgeTop) - - call mpas_pool_get_array(diagnosticsPool, 'normalTransportVelocity', normalTransportVelocity) - call mpas_pool_get_array(diagnosticsPool, 'normalGMBolusVelocity', normalGMBolusVelocity) - call mpas_pool_get_array(diagnosticsPool, 'velocityX', velocityX) - call mpas_pool_get_array(diagnosticsPool, 'velocityY', velocityY) - call mpas_pool_get_array(diagnosticsPool, 'velocityZ', velocityZ) - call mpas_pool_get_array(diagnosticsPool, 'velocityZonal', velocityZonal) - call mpas_pool_get_array(diagnosticsPool, 'velocityMeridional', velocityMeridional) - - call mpas_pool_get_array(statePool, 'normalVelocity', normalVelocity, 1) - call 
mpas_pool_get_array(statePool, 'layerThickness', layerThickness, 1) - call mpas_pool_get_array(statePool, 'tracers', tracers, 1) - - call mpas_pool_get_config(block % configs, 'config_horiz_tracer_adv_order', config_horiz_tracer_adv_order) - call mpas_pool_get_config(block % configs, 'config_hmix_scaleWithMesh', config_hmix_scaleWithMesh) - call mpas_pool_get_config(block % configs, 'config_maxMeshDensity', config_maxMeshDensity) - call mpas_pool_get_config(block % configs, 'config_use_standardGM', config_use_standardGM) - call ocn_setup_sign_and_index_fields(meshPool) - call mpas_initialize_deriv_two(meshPool, derivTwo, err) - call mpas_tracer_advection_coefficients(meshPool, & - config_horiz_tracer_adv_order, derivTwo, advCoefs, & - advCoefs3rd, nAdvCellsForEdge, advCellsForEdge, & - err1, maxLevelCell, highOrderAdvectionMask, & - boundaryCell) - err = ior(err, err1) - - call ocn_time_average_init(averagePool) - - call mpas_timer_start("diagnostic solve", .false., initDiagSolveTimer) - call ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnosticsPool, scratchPool) - call mpas_timer_stop("diagnostic solve", initDiagSolveTimer) - - ! Compute velocity transport, used in advection terms of layerThickness and tracer tendency - normalTransportVelocity(:,:) = normalVelocity(:,:) + normalGMBolusVelocity(:,:) - - call ocn_compute_mesh_scaling(meshPool, config_hmix_scaleWithMesh, config_maxMeshDensity) - - call mpas_rbf_interp_initialize(meshPool) - call mpas_initialize_tangent_vectors(meshPool, edgeTangentVectors) - - call mpas_init_reconstruct(meshPool) - call mpas_reconstruct(meshPool, normalVelocity, & - velocityX, & - velocityY, & - velocityZ, & - velocityZonal, & - velocityMeridional & - ) - - if (config_use_standardGM) then - call ocn_reconstruct_gm_vectors(diagnosticsPool, meshPool) - end if - - ! initialize velocities and tracers on land to be zero. 
- areaCell(nCells+1) = -1.0e34 - - layerThickness(:, nCells+1) = 0.0 - - do iEdge=1, nEdges - normalVelocity(maxLevelEdgeTop(iEdge)+1:maxLevelEdgeBot(iEdge), iEdge) = 0.0 - - normalVelocity(maxLevelEdgeBot(iEdge)+1:nVertLevels,iEdge) = -1.0e34 - end do - - do iCell=1,nCells - tracers(:, maxLevelCell(iCell)+1:nVertLevels,iCell) = -1.0e34 - end do - - call mpas_pool_initialize_time_levels(statePool) - - end subroutine mpas_init_block!}}} - -!*********************************************************************** -! -! routine ocn_core_run -! -!> \brief Main driver for MPAS-Ocean time-stepping -!> \author Mark Petersen -!> \date November 2013 -!> \details -!> This routine includes the time-stepping loop, and calls timer -!> routines to write output and restart files. -! -!----------------------------------------------------------------------- - - function ocn_core_run(domain) result(err)!{{{ - - use mpas_kind_types - use mpas_stream_manager - use mpas_derived_types - use mpas_pool_routines - use mpas_timer - - implicit none - - type (domain_type), intent(inout) :: domain - - integer :: itimestep - real (kind=RKIND) :: dt - type (block_type), pointer :: block_ptr - - type (MPAS_Time_Type) :: currTime - character(len=StrKIND) :: timeStamp - integer :: err, ierr - - type (mpas_pool_type), pointer :: statePool - type (mpas_pool_type), pointer :: forcingPool - type (mpas_pool_type), pointer :: meshPool - type (mpas_pool_type), pointer :: diagnosticsPool - type (mpas_pool_type), pointer :: scratchPool - - type (MPAS_timeInterval_type) :: timeStep - character (len=StrKIND), pointer :: config_dt - logical, pointer :: config_write_output_on_startup - - err = 0 - - call mpas_pool_get_config(ocnConfigs, 'config_dt', config_dt) - call mpas_pool_get_config(ocnConfigs, 'config_write_output_on_startup', config_write_output_on_startup) - - timeStep = mpas_get_clock_timestep(clock, ierr=ierr) - call mpas_get_timeInterval(timeStep, dt=dt) - - currTime = mpas_get_clock_time(clock, 
MPAS_NOW, ierr) - call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr) - write(stderrUnit,*) 'Initial time ', trim(timeStamp) - - ! fill in diagnostics variables - call mpas_timer_start("diagnostic solve", .false., initDiagSolveTimer) - block_ptr => domain % blocklist - do while(associated(block_ptr)) - call mpas_pool_get_subpool(block_ptr % structs, 'state', statePool) - call mpas_pool_get_subpool(block_ptr % structs, 'forcing', forcingPool) - call mpas_pool_get_subpool(block_ptr % structs, 'mesh', meshPool) - call mpas_pool_get_subpool(block_ptr % structs, 'diagnostics', diagnosticsPool) - call mpas_pool_get_subpool(block_ptr % structs, 'scratch', scratchPool) - - call ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnosticsPool, scratchPool, 1) - block_ptr => block_ptr % next - end do - call mpas_timer_stop("diagnostic solve", initDiagSolveTimer) - - if (config_write_output_on_startup) then - call ocn_analysis_compute(domain, err) - call mpas_stream_mgr_write(domain % streamManager, ierr=ierr) - endif - - err = iErr - - end function ocn_core_run!}}} - - function ocn_core_finalize(domain) result(iErr)!{{{ - - use mpas_derived_types - use mpas_decomp - - implicit none - - type (domain_type), intent(inout) :: domain - integer :: iErr - - iErr = 0 - - call mpas_destroy_clock(clock, ierr) - - call mpas_decomp_destroy_decomp_list(domain % decompositions) - - end function ocn_core_finalize!}}} - -end module ocn_core - -! vim: foldmethod=marker diff --git a/src/core_ocean/mode_analysis/mpas_ocn_analysis_mode.F b/src/core_ocean/mode_analysis/mpas_ocn_analysis_mode.F new file mode 100644 index 0000000000..25abc2616f --- /dev/null +++ b/src/core_ocean/mode_analysis/mpas_ocn_analysis_mode.F @@ -0,0 +1,281 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! 
Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +! +! ocn_analysis_mode +! +!> \brief Main driver for MPAS ocean analysis core in post-processing mode +!> \author Mark Petersen +!> \date November 2013 +!> \details +!> This module contains the drivers for the MPAS ocean analysis core in +!> post-processing mode. None of these routines are used in run-time mode +!> by the ocean core. +! +!----------------------------------------------------------------------- + +module ocn_analysis_mode + + use mpas_kind_types + use mpas_derived_types + use mpas_pool_routines + use mpas_stream_manager + use mpas_timekeeping + use mpas_dmpar + use mpas_timer + use mpas_io_units + use mpas_decomp + use mpas_abort, only : mpas_dmpar_global_abort + + use ocn_analysis_driver + use ocn_init_routines + use ocn_diagnostics + use ocn_equation_of_state + use ocn_constants + use ocn_time_average + + private + + public :: ocn_analysis_mode_init, ocn_analysis_mode_run, ocn_analysis_mode_finalize + public :: ocn_analysis_mode_setup_clock + + contains + +!*********************************************************************** +! +! function ocn_analysis_mode_init +! +!> \brief Initialize MPAS-Ocean analysis post-processing core +!> \author Mark Petersen +!> \date November 2013 +!> \details +!> This function calls all initializations required to begin a +!> simulation with MPAS-Ocean in analysis mode. +! 
+!----------------------------------------------------------------------- + + function ocn_analysis_mode_init(domain, startTimeStamp) result(ierr)!{{{ + + type (domain_type), intent(inout) :: domain + character(len=*), intent(out) :: startTimeStamp + integer :: ierr + + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: diagnosticsPool + + integer :: err_tmp + + type (MPAS_timeInterval_type) :: timeStep + + ! remove dt later + real (kind=RKIND) :: dt + character (len=StrKIND), pointer :: xtime + type (MPAS_Time_Type) :: startTime + + ierr = 0 + + ! + ! Set startTimeStamp based on the start time of the simulation clock + ! + startTime = mpas_get_clock_time(domain % clock, MPAS_START_TIME, err_tmp) + call mpas_get_time(startTime, dateTimeString=startTimeStamp) + ierr = ior(ierr, err_tmp) + + ! Setup ocean config pool + call ocn_constants_init(domain % configs, domain % packages) + + ! + ! Read input data for model + ! + call mpas_timer_start('io_read', .false.) + call MPAS_stream_mgr_read(domain % streamManager, streamID='mesh', whence=MPAS_STREAM_NEAREST, ierr=err_tmp) + call MPAS_stream_mgr_read(domain % streamManager, streamID='input', ierr=err_tmp) + call mpas_timer_stop('io_read') + call mpas_timer_start('io_reset_alarms', .false.) + call MPAS_stream_mgr_reset_alarms(domain % streamManager, streamID='input', ierr=err_tmp) + call mpas_timer_stop('io_reset_alarms') + + ! Initialize submodules before initializing blocks. 
+ call ocn_equation_of_state_init(err_tmp) + ierr = ior(ierr, err_tmp) + + call ocn_analysis_init(domain, err_tmp) + ierr = ior(ierr, err_tmp) + + call ocn_init_routines_vert_coord(domain) + + call ocn_init_routines_compute_max_level(domain) + + timeStep = mpas_get_clock_timestep(domain % clock, ierr=err_tmp) + call mpas_get_timeInterval(timeStep, dt=dt) + + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) + call mpas_pool_get_array(diagnosticsPool, 'xtime', xtime) + + call ocn_init_routines_block(block, dt, ierr) + if(ierr.eq.1) then + call mpas_dmpar_global_abort('ERROR: An error was encountered in ocn_init_routines_block') + endif + + xtime = startTimeStamp + block => block % next + end do + + end function ocn_analysis_mode_init!}}} + +!*********************************************************************** +! +! routine ocn_analysis_mode_setup_clock +! +!> \brief Initialize timer variables +!> \author Mark Petersen +!> \date November 2013 +!> \details +!> This routine initializes all timer variables for analysis mode +! 
+!----------------------------------------------------------------------- + + function ocn_analysis_mode_setup_clock(core_clock, configs) result(ierr)!{{{ + + implicit none + + type (MPAS_Clock_type), intent(inout) :: core_clock + type (mpas_pool_type), intent(inout) :: configs + integer :: ierr + + type (MPAS_Time_Type) :: startTime, stopTime, alarmStartTime + type (MPAS_TimeInterval_type) :: runDuration, timeStep, alarmTimeStep + character(len=StrKIND) :: restartTimeStamp + integer :: err_tmp + character (len=StrKIND), pointer :: config_start_time, config_stop_time + character (len=StrKIND), pointer :: config_run_duration + character (len=StrKIND), pointer :: config_dt, config_restart_timestamp_name + + ierr = 0 + + call mpas_pool_get_config(configs, 'config_dt', config_dt) + call mpas_pool_get_config(configs, 'config_start_time', config_start_time) + call mpas_pool_get_config(configs, 'config_stop_time', config_stop_time) + call mpas_pool_get_config(configs, 'config_run_duration', config_run_duration) + call mpas_pool_get_config(configs, 'config_restart_timestamp_name', config_restart_timestamp_name) + + if ( trim(config_start_time) == "file" ) then + open(22,file=config_restart_timestamp_name,form='formatted',status='old') + read(22,*) restartTimeStamp + close(22) + call mpas_set_time(curr_time=startTime, dateTimeString=restartTimeStamp, ierr=err_tmp) + else + call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time, ierr=err_tmp) + end if + + call mpas_set_timeInterval(timeStep, timeString="0000_00:00:00", ierr=err_tmp) + call mpas_set_timeInterval(runDuration, timeString="0000_00:00:00", ierr=err_tmp) + call mpas_create_clock(core_clock, startTime=startTime, timeStep=timeStep, runDuration=runDuration, ierr=err_tmp) + + end function ocn_analysis_mode_setup_clock!}}} + +!*********************************************************************** +! +! function ocn_analysis_mode_run +! 
+!> \brief Main driver for MPAS-Ocean analysis mode +!> \author Mark Petersen +!> \date November 2013 +!> \details +!> This function includes the calls to perform analysis of the input file. +! +!----------------------------------------------------------------------- + + function ocn_analysis_mode_run(domain) result(ierr)!{{{ + + type (domain_type), intent(inout) :: domain + + integer :: itimestep + real (kind=RKIND) :: dt + type (block_type), pointer :: block_ptr + + type (MPAS_Time_Type) :: currTime + character(len=StrKIND) :: timeStamp + integer :: err, ierr + + type (mpas_pool_type), pointer :: statePool + type (mpas_pool_type), pointer :: forcingPool + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: diagnosticsPool + type (mpas_pool_type), pointer :: scratchPool + + type (MPAS_timeInterval_type) :: timeStep + character (len=StrKIND), pointer :: config_dt + logical, pointer :: config_write_output_on_startup + + ierr = 0 + + call mpas_pool_get_config(ocnConfigs, 'config_dt', config_dt) + call mpas_pool_get_config(ocnConfigs, 'config_write_output_on_startup', config_write_output_on_startup) + + timeStep = mpas_get_clock_timestep(domain % clock, ierr=ierr) + call mpas_get_timeInterval(timeStep, dt=dt) + + currTime = mpas_get_clock_time(domain % clock, MPAS_NOW, ierr) + call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr) + write(stderrUnit,*) 'Initial time ', trim(timeStamp) + + ! 
fill in diagnostics variables + call mpas_timer_start("diagnostic solve") + block_ptr => domain % blocklist + do while(associated(block_ptr)) + call mpas_pool_get_subpool(block_ptr % structs, 'state', statePool) + call mpas_pool_get_subpool(block_ptr % structs, 'forcing', forcingPool) + call mpas_pool_get_subpool(block_ptr % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block_ptr % structs, 'diagnostics', diagnosticsPool) + call mpas_pool_get_subpool(block_ptr % structs, 'scratch', scratchPool) + + call ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnosticsPool, scratchPool, 1) + block_ptr => block_ptr % next + end do + call mpas_timer_stop("diagnostic solve") + + if (config_write_output_on_startup) then + call ocn_analysis_compute_startup(domain, err) + call mpas_timer_start('io_write', .false.) + call mpas_stream_mgr_write(domain % streamManager, ierr=ierr) + call mpas_timer_stop('io_write') + endif + + end function ocn_analysis_mode_run!}}} + +!*********************************************************************** +! +! function ocn_analysis_mode_finalize +! +!> \brief Finalize MPAS-Ocean analysis mode +!> \author Mark Petersen +!> \date November 2013 +!> \details +!> This function finalizes the MPAS-Ocean core that was run with analysis mode. +! +!----------------------------------------------------------------------- + + function ocn_analysis_mode_finalize(domain) result(iErr)!{{{ + + type (domain_type), intent(inout) :: domain + integer :: ierr + + iErr = 0 + + call mpas_destroy_clock(domain % clock, ierr) + + call mpas_decomp_destroy_decomp_list(domain % decompositions) + + end function ocn_analysis_mode_finalize!}}} + +end module ocn_analysis_mode + +! 
vim: foldmethod=marker diff --git a/src/core_ocean/mode_analysis/mpas_ocn_core_interface.F b/src/core_ocean/mode_analysis/mpas_ocn_core_interface.F deleted file mode 100644 index 663042c890..0000000000 --- a/src/core_ocean/mode_analysis/mpas_ocn_core_interface.F +++ /dev/null @@ -1,256 +0,0 @@ -! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) -! and the University Corporation for Atmospheric Research (UCAR). -! -! Unless noted otherwise source code is licensed under the BSD license. -! Additional copyright and license information can be found in the LICENSE file -! distributed with this code, or at http://mpas-dev.github.com/license.html -! -module ocn_core_interface - - use mpas_derived_types - use mpas_pool_routines - use mpas_dmpar - use mpas_constants - use mpas_io_units - use ocn_core - - public - - contains - !*********************************************************************** - ! - ! routine ocn_setup_core - ! - !> \brief Ocean core setup routine - !> \author Doug Jacobsen - !> \date 03/18/2015 - !> \details - !> This routine is intended to setup the necessary variables within a core_type - !> for the ocean core. - ! 
- !----------------------------------------------------------------------- - subroutine ocn_setup_core(core)!{{{ - type (core_type), pointer :: core - - core % core_init => ocn_core_init - core % core_run => ocn_core_run - core % core_finalize => ocn_core_finalize - core % define_packages => ocn_define_packages - core % setup_packages => ocn_setup_packages - core % setup_decompositions => ocn_setup_decompositions - core % setup_clock => ocn_setup_clock - core % get_mesh_stream => ocn_get_mesh_stream - core % setup_immutable_streams => ocn_setup_immutable_streams - core % setup_derived_dimensions => ocn_setup_derived_dimensions - core % setup_block => ocn_setup_block - core % setup_namelist => ocn_setup_namelists - - core % Conventions = 'MPAS' - core % source = 'MPAS' -#include "../inc/core_variables.inc" - - end subroutine ocn_setup_core!}}} - - - !*********************************************************************** - ! - ! routine ocn_setup_domain - ! - !> \brief Ocean domain setup routine - !> \author Doug Jacobsen - !> \date 03/18/2015 - !> \details - !> This routine is intended to setup the necessary variables within a domain_type - !> for the ocean core. - ! - !----------------------------------------------------------------------- - subroutine ocn_setup_domain(domain)!{{{ - type (domain_type), pointer :: domain - -#include "../inc/domain_variables.inc" - - end subroutine ocn_setup_domain!}}} - - - !*********************************************************************** - ! - ! function ocn_setup_packages - ! - !> \brief Pacakge setup routine - !> \author Doug Jacobsen - !> \date 03/12/2015 - !> \details - !> This function is intended to correctly configure the packages for this MPAS - !> core. It can use any Fortran logic to properly configure packages, and it - !> can also make use of any namelist options. All variables in the model are - !> *not* allocated until after this routine is called. - ! 
- !----------------------------------------------------------------------- - function ocn_setup_packages(configPool, packagePool) result(ierr)!{{{ - - use mpas_derived_types - use ocn_analysis_driver - - implicit none - - type (mpas_pool_type), intent(inout) :: configPool - type (mpas_pool_type), intent(inout) :: packagePool - integer :: ierr - integer :: err_tmp - - ierr = 0 - - call ocn_analysis_setup_packages(configPool, packagePool, err_tmp) - ierr = ior(ierr, err_tmp) - - end function ocn_setup_packages!}}} - - - !*********************************************************************** - ! - ! routine ocn_setup_decompositions - ! - !> \brief Decomposition setup routine - !> \author Doug Jacobsen - !> \date 04/08/2015 - !> \details - !> This routine is intended to create the decomposition list within a - !> domain type, and register any decompositons the core wants within it. - ! - !----------------------------------------------------------------------- - function ocn_setup_decompositions(decompList) result(ierr)!{{{ - - use mpas_derived_types - use mpas_decomp - - implicit none - - type (mpas_decomp_list), pointer :: decompList - - integer :: ierr - procedure (mpas_decomp_function), pointer :: decompFunc - - ierr = 0 - - call mpas_decomp_create_decomp_list(decompList) - - decompFunc => mpas_uniform_decomp - - call mpas_decomp_register_method(decompList, 'uniform', decompFunc, iErr) - - if ( iErr == MPAS_DECOMP_NOERR ) then - iErr = 0 - end if - - end function ocn_setup_decompositions!}}} - - - !*********************************************************************** - ! - ! function ocn_setup_clock - ! - !> \brief Pacakge setup routine - !> \author Michael Duda - !> \date 6 August 2014 - !> \details - !> The purpose of this function is to allow the core to set up a simulation - !> clock that will be used by the I/O subsystem for timing reads and writes - !> of I/O streams. 
- !> This function is called from the superstructure after the framework - !> has been initialized but before any fields have been allocated and - !> initial fields have been read from input files. However, all namelist - !> options are available. - ! - !----------------------------------------------------------------------- - function ocn_setup_clock(core_clock, configs) result(ierr)!{{{ - - use mpas_derived_types - - implicit none - - type (MPAS_Clock_type), intent(inout) :: core_clock - type (mpas_pool_type), intent(inout) :: configs - integer :: ierr - - call ocn_simulation_clock_init(core_clock, configs, ierr) - - end function ocn_setup_clock!}}} - - - !*********************************************************************** - ! - ! function ocn_get_mesh_stream - ! - !> \brief Returns the name of the stream containing mesh information - !> \author Michael Duda - !> \date 8 August 2014 - !> \details - !> This function returns the name of the I/O stream containing dimensions, - !> attributes, and mesh fields needed by the framework bootstrapping - !> routine. At the time this routine is called, only namelist options - !> are available. - ! - !----------------------------------------------------------------------- - function ocn_get_mesh_stream(configs, stream) result(ierr)!{{{ - - use mpas_derived_types - use mpas_pool_routines - - implicit none - - type (mpas_pool_type), intent(inout) :: configs - character(len=StrKIND), intent(out) :: stream - integer :: ierr - - logical, pointer :: config_do_restart - - ierr = 0 - - call mpas_pool_get_config(configs, 'config_do_restart', config_do_restart) - - if (.not. associated(config_do_restart)) then - write(stream,'(a)') '' - else if (config_do_restart) then - write(stream,'(a)') 'restart' - else - write(stream,'(a)') 'input' - end if - - end function ocn_get_mesh_stream!}}} - - - !*********************************************************************** - ! - ! function ocn_setup_block - ! 
- !> \brief Ocean block setup function - !> \author Doug Jacobsen - !> \date 03/18/2015 - !> \details - !> This function is a wrapper function to properly setup a block to be a - !> ocean core block. - ! - !----------------------------------------------------------------------- - function ocn_setup_block(block) result(iErr)!{{{ - use mpas_derived_types - type (block_type), pointer :: block - integer :: iErr - - iErr = 0 - call ocn_generate_structs(block, block % structs, block % dimensions, block % packages) - end function ocn_setup_block!}}} - -#include "../inc/setup_immutable_streams.inc" - -#include "../inc/block_dimension_routines.inc" - -#include "../inc/define_packages.inc" - -#include "../inc/structs_and_variables.inc" - -#include "../inc/namelist_call.inc" - -#include "../inc/namelist_defines.inc" - -end module ocn_core_interface - diff --git a/src/core_ocean/mode_forward/Makefile b/src/core_ocean/mode_forward/Makefile index eb7f868c1f..5a1ceb6542 100644 --- a/src/core_ocean/mode_forward/Makefile +++ b/src/core_ocean/mode_forward/Makefile @@ -1,16 +1,13 @@ .SUFFIXES: .F .o -OBJS = mpas_ocn_core.o \ - mpas_ocn_core_interface.o \ +OBJS = mpas_ocn_forward_mode.o \ mpas_ocn_time_integration.o \ mpas_ocn_time_integration_rk4.o \ mpas_ocn_time_integration_split.o -all: core_ocean +all: forward_mode -core_ocean: $(OBJS) - -mpas_ocn_core_interface.o: mpas_ocn_core.o +forward_mode: $(OBJS) mpas_ocn_time_integration.o: mpas_ocn_time_integration_rk4.o mpas_ocn_time_integration_split.o @@ -18,12 +15,12 @@ mpas_ocn_time_integration_rk4.o: mpas_ocn_time_integration_split.o: -mpas_ocn_core.o: mpas_ocn_time_integration.o \ - mpas_ocn_time_integration_rk4.o \ - mpas_ocn_time_integration_split.o +mpas_ocn_forward_mode.o: mpas_ocn_time_integration.o \ + mpas_ocn_time_integration_rk4.o \ + mpas_ocn_time_integration_split.o clean: - $(RM) *.o *.mod *.f90 libdycore.a + $(RM) *.o *.mod *.f90 .F.o: $(RM) $@ $*.mod diff --git a/src/core_ocean/mode_forward/mpas_ocn_core.F 
b/src/core_ocean/mode_forward/mpas_ocn_core.F deleted file mode 100644 index ab195e7320..0000000000 --- a/src/core_ocean/mode_forward/mpas_ocn_core.F +++ /dev/null @@ -1,865 +0,0 @@ -! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) -! and the University Corporation for Atmospheric Research (UCAR). -! -! Unless noted otherwise source code is licensed under the BSD license. -! Additional copyright and license information can be found in the LICENSE file -! distributed with this code, or at http://mpas-dev.github.com/license.html -! -!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| -! -! ocn_core -! -!> \brief Main driver for MPAS ocean core -!> \author Doug Jacobsen, Mark Petersen, Todd Ringler -!> \date September 2011 -!> \details -!> This module contains initialization and timestep drivers for -!> the MPAS ocean core. -! -!----------------------------------------------------------------------- - -module ocn_core - - use mpas_framework - use mpas_timekeeping - use mpas_dmpar - use mpas_timer - use mpas_io_units - - use ocn_analysis_driver - use ocn_init - - use ocn_global_diagnostics - use ocn_time_integration - use ocn_tendency - use ocn_diagnostics - use ocn_test - - use ocn_thick_hadv - use ocn_thick_vadv - use ocn_thick_ale - use ocn_thick_surface_flux - - use ocn_vel_pressure_grad - use ocn_vel_vadv - use ocn_vel_hmix - use ocn_vel_forcing - use ocn_vel_coriolis - - use ocn_tracer_hmix - use ocn_tracer_surface_flux - use ocn_tracer_short_wave_absorption - use ocn_tracer_nonlocalflux - use ocn_gm - - use ocn_high_freq_thickness_hmix_del2 - - use ocn_equation_of_state - - use ocn_vmix - - use ocn_time_average - - use ocn_forcing - use ocn_sea_ice - - use ocn_constants - - type (MPAS_Clock_type), pointer :: clock - - character (len=*), parameter :: statsAlarmID = 'stats' - character (len=*), parameter :: coupleAlarmID = 'coupling' - - type (timer_node), pointer :: globalDiagTimer, timeIntTimer, testSuiteTimer - type 
(timer_node), pointer :: initDiagSolveTimer - - contains - -!*********************************************************************** -! -! routine ocn_core_init -! -!> \brief Initialize MPAS-Ocean core -!> \author Doug Jacobsen, Mark Petersen, Todd Ringler -!> \date September 2011 -!> \details -!> This routine calls all initializations required to begin a -!> simulation with MPAS-Ocean -! -!----------------------------------------------------------------------- - - function ocn_core_init(domain, startTimeStamp) result(err)!{{{ - - use mpas_derived_types - use mpas_pool_routines - use mpas_stream_manager - use ocn_tracer_advection - - implicit none - - type (domain_type), intent(inout) :: domain - character(len=*), intent(out) :: startTimeStamp - - real (kind=RKIND) :: dt - type (block_type), pointer :: block - type (dm_info) :: dminfo - - integer :: err, err_tmp - integer, pointer :: nVertLevels - real (kind=RKIND) :: maxDensity, maxDensity_global - real (kind=RKIND), dimension(:), pointer :: meshDensity - type (mpas_pool_type), pointer :: meshPool - type (mpas_pool_type), pointer :: diagnosticsPool - - character (len=StrKIND), pointer :: xtime - type (MPAS_Time_Type) :: startTime - type (MPAS_TimeInterval_type) :: timeStep - - logical, pointer :: config_do_restart, config_filter_btr_mode, config_conduct_tests - logical, pointer :: config_write_stats_on_startup - character (len=StrKIND), pointer :: config_vert_coord_movement, config_pressure_gradient_type - real (kind=RKIND), pointer :: config_maxMeshDensity - - err = 0 - - ! - ! Set "local" clock to point to the clock contained in the domain type - ! - clock => domain % clock - - ! - ! Set startTimeStamp based on the start time of the simulation clock - ! - startTime = mpas_get_clock_time(clock, MPAS_START_TIME, err_tmp) - call mpas_get_time(startTime, dateTimeString=startTimeStamp) - err = ior(err, err_tmp) - - ! 
Setup ocean config pool - call ocn_constants_init(domain % configs, domain % packages) - - dminfo = domain % dminfo - - call mpas_pool_get_config(domain % configs, 'config_do_restart', config_do_restart) - call mpas_pool_get_config(domain % configs, 'config_vert_coord_movement', config_vert_coord_movement) - call mpas_pool_get_config(domain % configs, 'config_pressure_gradient_type', config_pressure_gradient_type) - call mpas_pool_get_config(domain % configs, 'config_filter_btr_mode', config_filter_btr_mode) - call mpas_pool_get_config(domain % configs, 'config_maxMeshDensity', config_maxMeshDensity) - call mpas_pool_get_config(domain % configs, 'config_conduct_tests', config_conduct_tests) - call mpas_pool_get_config(domain % configs, 'config_write_stats_on_startup', config_write_stats_on_startup) - - ! - ! Read input data for model - ! - if ( config_do_restart ) then - call MPAS_stream_mgr_read(domain % streamManager, streamID='restart', ierr=err) - else - call MPAS_stream_mgr_read(domain % streamManager, streamID='input', ierr=err) - end if - call mpas_stream_mgr_reset_alarms(domain % streamManager, streamID='input', ierr=err) - call mpas_stream_mgr_reset_alarms(domain % streamManager, streamID='restart', ierr=err) - call mpas_stream_mgr_reset_alarms(domain % streamManager, direction=MPAS_STREAM_OUTPUT, ierr=err) - - ! Initialize submodules before initializing blocks. 
- call ocn_timestep_init(err) - - call ocn_thick_hadv_init(err_tmp) - err = ior(err, err_tmp) - call ocn_thick_vadv_init(err_tmp) - err = ior(err, err_tmp) - call ocn_thick_surface_flux_init(err_tmp) - err = ior(err, err_tmp) - call ocn_thick_ale_init(err_tmp) - err = ior(err,err_tmp) - - call ocn_vel_coriolis_init(err_tmp) - err = ior(err, err_tmp) - call ocn_vel_pressure_grad_init(err_tmp) - err = ior(err, err_tmp) - call ocn_vel_vadv_init(err_tmp) - err = ior(err, err_tmp) - call ocn_vel_hmix_init(err_tmp) - err = ior(err, err_tmp) - call ocn_vel_forcing_init(err_tmp) - err = ior(err, err_tmp) - - call ocn_tracer_hmix_init(err_tmp) - err = ior(err, err_tmp) - call ocn_tracer_surface_flux_init(err_tmp) - err = ior(err, err_tmp) - call ocn_tracer_advection_init(err_tmp) - err = ior(err,err_tmp) - call ocn_tracer_short_wave_absorption_init(err_tmp) - err = ior(err,err_tmp) - call ocn_gm_init(err_tmp) - err = ior(err,err_tmp) - call ocn_tracer_nonlocalflux_init(err_tmp) - err = ior(err,err_tmp) - - call ocn_vmix_init(domain, err_tmp) - err = ior(err, err_tmp) - - call ocn_equation_of_state_init(err_tmp) - err = ior(err, err_tmp) - - call ocn_tendency_init(err_tmp) - err = ior(err,err_tmp) - call ocn_diagnostics_init(err_tmp) - err = ior(err,err_tmp) - - call ocn_forcing_init(err_tmp) - err = ior(err,err_tmp) - - call ocn_high_freq_thickness_hmix_del2_init(err_tmp) - err = ior(err,err_tmp) - - call ocn_global_diagnostics_init(dminfo,err_tmp) - err = ior(err, err_tmp) - - call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nVertLevels', nVertLevels) - call ocn_sea_ice_init(nVertLevels, err_tmp) - err = ior(err, err_tmp) - - call ocn_analysis_init(domain, err_tmp) - err = ior(err, err_tmp) - - call mpas_timer_init(domain) - - if(err.eq.1) then - call mpas_dmpar_abort(dminfo) - endif - - call ocn_init_vert_coord(domain) - - call ocn_compute_max_level(domain) - - if (.not.config_do_restart) call ocn_init_split_timestep(domain) - - write (stdoutUnit,'(a,a)') ' 
Vertical coordinate movement is: ',trim(config_vert_coord_movement) - - if (config_vert_coord_movement.ne.'fixed'.and. & - config_vert_coord_movement.ne.'uniform_stretching'.and. & - config_vert_coord_movement.ne.'impermeable_interfaces'.and. & - config_vert_coord_movement.ne.'user_specified') then - write (stderrUnit,*) ' Incorrect choice of config_vert_coord_movement.' - call mpas_dmpar_abort(dminfo) - endif - - if(config_vert_coord_movement .ne. 'impermeable_interfaces' .and. config_pressure_gradient_type .eq. 'MontgomeryPotential') then - write (stderrUnit,*) ' Incorrect combination of config_vert_coord_movement and config_pressure_gradient_type' - call mpas_dmpar_abort(dminfo) - end if - - if (config_filter_btr_mode.and. & - config_vert_coord_movement.ne.'fixed')then - write (stderrUnit,*) 'filter_btr_mode has only been tested with'// & - ' config_vert_coord_movement=fixed.' - call mpas_dmpar_abort(dminfo) - endif - - ! find the maximum value of the meshDensity - if (config_maxMeshDensity < 0.0) then - maxDensity=-1 - block => domain % blocklist - do while (associated(block)) - call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) - call mpas_pool_get_array(meshPool, 'meshDensity', meshDensity) - maxDensity = max(maxDensity, maxval(meshDensity)) - block => block % next - end do - call mpas_dmpar_max_real(domain % dminfo, maxDensity, maxDensity_global) - config_maxMeshDensity = maxDensity_global - endif - - ! - ! Initialize core - ! 
- timeStep = mpas_get_clock_timestep(clock, ierr=err) - call mpas_get_timeInterval(timeStep, dt=dt) - - block => domain % blocklist - do while (associated(block)) - call mpas_init_block(block, dt, err) - if(err.eq.1) then - call mpas_dmpar_abort(dminfo) - endif - - call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) - call mpas_pool_get_array(diagnosticsPool, 'xtime', xtime) - xtime = startTimeStamp - block => block % next - end do - - if (config_conduct_tests) then - call mpas_timer_start("test suite", .false., testSuiteTimer) - call ocn_test_suite(domain,err) - call mpas_timer_stop("test suite", testSuiteTimer) - endif - - if (config_write_stats_on_startup) then - call mpas_timer_start("global diagnostics", .false., globalDiagTimer) - call ocn_compute_global_diagnostics(domain, 1 , 0, dt) - call mpas_timer_stop("global diagnostics", globalDiagTimer) - endif - - end function ocn_core_init!}}} - -!*********************************************************************** -! -! routine mpas_simulation_clock_init -! -!> \brief Initialize timer variables -!> \author Doug Jacobsen, Mark Petersen, Todd Ringler -!> \date September 2011 -!> \details -!> This routine initializes all timer variables -! 
-!----------------------------------------------------------------------- - - subroutine ocn_simulation_clock_init(core_clock, configs, ierr)!{{{ - - implicit none - - type (MPAS_Clock_type), intent(inout) :: core_clock - type (mpas_pool_type), intent(inout) :: configs - integer, intent(out) :: ierr - - type (MPAS_Time_Type) :: startTime, stopTime, alarmStartTime - type (MPAS_TimeInterval_type) :: runDuration, timeStep, alarmTimeStep - character(len=StrKIND) :: restartTimeStamp - character(len=StrKIND), pointer :: config_start_time, config_stop_time, config_run_duration - character(len=StrKIND), pointer :: config_stats_interval, config_dt, config_restart_timestamp_name - integer :: err_tmp - - - ierr = 0 - - call mpas_pool_get_config(configs, 'config_dt', config_dt) - call mpas_pool_get_config(configs, 'config_start_time', config_start_time) - call mpas_pool_get_config(configs, 'config_stop_time', config_stop_time) - call mpas_pool_get_config(configs, 'config_run_duration', config_run_duration) - call mpas_pool_get_config(configs, 'config_stats_interval', config_stats_interval) - call mpas_pool_get_config(configs, 'config_restart_timestamp_name', config_restart_timestamp_name) - - if ( trim(config_start_time) == "file" ) then - open(22,file=config_restart_timestamp_name,form='formatted',status='old') - read(22,*) restartTimeStamp - close(22) - call mpas_set_time(curr_time=startTime, dateTimeString=restartTimeStamp, ierr=ierr) - else - call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time, ierr=err_tmp) - end if - - call mpas_set_timeInterval(timeStep, timeString=config_dt, ierr=err_tmp) - if (trim(config_run_duration) /= "none") then - call mpas_set_timeInterval(runDuration, timeString=config_run_duration, ierr=err_tmp) - call mpas_create_clock(core_clock, startTime=startTime, timeStep=timeStep, runDuration=runDuration, ierr=err_tmp) - - if (trim(config_stop_time) /= "none") then - call mpas_set_time(curr_time=stopTime, 
dateTimeString=config_stop_time, ierr=err_tmp) - if(startTime + runduration /= stopTime) then - write(stderrUnit,*) 'Warning: config_run_duration and config_stop_time are inconsitent: using config_run_duration.' - end if - end if - else if (trim(config_stop_time) /= "none") then - call mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=err_tmp) - call mpas_create_clock(core_clock, startTime=startTime, timeStep=timeStep, stopTime=stopTime, ierr=err_tmp) - else - write(stderrUnit,*) 'Error: Neither config_run_duration nor config_stop_time were specified.' - ierr = 1 - end if - - if (trim(config_stats_interval) /= "none") then - call mpas_set_timeInterval(alarmTimeStep, timeString=config_stats_interval, ierr=err_tmp) - alarmStartTime = startTime + alarmTimeStep - call mpas_add_clock_alarm(core_clock, statsAlarmID, alarmStartTime, alarmTimeStep, ierr=err_tmp) - end if - - end subroutine ocn_simulation_clock_init!}}} - -!*********************************************************************** -! -! routine mpas_init_block -! -!> \brief Initialize blocks within MPAS-Ocean core -!> \author Doug Jacobsen, Mark Petersen, Todd Ringler -!> \date September 2011 -!> \details -!> This routine calls all block-level initializations required to begin a -!> simulation with MPAS-Ocean -! 
-!----------------------------------------------------------------------- - - subroutine mpas_init_block(block, dt, err)!{{{ - - use mpas_derived_types - use mpas_pool_routines - use mpas_rbf_interpolation - use mpas_vector_operations - use mpas_vector_reconstruction - use mpas_tracer_advection_helpers - - implicit none - - type (block_type), intent(inout) :: block - real (kind=RKIND), intent(in) :: dt - integer, intent(out) :: err - - type (mpas_pool_type), pointer :: meshPool, averagePool, statePool - type (mpas_pool_type), pointer :: forcingPool, diagnosticsPool, scratchPool - integer :: i, iEdge, iCell, k - integer :: err1 - - integer, dimension(:), pointer :: nAdvCellsForEdge, maxLevelCell - integer, dimension(:), pointer :: maxLevelEdgeBot, maxLevelEdgeTop - integer, dimension(:,:), pointer :: advCellsForEdge, highOrderAdvectionMask, boundaryCell - real (kind=RKIND), dimension(:), pointer :: areaCell, boundaryLayerDepth - real (kind=RKIND), dimension(:,:), pointer :: advCoefs, advCoefs3rd, normalTransportVelocity - real (kind=RKIND), dimension(:,:), pointer :: layerThickness - real (kind=RKIND), dimension(:,:), pointer :: normalVelocity, normalGMBolusVelocity, edgeTangentVectors - real (kind=RKIND), dimension(:,:), pointer :: velocityX, velocityY, velocityZ - real (kind=RKIND), dimension(:,:), pointer :: velocityZonal, velocityMeridional - real (kind=RKIND), dimension(:,:,:), pointer :: derivTwo - - real (kind=RKIND), dimension(:,:,:), pointer :: tracers - - integer, pointer :: nCells, nEdges, nVertices, nVertLevels - integer, pointer :: config_horiz_tracer_adv_order - logical, pointer :: config_hmix_scaleWithMesh, config_do_restart - logical, pointer :: config_use_standardGM - real (kind=RKIND), pointer :: config_maxMeshDensity - - call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) - call mpas_pool_get_dimension(block % dimensions, 'nEdges', nEdges) - call mpas_pool_get_dimension(block % dimensions, 'nVertices', nVertices) - call 
mpas_pool_get_dimension(block % dimensions, 'nVertLevels', nVertLevels) - - call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) - call mpas_pool_get_subpool(block % structs, 'state', statePool) - call mpas_pool_get_subpool(block % structs, 'forcing', forcingPool) - call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) - call mpas_pool_get_subpool(block % structs, 'scratch', scratchPool) - call mpas_pool_get_subpool(block % structs, 'average', averagePool) - - call mpas_pool_get_array(meshPool, 'derivTwo', derivTwo) - call mpas_pool_get_array(meshPool, 'advCoefs', advCoefs) - call mpas_pool_get_array(meshPool, 'advCoefs3rd', advCoefs3rd) - call mpas_pool_get_array(meshPool, 'nAdvCellsForEdge', nAdvCellsForEdge) - call mpas_pool_get_array(meshPool, 'advCellsForEdge', advCellsForEdge) - call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) - call mpas_pool_get_array(meshPool, 'highOrderAdvectionMask', highOrderAdvectionMask) - call mpas_pool_get_array(meshPool, 'boundaryCell', boundaryCell) - call mpas_pool_get_array(meshPool, 'edgeTangentVectors', edgeTangentVectors) - call mpas_pool_get_array(meshPool, 'areaCell', areaCell) - call mpas_pool_get_array(meshPool, 'boundaryCell', boundaryCell) - call mpas_pool_get_array(meshPool, 'maxLevelEdgeBot', maxLevelEdgeBot) - call mpas_pool_get_array(meshPool, 'maxLevelEdgeTop', maxLevelEdgeTop) - - call mpas_pool_get_array(diagnosticsPool, 'normalTransportVelocity', normalTransportVelocity) - call mpas_pool_get_array(diagnosticsPool, 'normalGMBolusVelocity', normalGMBolusVelocity) - call mpas_pool_get_array(diagnosticsPool, 'velocityX', velocityX) - call mpas_pool_get_array(diagnosticsPool, 'velocityY', velocityY) - call mpas_pool_get_array(diagnosticsPool, 'velocityZ', velocityZ) - call mpas_pool_get_array(diagnosticsPool, 'velocityZonal', velocityZonal) - call mpas_pool_get_array(diagnosticsPool, 'velocityMeridional', velocityMeridional) - call mpas_pool_get_array(diagnosticsPool, 
'boundaryLayerDepth', boundaryLayerDepth) - - call mpas_pool_get_array(statePool, 'normalVelocity', normalVelocity, 1) - call mpas_pool_get_array(statePool, 'layerThickness', layerThickness, 1) - call mpas_pool_get_array(statePool, 'tracers', tracers, 1) - - call mpas_pool_get_config(block % configs, 'config_horiz_tracer_adv_order', config_horiz_tracer_adv_order) - call mpas_pool_get_config(block % configs, 'config_hmix_scaleWithMesh', config_hmix_scaleWithMesh) - call mpas_pool_get_config(block % configs, 'config_maxMeshDensity', config_maxMeshDensity) - call mpas_pool_get_config(block % configs, 'config_use_standardGM', config_use_standardGM) - call mpas_pool_get_config(block % configs, 'config_do_restart', config_do_restart) - - call ocn_setup_sign_and_index_fields(meshPool) - call mpas_initialize_deriv_two(meshPool, derivTwo, err) - call mpas_tracer_advection_coefficients(meshPool, & - config_horiz_tracer_adv_order, derivTwo, advCoefs, & - advCoefs3rd, nAdvCellsForEdge, advCellsForEdge, & - err1, maxLevelCell, highOrderAdvectionMask, & - boundaryCell) - err = ior(err, err1) - - call ocn_time_average_init(averagePool) - - if (.not. config_do_restart) then - do iCell=1,nCells - boundaryLayerDepth(iCell) = layerThickness(1, iCell) * 0.5 - end do - end if - - call mpas_timer_start("diagnostic solve", .false., initDiagSolveTimer) - call ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnosticsPool, scratchPool) - call mpas_timer_stop("diagnostic solve", initDiagSolveTimer) - - ! initialize velocities and tracers on land to be zero. - areaCell(nCells+1) = -1.0e34 - - layerThickness(:, nCells+1) = 0.0 - - do iEdge=1, nEdges - normalVelocity(maxLevelEdgeTop(iEdge)+1:maxLevelEdgeBot(iEdge), iEdge) = 0.0 - - normalVelocity(maxLevelEdgeBot(iEdge)+1:nVertLevels,iEdge) = -1.0e34 - end do - - do iCell=1,nCells - tracers(:, maxLevelCell(iCell)+1:nVertLevels,iCell) = -1.0e34 - end do - - ! ------------------------------------------------------------------ - ! 
Accumulating various parametrizations of the transport velocity - ! ------------------------------------------------------------------ - normalTransportVelocity(:,:) = normalVelocity(:,:) - - ! Compute normalGMBolusVelocity, relativeSlope and RediDiffVertCoef if respective flags are turned on - if (config_use_standardGM) then - call ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) - end if - - if (config_use_standardGM) then - normalTransportVelocity(:,:) = normalTransportVelocity(:,:) + normalGMBolusVelocity(:,:) - end if - ! ------------------------------------------------------------------ - ! End: Accumulating various parametrizations of the transport velocity - ! ------------------------------------------------------------------ - - call ocn_compute_mesh_scaling(meshPool, config_hmix_scaleWithMesh, config_maxMeshDensity) - - call mpas_rbf_interp_initialize(meshPool) - call mpas_initialize_tangent_vectors(meshPool, edgeTangentVectors) - - call mpas_init_reconstruct(meshPool) - - call mpas_reconstruct(meshPool, normalVelocity, & - velocityX, & - velocityY, & - velocityZ, & - velocityZonal, & - velocityMeridional & - ) - - if (config_use_standardGM) then - call ocn_reconstruct_gm_vectors(diagnosticsPool, meshPool) - end if - - call mpas_pool_initialize_time_levels(statePool) - - end subroutine mpas_init_block!}}} - -!*********************************************************************** -! -! routine ocn_core_run -! -!> \brief Main driver for MPAS-Ocean time-stepping -!> \author Doug Jacobsen, Mark Petersen, Todd Ringler -!> \date September 2011 -!> \details -!> This routine includes the time-stepping loop, and calls timer -!> routines to write output and restart files. -! 
-!----------------------------------------------------------------------- - - function ocn_core_run(domain) result(err)!{{{ - - use mpas_kind_types - use mpas_derived_types - use mpas_pool_routines - use mpas_stream_manager - use mpas_timer - - implicit none - - type (domain_type), intent(inout) :: domain - - integer :: itimestep, err - real (kind=RKIND) :: dt - type (block_type), pointer :: block_ptr - - type (MPAS_Time_Type) :: currTime - character(len=StrKIND) :: timeStamp - integer :: ierr - - type (mpas_pool_type), pointer :: averagePool - type (mpas_pool_type), pointer :: meshPool - type (mpas_pool_type), pointer :: statePool - type (mpas_pool_type), pointer :: forcingPool - - type (MPAS_timeInterval_type) :: timeStep - character(len=StrKIND), pointer :: config_restart_timestamp_name - logical, pointer :: config_write_output_on_startup - - err = 0 - - call mpas_pool_get_config(domain % configs, 'config_write_output_on_startup', config_write_output_on_startup) - call mpas_pool_get_config(domain % configs, 'config_restart_timestamp_name', config_restart_timestamp_name) - - ! Eventually, dt should be domain specific - timeStep = mpas_get_clock_timestep(clock, ierr=ierr) - call mpas_get_timeInterval(timeStep, dt=dt) - - currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr) - call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr) - write(stderrUnit,*) 'Initial time ', trim(timeStamp) - - call ocn_analysis_compute_startup(domain, err) - - if (config_write_output_on_startup) then - call mpas_stream_mgr_write(domain % streamManager, 'output', forceWriteNow=.true., ierr=ierr) - endif - - block_ptr => domain % blocklist - do while(associated(block_ptr)) - call mpas_pool_get_subpool(block_ptr % structs, 'average', averagePool) - call ocn_time_average_init(averagePool) - block_ptr => block_ptr % next - end do - - ! During integration, time level 1 stores the model state at the beginning of the - ! 
time step, and time level 2 stores the state advanced dt in time by timestep(...) - itimestep = 0 - do while (.not. mpas_is_clock_stop_time(clock)) - call mpas_stream_mgr_read(domain % streamManager, ierr=ierr) - call mpas_stream_mgr_reset_alarms(domain % streamManager, direction=MPAS_STREAM_INPUT, ierr=ierr) - - itimestep = itimestep + 1 - call mpas_advance_clock(clock) - - currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr) - call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr) - write(stderrUnit,*) 'Doing timestep ', trim(timeStamp) - - block_ptr => domain % blocklist - do while(associated(block_ptr)) - call mpas_pool_get_subpool(block_ptr % structs, 'mesh', meshPool) - call mpas_pool_get_subpool(block_ptr % structs, 'state', statePool) - call mpas_pool_get_subpool(block_ptr % structs, 'forcing', forcingPool) - call ocn_forcing_build_arrays(meshPool, statePool, forcingPool, ierr, 1) - call ocn_forcing_build_transmission_array(meshPool, statePool, forcingpool, ierr, 1) - block_ptr => block_ptr % next - end do - - call mpas_timer_start("time integration", .false., timeIntTimer) - call mpas_timestep(domain, itimestep, dt, timeStamp) - call mpas_timer_stop("time integration", timeIntTimer) - - ! 
Move time level 2 fields back into time level 1 for next time step - block_ptr => domain % blocklist - do while(associated(block_ptr)) - call mpas_pool_get_subpool(block_ptr % structs, 'state', statePool) - call mpas_pool_shift_time_levels(statePool) - block_ptr => block_ptr % next - end do - - call ocn_analysis_compute_w_alarms(domain, err) - call ocn_analysis_write(domain % streamManager, err) - - call mpas_stream_mgr_write(domain % streamManager, streamID='output', ierr=ierr) - call mpas_stream_mgr_reset_alarms(domain % streamManager, streamID='output', ierr=ierr) - - if ( mpas_stream_mgr_ringing_alarms(domain % streamManager, streamID='restart', direction=MPAS_STREAM_OUTPUT, ierr=ierr) ) then - open(22, file=config_restart_timestamp_name, form='formatted', status='replace') - write(22, *) trim(timeStamp) - close(22) - end if - - call mpas_stream_mgr_write(domain % streamManager, streamID='restart', ierr=ierr) - call mpas_stream_mgr_reset_alarms(domain % streamManager, streamID='restart', ierr=ierr) - - call mpas_stream_mgr_write(domain % streamManager, ierr=ierr) - call mpas_stream_mgr_reset_alarms(domain % streamManager, direction=MPAS_STREAM_OUTPUT, ierr=ierr) - end do - - err = iErr - - end function ocn_core_run!}}} - - subroutine mpas_timestep(domain, itimestep, dt, timeStamp)!{{{ - - use mpas_kind_types - use mpas_derived_types - - implicit none - - type (domain_type), intent(inout) :: domain - integer, intent(in) :: itimestep - real (kind=RKIND), intent(in) :: dt - character(len=*), intent(in) :: timeStamp - - type (block_type), pointer :: block_ptr - integer :: ierr - - call ocn_timestep(domain, dt, timeStamp) - - !if (config_stats_interval > 0) then - ! if (mod(itimestep, config_stats_interval) == 0) then - ! call mpas_timer_start("global diagnostics", .false., globalDiagTimer) - ! call ocn_compute_global_diagnostics(domain, 2, itimestep, dt); - ! call mpas_timer_stop("global diagnostics", globalDiagTimer) - ! 
end if - !end if - - !TODO: replace the above code block with this if we desire to convert config_stats_interval to use alarms - if (mpas_is_alarm_ringing(clock, statsAlarmID, ierr=ierr)) then - call mpas_reset_clock_alarm(clock, statsAlarmID, ierr=ierr) - -! block_ptr => domain % blocklist -! if (associated(block_ptr % next)) then -! write(stderrUnit,*) 'Error: computeGlobalDiagnostics assumes ',& -! 'that there is only one block per processor.' -! end if - - call mpas_timer_start("global diagnostics") - call ocn_compute_global_diagnostics(domain, 2, itimestep, dt); - ! call ocn_compute_global_diagnostics(domain % dminfo, & - ! block_ptr % state % time_levs(2) % state, block_ptr % mesh, & - ! timeStamp, dt) - call mpas_timer_stop("global diagnostics") - end if - - end subroutine mpas_timestep!}}} - - subroutine ocn_init_split_timestep(domain)!{{{ - ! Initialize splitting variables - - use mpas_derived_types - use mpas_pool_routines - use mpas_configure - - implicit none - - type (domain_type), intent(inout) :: domain - - integer :: i, iCell, iEdge, iVertex, k - type (block_type), pointer :: block - - type (mpas_pool_type), pointer :: statePool, meshPool - - integer :: iTracer, cell, cell1, cell2 - integer, dimension(:), pointer :: maxLevelEdgeTop - integer, dimension(:,:), pointer :: cellsOnEdge - real (kind=RKIND) :: normalThicknessFluxSum, layerThicknessSum, layerThicknessEdge1 - real (kind=RKIND), dimension(:), pointer :: refBottomDepth, normalBarotropicVelocity - - real (kind=RKIND), dimension(:,:), pointer :: layerThickness - real (kind=RKIND), dimension(:,:), pointer :: normalBaroclinicVelocity, normalVelocity - integer, pointer :: nVertLevels, nCells, nEdges - character (len=StrKIND), pointer :: config_time_integrator - logical, pointer :: config_filter_btr_mode - - ! Initialize z-level mesh variables from h, read in from input file. 
- block => domain % blocklist - do while (associated(block)) - call mpas_pool_get_config(block % configs, 'config_time_integrator', config_time_integrator) - call mpas_pool_get_config(block % configs, 'config_filter_btr_mode', config_filter_btr_mode) - call mpas_pool_get_subpool(block % structs, 'state', statePool) - call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) - - call mpas_pool_get_dimension(block % dimensions, 'nVertLevels', nVertLevels) - call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) - call mpas_pool_get_dimension(block % dimensions, 'nEdges', nEdges) - - call mpas_pool_get_array(statePool, 'layerThickness', layerThickness, 1) - call mpas_pool_get_array(statePool, 'normalVelocity', normalVelocity, 1) - call mpas_pool_get_array(statePool, 'normalBarotropicVelocity', normalBarotropicVelocity, 1) - call mpas_pool_get_array(statePool, 'normalBaroclinicVelocity', normalBaroclinicVelocity, 1) - - call mpas_pool_get_array(meshPool, 'refBottomDepth', refBottomDepth) - call mpas_pool_get_array(meshPool, 'cellsOnEdge', cellsOnEdge) - call mpas_pool_get_array(meshPool, 'maxLevelEdgeTop', maxLevelEdgeTop) - - ! Compute barotropic velocity at first timestep - ! This is only done upon start-up. - if (trim(config_time_integrator) == 'unsplit_explicit') then - call mpas_pool_get_array(statePool, 'normalBarotropicVelocity', normalBarotropicVelocity) - normalBarotropicVelocity(:) = 0.0 - - normalBaroclinicVelocity(:,:) = normalVelocity(:,:) - - elseif (trim(config_time_integrator) == 'split_explicit') then - - if (config_filter_btr_mode) then - do iCell = 1, nCells - layerThickness(1,iCell) = refBottomDepth(1) - enddo - endif - - do iEdge = 1, nEdges - cell1 = cellsOnEdge(1,iEdge) - cell2 = cellsOnEdge(2,iEdge) - - ! normalBarotropicVelocity = sum(u)/sum(h) on each column - ! ocn_diagnostic_solve has not yet been called, so compute hEdge - ! just for this edge. - - ! thicknessSum is initialized outside the loop because on land boundaries - ! 
maxLevelEdgeTop=0, but I want to initialize thicknessSum with a - ! nonzero value to avoid a NaN. - layerThicknessEdge1 = 0.5*( layerThickness(1,cell1) + layerThickness(1,cell2) ) - normalThicknessFluxSum = layerThicknessEdge1 * normalVelocity(1,iEdge) - layerThicknessSum = layerThicknessEdge1 - - do k=2, maxLevelEdgeTop(iEdge) - ! ocn_diagnostic_solve has not yet been called, so compute hEdge - ! just for this edge. - layerThicknessEdge1 = 0.5*( layerThickness(k,cell1) + layerThickness(k,cell2) ) - - normalThicknessFluxSum = normalThicknessFluxSum & - + layerThicknessEdge1 * normalVelocity(k,iEdge) - layerThicknessSum = layerThicknessSum + layerThicknessEdge1 - - enddo - normalBarotropicVelocity(iEdge) = normalThicknessFluxSum / layerThicknessSum - - ! normalBaroclinicVelocity(k,iEdge) = normalVelocity(k,iEdge) - normalBarotropicVelocity(iEdge) - do k = 1, maxLevelEdgeTop(iEdge) - normalBaroclinicVelocity(k,iEdge) = normalVelocity(k,iEdge) - normalBarotropicVelocity(iEdge) - enddo - - ! normalBaroclinicVelocity=0, normalVelocity=0 on land cells - do k = maxLevelEdgeTop(iEdge)+1, nVertLevels - normalBaroclinicVelocity(k,iEdge) = 0.0 - normalVelocity(k,iEdge) = 0.0 - enddo - enddo - - if (config_filter_btr_mode) then - ! filter normalBarotropicVelocity out of initial condition - normalVelocity(:,:) = normalBaroclinicVelocity(:,:) - - normalBarotropicVelocity(:) = 0.0 - endif - - endif - - block => block % next - end do - - end subroutine ocn_init_split_timestep!}}} - - function ocn_core_finalize(domain) result(iErr)!{{{ - - use mpas_derived_types - use mpas_decomp - - implicit none - - type (domain_type), intent(inout) :: domain - integer :: ierr - - iErr = 0 - - call ocn_analysis_finalize(domain, ierr) - - call mpas_destroy_clock(clock, ierr) - - call mpas_decomp_destroy_decomp_list(domain % decompositions) - - end function ocn_core_finalize!}}} - - -end module ocn_core - -! 
vim: foldmethod=marker diff --git a/src/core_ocean/mode_forward/mpas_ocn_forward_mode.F b/src/core_ocean/mode_forward/mpas_ocn_forward_mode.F new file mode 100644 index 0000000000..15e1724b18 --- /dev/null +++ b/src/core_ocean/mode_forward/mpas_ocn_forward_mode.F @@ -0,0 +1,516 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +! +! ocn_forward_mode +! +!> \brief Main driver for MPAS ocean core +!> \author Doug Jacobsen, Mark Petersen, Todd Ringler +!> \date September 2011 +!> \details +!> This module contains initialization and timestep drivers for +!> the MPAS ocean core. +! +!----------------------------------------------------------------------- + +module ocn_forward_mode + + use mpas_kind_types + use mpas_derived_types + use mpas_pool_routines + use mpas_stream_manager + use mpas_timekeeping + use mpas_dmpar + use mpas_timer + use mpas_io_units + use mpas_decomp + use mpas_abort, only : mpas_dmpar_global_abort + + use ocn_analysis_driver + use ocn_init_routines + + use ocn_time_integration + use ocn_time_integration_split + use ocn_tendency + use ocn_diagnostics + use ocn_test + + use ocn_thick_hadv + use ocn_thick_vadv + use ocn_thick_ale + use ocn_thick_surface_flux + + use ocn_vel_pressure_grad + use ocn_vel_vadv + use ocn_vel_hmix + use ocn_vel_forcing + use ocn_vel_coriolis + + use ocn_tracer_hmix + use ocn_tracer_surface_flux + use ocn_tracer_short_wave_absorption + use ocn_tracer_nonlocalflux + use ocn_tracer_advection + use ocn_gm + + use ocn_high_freq_thickness_hmix_del2 + + use ocn_equation_of_state + + use ocn_vmix + + use ocn_time_average + + use ocn_forcing + 
use ocn_sea_ice + + use ocn_constants + + implicit none + private + + public :: ocn_forward_mode_init, ocn_forward_mode_run, ocn_forward_mode_finalize + public :: ocn_forward_mode_setup_clock + + contains + +!*********************************************************************** +! +! function ocn_forward_mode_init +! +!> \brief Initialize MPAS-Ocean core +!> \author Doug Jacobsen, Mark Petersen, Todd Ringler +!> \date September 2011 +!> \details +!> This function calls all initializations required to begin a +!> simulation with MPAS-Ocean +! +!----------------------------------------------------------------------- + + function ocn_forward_mode_init(domain, startTimeStamp) result(ierr)!{{{ + + type (domain_type), intent(inout) :: domain + character(len=*), intent(out) :: startTimeStamp + integer :: ierr + + real (kind=RKIND) :: dt + type (block_type), pointer :: block + + integer :: err_tmp + integer, pointer :: nVertLevels + real (kind=RKIND) :: maxDensity, maxDensity_global + real (kind=RKIND), dimension(:), pointer :: meshDensity + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: diagnosticsPool + + character (len=StrKIND), pointer :: xtime + type (MPAS_Time_Type) :: startTime + type (MPAS_TimeInterval_type) :: timeStep + + logical, pointer :: config_do_restart, config_filter_btr_mode, config_conduct_tests + character (len=StrKIND), pointer :: config_vert_coord_movement, config_pressure_gradient_type + real (kind=RKIND), pointer :: config_maxMeshDensity + + ierr = 0 + + ! + ! Set startTimeStamp based on the start time of the simulation clock + ! + startTime = mpas_get_clock_time(domain % clock, MPAS_START_TIME, err_tmp) + call mpas_get_time(startTime, dateTimeString=startTimeStamp) + ierr = ior(ierr, err_tmp) + + ! 
Setup ocean config pool + call ocn_constants_init(domain % configs, domain % packages) + + call mpas_pool_get_config(domain % configs, 'config_do_restart', config_do_restart) + call mpas_pool_get_config(domain % configs, 'config_vert_coord_movement', config_vert_coord_movement) + call mpas_pool_get_config(domain % configs, 'config_pressure_gradient_type', config_pressure_gradient_type) + call mpas_pool_get_config(domain % configs, 'config_filter_btr_mode', config_filter_btr_mode) + call mpas_pool_get_config(domain % configs, 'config_maxMeshDensity', config_maxMeshDensity) + call mpas_pool_get_config(domain % configs, 'config_conduct_tests', config_conduct_tests) + + ! + ! Read input data for model + ! + call mpas_timer_start('io_read') + call MPAS_stream_mgr_read(domain % streamManager, streamID='mesh', whence=MPAS_STREAM_NEAREST, ierr=err_tmp) + if ( config_do_restart ) then + call MPAS_stream_mgr_read(domain % streamManager, streamID='restart', ierr=err_tmp) + else + call MPAS_stream_mgr_read(domain % streamManager, streamID='input', ierr=err_tmp) + end if + call mpas_timer_stop('io_read') + call mpas_timer_start('reset_io_alarms') + call mpas_stream_mgr_reset_alarms(domain % streamManager, streamID='input', ierr=err_tmp) + call mpas_stream_mgr_reset_alarms(domain % streamManager, streamID='restart', ierr=err_tmp) + call mpas_stream_mgr_reset_alarms(domain % streamManager, direction=MPAS_STREAM_OUTPUT, ierr=err_tmp) + call mpas_timer_stop('reset_io_alarms') + + ! Initialize submodules before initializing blocks. 
+ call ocn_timestep_init(ierr) + + call ocn_thick_hadv_init(err_tmp) + ierr = ior(ierr, err_tmp) + call ocn_thick_vadv_init(err_tmp) + ierr = ior(ierr, err_tmp) + call ocn_thick_surface_flux_init(err_tmp) + ierr = ior(ierr, err_tmp) + call ocn_thick_ale_init(err_tmp) + ierr = ior(ierr,err_tmp) + + call ocn_vel_coriolis_init(err_tmp) + ierr = ior(ierr, err_tmp) + call ocn_vel_pressure_grad_init(err_tmp) + ierr = ior(ierr, err_tmp) + call ocn_vel_vadv_init(err_tmp) + ierr = ior(ierr, err_tmp) + call ocn_vel_hmix_init(err_tmp) + ierr = ior(ierr, err_tmp) + call ocn_vel_forcing_init(err_tmp) + ierr = ior(ierr, err_tmp) + + call ocn_tracer_hmix_init(err_tmp) + ierr = ior(ierr, err_tmp) + call ocn_tracer_surface_flux_init(err_tmp) + ierr = ior(ierr, err_tmp) + call ocn_tracer_advection_init(err_tmp) + ierr = ior(ierr,err_tmp) + call ocn_tracer_short_wave_absorption_init(err_tmp) + ierr = ior(ierr,err_tmp) + call ocn_gm_init(err_tmp) + ierr = ior(ierr,err_tmp) + call ocn_tracer_nonlocalflux_init(err_tmp) + ierr = ior(ierr,err_tmp) + + call ocn_vmix_init(domain, err_tmp) + ierr = ior(ierr, err_tmp) + + call ocn_equation_of_state_init(err_tmp) + ierr = ior(ierr, err_tmp) + + call ocn_tendency_init(err_tmp) + ierr = ior(ierr,err_tmp) + call ocn_diagnostics_init(err_tmp) + ierr = ior(ierr,err_tmp) + + call ocn_forcing_init(err_tmp) + ierr = ior(ierr,err_tmp) + + call ocn_high_freq_thickness_hmix_del2_init(err_tmp) + ierr = ior(ierr,err_tmp) + + call mpas_pool_get_dimension(domain % blocklist % dimensions, 'nVertLevels', nVertLevels) + call ocn_sea_ice_init(nVertLevels, err_tmp) + ierr = ior(ierr, err_tmp) + + call ocn_analysis_init(domain, err_tmp) + ierr = ior(ierr, err_tmp) + + if(ierr.eq.1) then + call mpas_dmpar_global_abort('ERROR: An error was encountered while initializing the MPAS-Ocean forward mode') + endif + + call ocn_init_routines_vert_coord(domain) + + call ocn_init_routines_compute_max_level(domain) + + if (.not.config_do_restart) call 
ocn_time_integration_split_init(domain) + + write (stdoutUnit,'(a,a)') ' Vertical coordinate movement is: ',trim(config_vert_coord_movement) + + if (config_vert_coord_movement.ne.'fixed'.and. & + config_vert_coord_movement.ne.'uniform_stretching'.and. & + config_vert_coord_movement.ne.'impermeable_interfaces'.and. & + config_vert_coord_movement.ne.'user_specified') then + write (stderrUnit,*) ' Incorrect choice of config_vert_coord_movement.' + call mpas_dmpar_global_abort('ERROR: Incorrect choice of config_vert_coord_movement.') + endif + + if(config_vert_coord_movement .ne. 'impermeable_interfaces' .and. config_pressure_gradient_type .eq. 'MontgomeryPotential') then + write (stderrUnit,*) ' Incorrect combination of config_vert_coord_movement and config_pressure_gradient_type' + call mpas_dmpar_global_abort('ERROR: Incorrect combination of config_vert_coord_movement and config_pressure_gradient_type') + end if + + if (config_filter_btr_mode.and. & + config_vert_coord_movement.ne.'fixed')then + write (stderrUnit,*) 'filter_btr_mode has only been tested with'// & + ' config_vert_coord_movement=fixed.' + call mpas_dmpar_global_abort('ERROR: filter_btr_mode has only been tested with config_vert_coord_movement=fixed.') + endif + + ! find the maximum value of the meshDensity + if (config_maxMeshDensity < 0.0) then + maxDensity=-1 + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_array(meshPool, 'meshDensity', meshDensity) + maxDensity = max(maxDensity, maxval(meshDensity)) + block => block % next + end do + call mpas_dmpar_max_real(domain % dminfo, maxDensity, maxDensity_global) + config_maxMeshDensity = maxDensity_global + endif + + ! + ! Initialize core + ! 
+ timeStep = mpas_get_clock_timestep(domain % clock, ierr=err_tmp) + call mpas_get_timeInterval(timeStep, dt=dt) + + block => domain % blocklist + do while (associated(block)) + call ocn_init_routines_block(block, dt, ierr) + if(ierr.eq.1) then + call mpas_dmpar_global_abort('ERROR: An error was encountered in ocn_init_routines_block') + endif + + call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) + call mpas_pool_get_array(diagnosticsPool, 'xtime', xtime) + xtime = startTimeStamp + block => block % next + end do + + if (config_conduct_tests) then + call mpas_timer_start("test suite") + call ocn_test_suite(domain,ierr) + call mpas_timer_stop("test suite") + endif + + end function ocn_forward_mode_init!}}} + +!*********************************************************************** +! +! function ocn_forward_mode_setup_clock +! +!> \brief Initialize timer variables +!> \author Doug Jacobsen, Mark Petersen, Todd Ringler +!> \date September 2011 +!> \details +!> This routine initializes all timer variables +! 
+!----------------------------------------------------------------------- + + function ocn_forward_mode_setup_clock(core_clock, configs) result(ierr)!{{{ + + implicit none + + type (MPAS_Clock_type), intent(inout) :: core_clock + type (mpas_pool_type), intent(inout) :: configs + integer :: ierr + + type (MPAS_Time_Type) :: startTime, stopTime, alarmStartTime + type (MPAS_TimeInterval_type) :: runDuration, timeStep, alarmTimeStep + character(len=StrKIND) :: restartTimeStamp + character(len=StrKIND), pointer :: config_start_time, config_stop_time, config_run_duration + character(len=StrKIND), pointer :: config_dt, config_restart_timestamp_name + integer :: err_tmp + + ierr = 0 + + call mpas_pool_get_config(configs, 'config_dt', config_dt) + call mpas_pool_get_config(configs, 'config_start_time', config_start_time) + call mpas_pool_get_config(configs, 'config_stop_time', config_stop_time) + call mpas_pool_get_config(configs, 'config_run_duration', config_run_duration) + call mpas_pool_get_config(configs, 'config_restart_timestamp_name', config_restart_timestamp_name) + + if ( trim(config_start_time) == "file" ) then + open(22,file=config_restart_timestamp_name,form='formatted',status='old') + read(22,*) restartTimeStamp + close(22) + call mpas_set_time(curr_time=startTime, dateTimeString=restartTimeStamp, ierr=ierr) + else + call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time, ierr=err_tmp) + end if + + call mpas_set_timeInterval(timeStep, timeString=config_dt, ierr=err_tmp) + if (trim(config_run_duration) /= "none") then + call mpas_set_timeInterval(runDuration, timeString=config_run_duration, ierr=err_tmp) + call mpas_create_clock(core_clock, startTime=startTime, timeStep=timeStep, runDuration=runDuration, ierr=err_tmp) + + if (trim(config_stop_time) /= "none") then + call mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=err_tmp) + if(startTime + runduration /= stopTime) then + write(stderrUnit,*) 'Warning: 
config_run_duration and config_stop_time are inconsistent: using config_run_duration.' + end if + end if + else if (trim(config_stop_time) /= "none") then + call mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=err_tmp) + call mpas_create_clock(core_clock, startTime=startTime, timeStep=timeStep, stopTime=stopTime, ierr=err_tmp) + else + write(stderrUnit,*) 'Error: Neither config_run_duration nor config_stop_time were specified.' + ierr = 1 + end if + + end function ocn_forward_mode_setup_clock!}}} + +!*********************************************************************** +! +! function ocn_forward_mode_run +! +!> \brief Main driver for MPAS-Ocean time-stepping +!> \author Doug Jacobsen, Mark Petersen, Todd Ringler +!> \date September 2011 +!> \details +!> This function includes the time-stepping loop, and calls +!> routines to write output and restart files. +! +!----------------------------------------------------------------------- + + function ocn_forward_mode_run(domain) result(ierr)!{{{ + + type (domain_type), intent(inout) :: domain + + integer :: itimestep, err + real (kind=RKIND) :: dt + type (block_type), pointer :: block_ptr + + type (MPAS_Time_Type) :: currTime + character(len=StrKIND) :: timeStamp + integer :: ierr + + type (mpas_pool_type), pointer :: averagePool + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: statePool + type (mpas_pool_type), pointer :: forcingPool + + type (MPAS_timeInterval_type) :: timeStep + character(len=StrKIND), pointer :: config_restart_timestamp_name + logical, pointer :: config_write_output_on_startup + + ierr = 0 + + call mpas_pool_get_config(domain % configs, 'config_write_output_on_startup', config_write_output_on_startup) + call mpas_pool_get_config(domain % configs, 'config_restart_timestamp_name', config_restart_timestamp_name) + + ! 
Eventually, dt should be domain specific + timeStep = mpas_get_clock_timestep(domain % clock, ierr=ierr) + call mpas_get_timeInterval(timeStep, dt=dt) + + currTime = mpas_get_clock_time(domain % clock, MPAS_NOW, ierr) + call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr) + write(stderrUnit,*) 'Initial time ', trim(timeStamp) + + call ocn_analysis_compute_startup(domain, err) + + if (config_write_output_on_startup) then + call mpas_timer_start('io_write') + call mpas_stream_mgr_write(domain % streamManager, 'output', forceWriteNow=.true., ierr=ierr) + call mpas_timer_stop('io_write') + endif + + block_ptr => domain % blocklist + do while(associated(block_ptr)) + call mpas_pool_get_subpool(block_ptr % structs, 'average', averagePool) + call ocn_time_average_init(averagePool) + block_ptr => block_ptr % next + end do + + ! During integration, time level 1 stores the model state at the beginning of the + ! time step, and time level 2 stores the state advanced dt in time by timestep(...) + itimestep = 0 + do while (.not. 
mpas_is_clock_stop_time(domain % clock)) + call mpas_timer_start('io_read') + call mpas_stream_mgr_read(domain % streamManager, ierr=ierr) + call mpas_timer_stop('io_read') + call mpas_timer_start('reset_io_alarms') + call mpas_stream_mgr_reset_alarms(domain % streamManager, direction=MPAS_STREAM_INPUT, ierr=ierr) + call mpas_timer_stop('reset_io_alarms') + + itimestep = itimestep + 1 + call mpas_advance_clock(domain % clock) + + currTime = mpas_get_clock_time(domain % clock, MPAS_NOW, ierr) + call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr) + write(stderrUnit,*) 'Doing timestep ', trim(timeStamp) + + block_ptr => domain % blocklist + do while(associated(block_ptr)) + call mpas_pool_get_subpool(block_ptr % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block_ptr % structs, 'state', statePool) + call mpas_pool_get_subpool(block_ptr % structs, 'forcing', forcingPool) + call ocn_forcing_build_arrays(meshPool, statePool, forcingPool, ierr, 1) + call ocn_forcing_build_fraction_absorbed_array(meshPool, statePool, forcingpool, ierr, 1) + block_ptr => block_ptr % next + end do + + call mpas_timer_start("time integration") + call ocn_timestep(domain, dt, timeStamp) + call mpas_timer_stop("time integration") + + ! 
Move time level 2 fields back into time level 1 for next time step + block_ptr => domain % blocklist + do while(associated(block_ptr)) + call mpas_pool_get_subpool(block_ptr % structs, 'state', statePool) + call mpas_pool_shift_time_levels(statePool) + block_ptr => block_ptr % next + end do + + call ocn_analysis_compute(domain, err) + call ocn_analysis_write(domain, err) + + call mpas_timer_start('io_write') + call mpas_stream_mgr_write(domain % streamManager, streamID='output', ierr=ierr) + call mpas_timer_stop('io_write') + call mpas_timer_start('reset_io_alarms') + call mpas_stream_mgr_reset_alarms(domain % streamManager, streamID='output', ierr=ierr) + call mpas_timer_stop('reset_io_alarms') + + call mpas_timer_start('io_write') + call mpas_stream_mgr_write(domain % streamManager, streamID='restart', ierr=ierr) + call mpas_timer_stop('io_write') + + if ( mpas_stream_mgr_ringing_alarms(domain % streamManager, streamID='restart', direction=MPAS_STREAM_OUTPUT, ierr=ierr) ) then + open(22, file=config_restart_timestamp_name, form='formatted', status='replace') + write(22, *) trim(timeStamp) + close(22) + end if + + call mpas_timer_start('reset_io_alarms') + call mpas_stream_mgr_reset_alarms(domain % streamManager, streamID='restart', ierr=ierr) + call mpas_timer_stop('reset_io_alarms') + + call mpas_timer_start('io_write') + call mpas_stream_mgr_write(domain % streamManager, ierr=ierr) + call mpas_timer_stop('io_write') + call mpas_timer_start('reset_io_alarms') + call mpas_stream_mgr_reset_alarms(domain % streamManager, direction=MPAS_STREAM_OUTPUT, ierr=ierr) + call mpas_timer_stop('reset_io_alarms') + end do + end function ocn_forward_mode_run!}}} + +!*********************************************************************** +! +! function ocn_forward_mode_finalize +! 
+!> \brief Finalize MPAS-Ocean Forward Mode +!> \author Doug Jacobsen, Mark Petersen, Todd Ringler +!> \date September 2011 +!> \details +!> This function finalizes the MPAS-Ocean core in forward mode. +! +!----------------------------------------------------------------------- + + function ocn_forward_mode_finalize(domain) result(iErr)!{{{ + + type (domain_type), intent(inout) :: domain + + integer :: ierr + + call ocn_analysis_finalize(domain, ierr) + + call mpas_destroy_clock(domain % clock, ierr) + + call mpas_decomp_destroy_decomp_list(domain % decompositions) + + end function ocn_forward_mode_finalize!}}} + +end module ocn_forward_mode + +! vim: foldmethod=marker diff --git a/src/core_ocean/mode_forward/mpas_ocn_time_integration_rk4.F b/src/core_ocean/mode_forward/mpas_ocn_time_integration_rk4.F index d0148a0cbe..100d323b0a 100644 --- a/src/core_ocean/mode_forward/mpas_ocn_time_integration_rk4.F +++ b/src/core_ocean/mode_forward/mpas_ocn_time_integration_rk4.F @@ -165,12 +165,7 @@ subroutine ocn_time_integrator_rk4(domain, dt)!{{{ ! Forcing Array pointers real (kind=RKIND), dimension(:), pointer :: seaIceEnergy - ! Diagnostics Field Pointers - type (field1DReal), pointer :: boundaryLayerDepthField - type (field2DReal), pointer :: normalizedRelativeVorticityEdgeField, divergenceField, relativeVorticityField - ! State/Tend Field Pointers - type (field2DReal), pointer :: highFreqThicknessField, lowFreqDivergenceField type (field2DReal), pointer :: normalVelocityField, layerThicknessField type (field3DReal), pointer :: tracersField @@ -309,21 +304,16 @@ subroutine ocn_time_integrator_rk4(domain, dt)!{{{ ! Update halos for diagnostic variables. 
call mpas_timer_start("RK4-boundary layer depth halo update") if (config_use_cvmix_kpp) then - call mpas_pool_get_field(diagnosticsPool, 'boundaryLayerDepth', boundaryLayerDepthField) - call mpas_dmpar_exch_halo_field(boundaryLayerDepthField) + call mpas_dmpar_field_halo_exch(domain, 'boundaryLayerDepth') end if call mpas_timer_stop("RK4-boundary layer depth halo update") call mpas_timer_start("RK4-diagnostic halo update") - call mpas_pool_get_field(diagnosticsPool, 'normalizedRelativeVorticityEdge', normalizedRelativeVorticityEdgeField) - call mpas_pool_get_field(diagnosticsPool, 'divergence', divergenceField) - call mpas_pool_get_field(diagnosticsPool, 'relativeVorticity', relativeVorticityField) - - call mpas_dmpar_exch_halo_field(normalizedRelativeVorticityEdgeField) + call mpas_dmpar_field_halo_exch(domain, 'normalizedRelativeVorticityEdge') if (config_mom_del4 > 0.0) then - call mpas_dmpar_exch_halo_field(divergenceField) - call mpas_dmpar_exch_halo_field(relativeVorticityField) + call mpas_dmpar_field_halo_exch(domain, 'divergence') + call mpas_dmpar_field_halo_exch(domain, 'relativeVorticity') end if call mpas_timer_stop("RK4-diagnostic halo update") @@ -347,13 +337,8 @@ subroutine ocn_time_integrator_rk4(domain, dt)!{{{ call mpas_timer_stop("RK4-tendency computations") call mpas_timer_start("RK4-prognostic halo update") - call mpas_pool_get_subpool(domain % blocklist % structs, 'tend', tendPool) - - call mpas_pool_get_field(tendPool, 'highFreqThickness', highFreqThicknessField) - call mpas_pool_get_field(tendPool, 'lowFreqDivergence', lowFreqDivergenceField) - - call mpas_dmpar_exch_halo_field(highFreqThicknessField) - call mpas_dmpar_exch_halo_field(lowFreqDivergenceField) + call mpas_dmpar_field_halo_exch(domain, 'tendHighFreqThickness') + call mpas_dmpar_field_halo_exch(domain, 'tendLowFreqDivergence') call mpas_timer_stop("RK4-prognostic halo update") ! Compute next substep state for high frequency thickness. 
@@ -442,15 +427,9 @@ subroutine ocn_time_integrator_rk4(domain, dt)!{{{ ! Update halos for prognostic variables. call mpas_timer_start("RK4-prognostic halo update") - call mpas_pool_get_subpool(domain % blocklist % structs, 'tend', tendPool) - - call mpas_pool_get_field(tendPool, 'normalVelocity', normalVelocityField) - call mpas_pool_get_field(tendPool, 'layerThickness', layerThicknessField) - call mpas_pool_get_field(tendPool, 'tracers', tracersField) - - call mpas_dmpar_exch_halo_field(normalVelocityField) - call mpas_dmpar_exch_halo_field(layerThicknessField) - call mpas_dmpar_exch_halo_field(tracersField) + call mpas_dmpar_field_halo_exch(domain, 'tendNormalVelocity') + call mpas_dmpar_field_halo_exch(domain, 'tendLayerThickness') + call mpas_dmpar_field_halo_exch(domain, 'tendTracers') call mpas_timer_stop("RK4-prognostic halo update") ! Compute next substep state for velocity, thickness, and tracers. @@ -685,13 +664,8 @@ subroutine ocn_time_integrator_rk4(domain, dt)!{{{ ! conducted on tendencies, not on the velocity and tracer fields. So this update is required to ! communicate the change due to implicit vertical mixing across the boundary. 
call mpas_timer_start("RK4-implicit vert mix halos") - call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) - - call mpas_pool_get_field(statePool, 'normalVelocity', normalVelocityField, 2) - call mpas_pool_get_field(statePool, 'tracers', tracersField, 2) - - call mpas_dmpar_exch_halo_field(normalVelocityField) - call mpas_dmpar_exch_halo_field(tracersField) + call mpas_dmpar_field_halo_exch(domain, 'normalVelocity', timeLevel=2) + call mpas_dmpar_field_halo_exch(domain, 'tracers', timeLevel=2) call mpas_timer_stop("RK4-implicit vert mix halos") call mpas_timer_stop("RK4-implicit vert mix") diff --git a/src/core_ocean/mode_forward/mpas_ocn_time_integration_split.F b/src/core_ocean/mode_forward/mpas_ocn_time_integration_split.F index ed7121c7ed..f52a987b96 100644 --- a/src/core_ocean/mode_forward/mpas_ocn_time_integration_split.F +++ b/src/core_ocean/mode_forward/mpas_ocn_time_integration_split.F @@ -56,11 +56,7 @@ module ocn_time_integration_split ! !-------------------------------------------------------------------- - public :: ocn_time_integrator_split - - type (timer_node), pointer :: timer_main, timer_prep, timer_bcl_vel, timer_btr_vel, timer_diagnostic_update, timer_implicit_vmix, & - timer_halo_diagnostic, timer_halo_normalBarotropicVelocity, timer_halo_ssh, timer_halo_f, timer_halo_thickness, & - timer_halo_tracers, timer_halo_normalBaroclinicVelocity + public :: ocn_time_integrator_split, ocn_time_integration_split_init contains @@ -181,18 +177,7 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ ! Forcing Array Pointer real (kind=RKIND), dimension(:), pointer :: seaIceEnergy - ! Diagnostics Field Pointers - type (field2DReal), pointer :: normalizedRelativeVorticityEdgeField, divergenceField, relativeVorticityField - type (field1DReal), pointer :: barotropicThicknessFluxField, boundaryLayerDepthField - - ! 
State/Tend Field Pointers - type (field1DReal), pointer :: normalBarotropicVelocitySubcycleField, sshSubcycleField - type (field2DReal), pointer :: highFreqThicknessField, lowFreqDivergenceField - type (field2DReal), pointer :: normalBaroclinicVelocityField, layerThicknessField - type (field2DReal), pointer :: normalVelocityField - type (field3DReal), pointer :: tracersField - - call mpas_timer_start("se timestep", .false., timer_main) + call mpas_timer_start("se timestep") call mpas_pool_get_config(domain % configs, 'config_n_bcl_iter_beg', config_n_bcl_iter_beg) call mpas_pool_get_config(domain % configs, 'config_n_bcl_iter_mid', config_n_bcl_iter_mid) @@ -228,7 +213,7 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ ! Prep variables before first iteration ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - call mpas_timer_start("se prep", .false., timer_prep) + call mpas_timer_start("se prep") block => domain % blocklist do while (associated(block)) call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) @@ -306,7 +291,7 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ block => block % next end do - call mpas_timer_stop("se prep", timer_prep) + call mpas_timer_stop("se prep") !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! BEGIN large iteration loop !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -320,25 +305,21 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ call mpas_pool_get_subpool(domain % blocklist % structs, 'diagnostics', diagnosticsPool) ! 
--- update halos for diagnostic ocean boundayr layer depth - call mpas_timer_start("se halo diag obd", .false., timer_halo_diagnostic) + call mpas_timer_start("se halo diag obd") if (config_use_cvmix_kpp) then - call mpas_pool_get_field(diagnosticsPool, 'boundaryLayerDepth', boundaryLayerDepthField) - call mpas_dmpar_exch_halo_field(boundaryLayerDepthField) + call mpas_dmpar_field_halo_exch(domain, 'boundaryLayerDepth') end if call mpas_timer_stop("se halo diag obd") ! --- update halos for diagnostic variables - call mpas_timer_start("se halo diag", .false., timer_halo_diagnostic) + call mpas_timer_start("se halo diag") - call mpas_pool_get_field(diagnosticsPool, 'normalizedRelativeVorticityEdge', normalizedRelativeVorticityEdgeField) - call mpas_pool_get_field(diagnosticsPool, 'divergence', divergenceField) - call mpas_pool_get_field(diagnosticsPool, 'relativeVorticity', relativeVorticityField) - call mpas_dmpar_exch_halo_field(normalizedRelativeVorticityEdgeField) + call mpas_dmpar_field_halo_exch(domain, 'normalizedRelativeVorticityEdge') if (config_mom_del4 > 0.0) then - call mpas_dmpar_exch_halo_field(divergenceField) - call mpas_dmpar_exch_halo_field(relativeVorticityField) + call mpas_dmpar_field_halo_exch(domain, 'divergence') + call mpas_dmpar_field_halo_exch(domain, 'relativeVorticity') end if - call mpas_timer_stop("se halo diag", timer_halo_diagnostic) + call mpas_timer_stop("se halo diag") !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! 
@@ -361,13 +342,8 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ call mpas_timer_stop("se freq-filtered-thick computations") call mpas_timer_start("se freq-filtered-thick halo update") - call mpas_pool_get_subpool(domain % blocklist % structs, 'tend', tendPool) - - call mpas_pool_get_field(tendPool, 'highFreqThickness', highFreqThicknessField) - call mpas_pool_get_field(tendPool, 'lowFreqDivergence', lowFreqDivergenceField) - - call mpas_dmpar_exch_halo_field(highFreqThicknessField) - call mpas_dmpar_exch_halo_field(lowFreqDivergenceField) + call mpas_dmpar_field_halo_exch(domain, 'tendHighFreqThickness') + call mpas_dmpar_field_halo_exch(domain, 'tendLowFreqDivergence') call mpas_timer_stop("se freq-filtered-thick halo update") block => domain % blocklist @@ -398,7 +374,7 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ ! compute velocity tendencies, T(u*,w*,p*) - call mpas_timer_start("se bcl vel", .false., timer_bcl_vel) + call mpas_timer_start("se bcl vel") block => domain % blocklist do while (associated(block)) @@ -523,16 +499,13 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ block => block % next end do - call mpas_timer_start("se halo normalBaroclinicVelocity", .false., timer_halo_normalBaroclinicVelocity) - call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) - call mpas_pool_get_field(statePool, 'normalBaroclinicVelocity', normalBaroclinicVelocityField, 2) - - call mpas_dmpar_exch_halo_field(normalBaroclinicVelocityField) - call mpas_timer_stop("se halo normalBaroclinicVelocity", timer_halo_normalBaroclinicVelocity) + call mpas_timer_start("se halo normalBaroclinicVelocity") + call mpas_dmpar_field_halo_exch(domain, 'normalBaroclinicVelocity', timeLevel=2) + call mpas_timer_stop("se halo normalBaroclinicVelocity") end do ! do j=1,config_n_bcl_iter - call mpas_timer_stop("se bcl vel", timer_bcl_vel) + call mpas_timer_stop("se bcl vel") !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! 
END baroclinic iterations on linear Coriolis term !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -544,7 +517,7 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - call mpas_timer_start("se btr vel", .false., timer_btr_vel) + call mpas_timer_start("se btr vel") oldBtrSubcycleTime = 1 newBtrSubcycleTime = 2 @@ -693,12 +666,9 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ end do ! block ! boundary update on normalBarotropicVelocityNew - call mpas_timer_start("se halo normalBarotropicVelocity", .false., timer_halo_normalBarotropicVelocity) - call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) - - call mpas_pool_get_field(statePool, 'normalBarotropicVelocitySubcycle', normalBarotropicVelocitySubcycleField, newBtrSubcycleTime) - call mpas_dmpar_exch_halo_field(normalBarotropicVelocitySubcycleField) - call mpas_timer_stop("se halo normalBarotropicVelocity", timer_halo_normalBarotropicVelocity) + call mpas_timer_start("se halo normalBarotropicVelocity") + call mpas_dmpar_field_halo_exch(domain, 'normalBarotropicVelocitySubcycle', timeLevel=newBtrSubcycleTime) + call mpas_timer_stop("se halo normalBarotropicVelocity") endif ! config_btr_gam1_velWt1>1.0e-12 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -813,12 +783,9 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ end do ! block ! 
boundary update on SSHnew - call mpas_timer_start("se halo ssh", .false., timer_halo_ssh) - call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) - - call mpas_pool_get_field(statePool, 'sshSubcycle', sshSubcycleField, newBtrSubcycleTime) - call mpas_dmpar_exch_halo_field(sshSubcycleField) - call mpas_timer_stop("se halo ssh", timer_halo_ssh) + call mpas_timer_start("se halo ssh") + call mpas_dmpar_field_halo_exch(domain, 'sshSubcycle', timeLevel=newBtrSubcycleTime) + call mpas_timer_stop("se halo ssh") !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! Barotropic subcycle: VELOCITY CORRECTOR STEP @@ -881,13 +848,9 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ end do ! block ! boundary update on normalBarotropicVelocityNew - call mpas_timer_start("se halo normalBarotropicVelocity", .false., timer_halo_normalBarotropicVelocity) - call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) - - call mpas_pool_get_field(statePool, 'normalBarotropicVelocitySubcycle', normalBarotropicVelocitySubcycleField, newBtrSubcycleTime) - - call mpas_dmpar_exch_halo_field(normalBarotropicVelocitySubcycleField) - call mpas_timer_stop("se halo normalBarotropicVelocity", timer_halo_normalBarotropicVelocity) + call mpas_timer_start("se halo normalBarotropicVelocity") + call mpas_dmpar_field_halo_exch(domain, 'normalBarotropicVelocitySubcycle', timeLevel=newBtrSubcycleTime) + call mpas_timer_stop("se halo normalBarotropicVelocity") end do !do BtrCorIter=1,config_n_btr_cor_iter !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -1001,13 +964,9 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ end do ! block ! 
boundary update on SSHnew - call mpas_timer_start("se halo ssh", .false., timer_halo_ssh) - call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) - - call mpas_pool_get_field(statePool, 'sshSubcycle', sshSubcycleField) - - call mpas_dmpar_exch_halo_field(sshSubcycleField) - call mpas_timer_stop("se halo ssh", timer_halo_ssh) + call mpas_timer_start("se halo ssh") + call mpas_dmpar_field_halo_exch(domain, 'sshSubcycle') + call mpas_timer_stop("se halo ssh") endif ! config_btr_solve_SSH2 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -1067,13 +1026,9 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ ! boundary update on F - call mpas_timer_start("se halo F", .false., timer_halo_f) - call mpas_pool_get_subpool(domain % blocklist % structs, 'diagnostics', diagnosticsPool) - - call mpas_pool_get_field(diagnosticsPool, 'barotropicThicknessFlux', barotropicThicknessFluxField) - - call mpas_dmpar_exch_halo_field(barotropicThicknessFluxField) - call mpas_timer_stop("se halo F", timer_halo_f) + call mpas_timer_start("se halo F") + call mpas_dmpar_field_halo_exch(domain, 'barotropicThicknessFlux') + call mpas_timer_stop("se halo F") ! Check that you can compute SSH using the total sum or the individual increments @@ -1156,7 +1111,7 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ endif ! split_explicit - call mpas_timer_stop("se btr vel", timer_btr_vel) + call mpas_timer_stop("se btr vel") !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! @@ -1202,13 +1157,9 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ end do ! 
update halo for thickness tendencies - call mpas_timer_start("se halo thickness", .false., timer_halo_thickness) - call mpas_pool_get_subpool(domain % blocklist % structs, 'tend', tendPool) - - call mpas_pool_get_field(tendPool, 'layerThickness', layerThicknessField) - - call mpas_dmpar_exch_halo_field(layerThicknessField) - call mpas_timer_stop("se halo thickness", timer_halo_thickness) + call mpas_timer_start("se halo thickness") + call mpas_dmpar_field_halo_exch(domain, 'tendLayerThickness') + call mpas_timer_stop("se halo thickness") block => domain % blocklist do while (associated(block)) @@ -1223,13 +1174,9 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ end do ! update halo for tracer tendencies - call mpas_timer_start("se halo tracers", .false., timer_halo_tracers) - call mpas_pool_get_subpool(domain % blocklist % structs, 'tend', tendPool) - - call mpas_pool_get_field(tendPool, 'tracers', tracersField) - - call mpas_dmpar_exch_halo_field(tracersField) - call mpas_timer_stop("se halo tracers", timer_halo_tracers) + call mpas_timer_start("se halo tracers") + call mpas_dmpar_field_halo_exch(domain, 'tendTracers') + call mpas_timer_stop("se halo tracers") block => domain % blocklist do while (associated(block)) @@ -1455,13 +1402,8 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ ! conducted on tendencies, not on the velocity and tracer fields. So this update is required to ! communicate the change due to implicit vertical mixing across the boundary. 
call mpas_timer_start("se implicit vert mix halos") - call mpas_pool_get_subpool(domain % blocklist % structs, 'state', statePool) - - call mpas_pool_get_field(statePool, 'normalVelocity', normalVelocityField, 2) - call mpas_pool_get_field(statePool, 'tracers', tracersField, 2) - - call mpas_dmpar_exch_halo_field(normalVelocityField) - call mpas_dmpar_exch_halo_field(tracersField) + call mpas_dmpar_field_halo_exch(domain, 'normalVelocity', timeLevel=2) + call mpas_dmpar_field_halo_exch(domain, 'tracers', timeLevel=2) call mpas_timer_stop("se implicit vert mix halos") call mpas_timer_stop("se implicit vert mix") @@ -1543,12 +1485,136 @@ subroutine ocn_time_integrator_split(domain, dt)!{{{ block => block % next end do - call mpas_timer_stop("se timestep", timer_main) + call mpas_timer_stop("se timestep") deallocate(n_bcl_iter) end subroutine ocn_time_integrator_split!}}} +!*********************************************************************** +! +! routine ocn_time_integration_split_init +! +!> \brief Initialize split-explicit time stepping within MPAS-Ocean core +!> \author Mark Petersen +!> \date September 2011 +!> \details +!> This routine initializes variables required for the split-explicit time +!> stepper. +! +!----------------------------------------------------------------------- + subroutine ocn_time_integration_split_init(domain)!{{{ + ! 
Initialize splitting variables + + type (domain_type), intent(inout) :: domain + + integer :: i, iCell, iEdge, iVertex, k + type (block_type), pointer :: block + + type (mpas_pool_type), pointer :: statePool, meshPool + + integer :: iTracer, cell, cell1, cell2 + integer, dimension(:), pointer :: maxLevelEdgeTop + integer, dimension(:,:), pointer :: cellsOnEdge + real (kind=RKIND) :: normalThicknessFluxSum, layerThicknessSum, layerThicknessEdge1 + real (kind=RKIND), dimension(:), pointer :: refBottomDepth, normalBarotropicVelocity + + real (kind=RKIND), dimension(:,:), pointer :: layerThickness + real (kind=RKIND), dimension(:,:), pointer :: normalBaroclinicVelocity, normalVelocity + integer, pointer :: nVertLevels, nCells, nEdges + character (len=StrKIND), pointer :: config_time_integrator + logical, pointer :: config_filter_btr_mode + + ! Initialize z-level mesh variables from h, read in from input file. + block => domain % blocklist + do while (associated(block)) + call mpas_pool_get_config(block % configs, 'config_time_integrator', config_time_integrator) + call mpas_pool_get_config(block % configs, 'config_filter_btr_mode', config_filter_btr_mode) + call mpas_pool_get_subpool(block % structs, 'state', statePool) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + + call mpas_pool_get_dimension(block % dimensions, 'nVertLevels', nVertLevels) + call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) + call mpas_pool_get_dimension(block % dimensions, 'nEdges', nEdges) + + call mpas_pool_get_array(statePool, 'layerThickness', layerThickness, 1) + call mpas_pool_get_array(statePool, 'normalVelocity', normalVelocity, 1) + call mpas_pool_get_array(statePool, 'normalBarotropicVelocity', normalBarotropicVelocity, 1) + call mpas_pool_get_array(statePool, 'normalBaroclinicVelocity', normalBaroclinicVelocity, 1) + + call mpas_pool_get_array(meshPool, 'refBottomDepth', refBottomDepth) + call mpas_pool_get_array(meshPool, 'cellsOnEdge', cellsOnEdge) 
+ call mpas_pool_get_array(meshPool, 'maxLevelEdgeTop', maxLevelEdgeTop) + + ! Compute barotropic velocity at first timestep + ! This is only done upon start-up. + if (trim(config_time_integrator) == 'unsplit_explicit') then + call mpas_pool_get_array(statePool, 'normalBarotropicVelocity', normalBarotropicVelocity) + normalBarotropicVelocity(:) = 0.0 + + normalBaroclinicVelocity(:,:) = normalVelocity(:,:) + + elseif (trim(config_time_integrator) == 'split_explicit') then + + if (config_filter_btr_mode) then + do iCell = 1, nCells + layerThickness(1,iCell) = refBottomDepth(1) + enddo + endif + + do iEdge = 1, nEdges + cell1 = cellsOnEdge(1,iEdge) + cell2 = cellsOnEdge(2,iEdge) + + ! normalBarotropicVelocity = sum(u)/sum(h) on each column + ! ocn_diagnostic_solve has not yet been called, so compute hEdge + ! just for this edge. + + ! thicknessSum is initialized outside the loop because on land boundaries + ! maxLevelEdgeTop=0, but I want to initialize thicknessSum with a + ! nonzero value to avoid a NaN. + layerThicknessEdge1 = 0.5*( layerThickness(1,cell1) + layerThickness(1,cell2) ) + normalThicknessFluxSum = layerThicknessEdge1 * normalVelocity(1,iEdge) + layerThicknessSum = layerThicknessEdge1 + + do k=2, maxLevelEdgeTop(iEdge) + ! ocn_diagnostic_solve has not yet been called, so compute hEdge + ! just for this edge. + layerThicknessEdge1 = 0.5*( layerThickness(k,cell1) + layerThickness(k,cell2) ) + + normalThicknessFluxSum = normalThicknessFluxSum & + + layerThicknessEdge1 * normalVelocity(k,iEdge) + layerThicknessSum = layerThicknessSum + layerThicknessEdge1 + + enddo + normalBarotropicVelocity(iEdge) = normalThicknessFluxSum / layerThicknessSum + + ! normalBaroclinicVelocity(k,iEdge) = normalVelocity(k,iEdge) - normalBarotropicVelocity(iEdge) + do k = 1, maxLevelEdgeTop(iEdge) + normalBaroclinicVelocity(k,iEdge) = normalVelocity(k,iEdge) - normalBarotropicVelocity(iEdge) + enddo + + ! 
normalBaroclinicVelocity=0, normalVelocity=0 on land cells + do k = maxLevelEdgeTop(iEdge)+1, nVertLevels + normalBaroclinicVelocity(k,iEdge) = 0.0 + normalVelocity(k,iEdge) = 0.0 + enddo + enddo + + if (config_filter_btr_mode) then + ! filter normalBarotropicVelocity out of initial condition + normalVelocity(:,:) = normalBaroclinicVelocity(:,:) + + normalBarotropicVelocity(:) = 0.0 + endif + + endif + + block => block % next + end do + + end subroutine ocn_time_integration_split_init!}}} + end module ocn_time_integration_split ! vim: foldmethod=marker diff --git a/src/core_ocean/shared/Makefile b/src/core_ocean/shared/Makefile index a4eea294ca..bfcb3e7fe5 100644 --- a/src/core_ocean/shared/Makefile +++ b/src/core_ocean/shared/Makefile @@ -1,5 +1,5 @@ .SUFFIXES: .F .o -OBJS = mpas_ocn_init.o \ +OBJS = mpas_ocn_init_routines.o \ mpas_ocn_gm.o \ mpas_ocn_diagnostics.o \ mpas_ocn_diagnostics_routines.o \ @@ -36,7 +36,6 @@ OBJS = mpas_ocn_init.o \ mpas_ocn_tracer_short_wave_absorption_jerlov.o \ mpas_ocn_high_freq_thickness_hmix_del2.o \ mpas_ocn_tracer_surface_flux.o \ - mpas_ocn_global_diagnostics.o \ mpas_ocn_test.o \ mpas_ocn_constants.o \ mpas_ocn_forcing.o \ @@ -48,7 +47,7 @@ OBJS = mpas_ocn_init.o \ all: $(OBJS) -mpas_ocn_init.o: mpas_ocn_constants.o +mpas_ocn_init_routines.o: mpas_ocn_constants.o mpas_ocn_time_average.o mpas_ocn_diagnostics.o mpas_ocn_gm.o mpas_ocn_tendency.o: mpas_ocn_time_average.o mpas_ocn_high_freq_thickness_hmix_del2.o mpas_ocn_tracer_surface_flux.o mpas_ocn_thick_surface_flux.o mpas_ocn_tracer_short_wave_absorption.o mpas_ocn_tracer_advection.o mpas_ocn_tracer_hmix.o mpas_ocn_tracer_nonlocalflux.o mpas_ocn_vmix.o mpas_ocn_constants.o @@ -58,8 +57,6 @@ mpas_ocn_diagnostics.o: mpas_ocn_thick_ale.o mpas_ocn_diagnostics_routines.o mpa mpas_ocn_thick_ale.o: mpas_ocn_constants.o -mpas_ocn_global_diagnostics.o: - mpas_ocn_time_average.o: mpas_ocn_time_average_coupled.o: mpas_ocn_constants.o diff --git a/src/core_ocean/shared/mpas_ocn_constants.F 
b/src/core_ocean/shared/mpas_ocn_constants.F index 53275bd900..ad5d1f4501 100644 --- a/src/core_ocean/shared/mpas_ocn_constants.F +++ b/src/core_ocean/shared/mpas_ocn_constants.F @@ -29,10 +29,16 @@ module ocn_constants #endif implicit none + private save - type (mpas_pool_type), pointer :: ocnConfigs - type (mpas_pool_type), pointer :: ocnPackages + public :: ocn_constants_init + + type (mpas_pool_type), public, pointer :: ocnConfigs + type (mpas_pool_type), public, pointer :: ocnPackages + + character (len=*), public, parameter :: statsAlarmID = 'stats' + character (len=*), public, parameter :: coupleAlarmID = 'coupling' real (kind=RKIND), public :: & rho_air ,&! ambient air density (kg/m^3) diff --git a/src/core_ocean/shared/mpas_ocn_diagnostics.F b/src/core_ocean/shared/mpas_ocn_diagnostics.F index d68e0417ef..6bd850a9d7 100644 --- a/src/core_ocean/shared/mpas_ocn_diagnostics.F +++ b/src/core_ocean/shared/mpas_ocn_diagnostics.F @@ -36,8 +36,6 @@ module ocn_diagnostics private save - type (timer_node), pointer :: diagEOSTimer - !-------------------------------------------------------------------- ! ! 
Public parameters @@ -129,7 +127,6 @@ subroutine ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnostic real (kind=RKIND), dimension(:,:), pointer :: tracersSurfaceValue real (kind=RKIND), dimension(:,:), pointer :: tracersSurfaceLayerValue - real (kind=RKIND), dimension(:), pointer :: boundaryLayerDepth, boundaryLayerDepthEdge real (kind=RKIND), dimension(:), pointer :: normalVelocitySurfaceLayer real (kind=RKIND), dimension(:), pointer :: indexSurfaceLayerDepth @@ -140,7 +137,7 @@ subroutine ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnostic integer :: timeLevel integer, pointer :: indexTemperature, indexSalinity logical, pointer :: config_use_cvmix_kpp - real (kind=RKIND), pointer :: config_density0, config_apvm_scale_factor, config_coef_3rd_order, config_cvmix_kpp_surface_layer_extent + real (kind=RKIND), pointer :: config_density0, config_apvm_scale_factor, config_coef_3rd_order, config_cvmix_kpp_surface_layer_averaging character (len=StrKIND), pointer :: config_pressure_gradient_type if (present(timeLevelIn)) then @@ -153,7 +150,7 @@ subroutine ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnostic call mpas_pool_get_config(ocnConfigs, 'config_apvm_scale_factor', config_apvm_scale_factor) call mpas_pool_get_config(ocnConfigs, 'config_pressure_gradient_type', config_pressure_gradient_type) call mpas_pool_get_config(ocnConfigs, 'config_coef_3rd_order', config_coef_3rd_order) - call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_surface_layer_extent', config_cvmix_kpp_surface_layer_extent) + call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_surface_layer_averaging', config_cvmix_kpp_surface_layer_averaging) call mpas_pool_get_config(ocnConfigs, 'config_use_cvmix_kpp', config_use_cvmix_kpp) call mpas_pool_get_dimension(statePool, 'index_temperature', indexTemperature) @@ -228,8 +225,6 @@ subroutine ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnostic call 
mpas_pool_get_array(diagnosticsPool, 'tracersSurfaceValue', tracersSurfaceValue) call mpas_pool_get_array(diagnosticsPool, 'tracersSurfaceLayerValue', tracersSurfaceLayerValue) - call mpas_pool_get_array(diagnosticsPool, 'boundaryLayerDepth', boundaryLayerDepth) - call mpas_pool_get_array(diagnosticsPool, 'boundaryLayerDepthEdge', boundaryLayerDepthEdge) call mpas_pool_get_array(diagnosticsPool, 'normalVelocitySurfaceLayer', normalVelocitySurfaceLayer) call mpas_pool_get_array(diagnosticsPool, 'indexSurfaceLayerDepth', indexSurfaceLayerDepth) @@ -469,7 +464,7 @@ subroutine ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnostic ! ! equation of state ! - call mpas_timer_start("equation of state", .false., diagEOSTimer) + call mpas_timer_start("equation of state") ! compute in-place density if (config_pressure_gradient_type.eq.'Jacobian_from_TS') then @@ -490,7 +485,7 @@ subroutine ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnostic ! That is, layer k has been displaced to the depth of layer k+1. call ocn_equation_of_state_density(statePool, diagnosticsPool, meshPool, 1, 'relative', displacedDensity, err, timeLevelIn=timeLevel) - call mpas_timer_stop("equation of state", diagEOSTimer) + call mpas_timer_stop("equation of state") ! ! 
Pressure @@ -608,7 +603,7 @@ subroutine ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnostic tracersSurfaceLayerValue(:,:) = 0.0 indexSurfaceLayerDepth(:) = -9.e30 do iCell=1,nCells - surfaceLayerDepth = boundaryLayerDepth(iCell) * config_cvmix_kpp_surface_layer_extent + surfaceLayerDepth = config_cvmix_kpp_surface_layer_averaging sumSurfaceLayer=0.0 do k=1,maxLevelCell(iCell) sumSurfaceLayer = sumSurfaceLayer + layerThickness(k,iCell) @@ -619,58 +614,45 @@ subroutine ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnostic exit endif end do - sumSurfaceLayer = 0.0 do k=1,int(rSurfaceLayer) - sumSurfaceLayer = sumSurfaceLayer + layerThickness(k,iCell) tracersSurfaceLayerValue(:,iCell) = tracersSurfaceLayerValue(:,iCell) + tracers(:,k,iCell)*layerThickness(k,iCell) enddo k=int(rSurfaceLayer)+1 - sumSurfaceLayer = sumSurfaceLayer + fraction(rSurfaceLayer)*layerThickness(k,iCell) tracersSurfaceLayerValue(:,iCell) = tracersSurfaceLayerValue(:,iCell) + fraction(rSurfaceLayer)*tracers(:,k,iCell)*layerThickness(k,iCell) - tracersSurfaceLayerValue(:,iCell) = tracersSurfaceLayerValue(:,iCell) / sumSurfaceLayer + tracersSurfaceLayerValue(:,iCell) = tracersSurfaceLayerValue(:,iCell) / surfaceLayerDepth enddo - endif - ! - ! average normal velocity values over the ocean surface layer - ! the ocean surface layer is generally assumed to be about 0.1 of the boundary layer depth - ! - if (config_use_cvmix_kpp) then - normalVelocitySurfaceLayer(:) = 0.0 + ! + ! average normal velocity values over the ocean surface layer + ! the ocean surface layer is generally assumed to be about 0.1 of the boundary layer depth + ! 
+ normalVelocitySurfaceLayer(:) = 0.0_RKIND do iEdge=1,nEdges cell1=cellsOnEdge(1,iEdge) cell2=cellsOnEdge(2,iEdge) - boundaryLayerDepthEdge(iEdge) = 0.5*( boundaryLayerDepth(cell1)+boundaryLayerDepth(cell2) ) - surfaceLayerDepth = boundaryLayerDepthEdge(iEdge) * config_cvmix_kpp_surface_layer_extent + surfaceLayerDepth = config_cvmix_kpp_surface_layer_averaging sumSurfaceLayer=0.0 do k=1,maxLevelEdgeTop(iEdge) rSurfaceLayer = k sumSurfaceLayer = sumSurfaceLayer + layerThicknessEdge(k,iEdge) if(sumSurfaceLayer.gt.surfaceLayerDepth) then - sumSurfaceLayer = sumSurfaceLayer - layerThicknessEdge(k,iCell) - rSurfaceLayer = int(k-1) + (surfaceLayerDepth-sumSurfaceLayer)/layerThicknessEdge(k,iCell) + sumSurfaceLayer = sumSurfaceLayer - layerThicknessEdge(k,iEdge) + rSurfaceLayer = int(k-1) + (surfaceLayerDepth-sumSurfaceLayer)/layerThicknessEdge(k,iEdge) exit endif end do - sumSurfaceLayer = 0.0 do k=1,int(rSurfaceLayer) - sumSurfaceLayer = sumSurfaceLayer + layerThicknessEdge(k,iEdge) normalVelocitySurfaceLayer(iEdge) = normalVelocitySurfaceLayer(iEdge) + normalVelocity(k,iEdge)*layerThicknessEdge(k,iEdge) enddo k=int(rSurfaceLayer)+1 if(k.le.maxLevelEdgeTop(iEdge)) then - sumSurfaceLayer = sumSurfaceLayer + fraction(rSurfaceLayer)*layerThickness(k,iCell) normalVelocitySurfaceLayer(iEdge) = normalVelocitySurfaceLayer(iEdge) + fraction(rSurfaceLayer)*normalVelocity(k,iEdge)*layerThicknessEdge(k,iEdge) - endif - if (maxLevelEdgeTop(iEdge) .gt. 0) then - normalVelocitySurfaceLayer(iEdge) = normalVelocitySurfaceLayer(iEdge) / sumSurfaceLayer + normalVelocitySurfaceLayer(iEdge) = normalVelocitySurfaceLayer(iEdge) / surfaceLayerDepth end if enddo - endif ! if config_use_cvmix_kpp - ! - ! compute fields used as intent(in) to CVMix/KPP - if (config_use_cvmix_kpp) then + ! + ! 
compute fields used as intent(in) to CVMix/KPP call computeKPPInputFields(statePool, forcingPool, meshPool, diagnosticsPool, scratchPool, timeLevel) endif @@ -1116,8 +1098,8 @@ subroutine computeKPPInputFields(statePool, forcingPool, meshPool, diagnosticsPo ! real pointers real (kind=RKIND), dimension(:), pointer :: dcEdge, dvEdge, areaCell - real (kind=RKIND), dimension(:), pointer :: penetrativeTemperatureFlux, surfaceMassFlux, & - surfaceBuoyancyForcing, surfaceFrictionVelocity, boundaryLayerDepth, penetrativeTemperatureFluxOBL, & + real (kind=RKIND), dimension(:), pointer :: penetrativeTemperatureFlux, surfaceThicknessFlux, & + surfaceBuoyancyForcing, surfaceFrictionVelocity, penetrativeTemperatureFluxOBL, & normalVelocitySurfaceLayer real (kind=RKIND), dimension(:), pointer :: surfaceWindStress, surfaceWindStressMagnitude real (kind=RKIND), dimension(:,:), pointer :: & @@ -1149,7 +1131,7 @@ subroutine computeKPPInputFields(statePool, forcingPool, meshPool, diagnosticsPo call mpas_pool_get_config(ocnConfigs, 'config_density0', config_density0) ! set the parameter turbulentVelocitySquared - turbulentVelocitySquared = 0.001 + turbulentVelocitySquared = 0.001_RKIND ! 
set scalar values call mpas_pool_get_dimension(meshPool, 'nCells', nCells) @@ -1172,7 +1154,6 @@ subroutine computeKPPInputFields(statePool, forcingPool, meshPool, diagnosticsPo call mpas_pool_get_array(diagnosticsPool, 'zTop', zTop) call mpas_pool_get_array(diagnosticsPool, 'density', density) call mpas_pool_get_array(diagnosticsPool, 'tracersSurfaceValue ', tracersSurfaceValues) - call mpas_pool_get_array(diagnosticsPool, 'boundaryLayerDepth', boundaryLayerDepth) call mpas_pool_get_array(diagnosticsPool, 'surfaceFrictionVelocity', surfaceFrictionVelocity) call mpas_pool_get_array(diagnosticsPool, 'penetrativeTemperatureFluxOBL', penetrativeTemperatureFluxOBL) call mpas_pool_get_array(diagnosticsPool, 'bulkRichardsonNumberBuoy', bulkRichardsonNumberBuoy) @@ -1181,7 +1162,7 @@ subroutine computeKPPInputFields(statePool, forcingPool, meshPool, diagnosticsPo call mpas_pool_get_array(diagnosticsPool, 'surfaceBuoyancyForcing', surfaceBuoyancyForcing) call mpas_pool_get_array(diagnosticsPool, 'normalVelocitySurfaceLayer', normalVelocitySurfaceLayer) - call mpas_pool_get_array(forcingPool, 'surfaceMassFlux', surfaceMassFlux) + call mpas_pool_get_array(forcingPool, 'surfaceThicknessFlux', surfaceThicknessFlux) call mpas_pool_get_array(forcingPool, 'surfaceTracerFlux', surfaceTracerFlux) call mpas_pool_get_array(forcingPool, 'penetrativeTemperatureFlux', penetrativeTemperatureFlux) call mpas_pool_get_array(forcingPool, 'surfaceWindStress', surfaceWindStress) @@ -1214,15 +1195,15 @@ subroutine computeKPPInputFields(statePool, forcingPool, meshPool, diagnosticsPo ! everything below should be consistent with that specified in Registry ! everything below should be consistent with the CVMix/KPP documentation: https://www.dropbox.com/s/6hqgc0rsoa828nf/cvmix_20aug2013.pdf ! - ! surfaceMassFlux: surface mass flux, m/s, positive into ocean + ! surfaceThicknessFlux: surface mass flux, m/s, positive into ocean ! 
surfaceTracerFlux(indexTempFlux): non-penetrative temperature flux, C m/s, positive into ocean ! penetrativeTemperatureFlux: penetrative surface temperature flux at ocean surface, positive into ocean ! surfaceTracerFlux(indexSaltFlux): salinity flux, PSU m/s, positive into ocean ! penetrativeTemperatureFluxOBL: penetrative temperature flux computed at z=OBL, positive down ! ! note: the following fields used the CVMix/KPP computation of buoyancy forcing are not included here - ! 1. Tm: temperature associated with surfaceMassFlux, C (here we assume Tm == temperatureSurfaceValue) - ! 2. Sm: salinity associated with surfaceMassFlux, PSU (here we assume Sm == salinitySurfaceValue and account for salinity flux in surfaceTracerFlux array) + ! 1. Tm: temperature associated with surfaceThicknessFlux, C (here we assume Tm == temperatureSurfaceValue) + ! 2. Sm: salinity associated with surfaceThicknessFlux, PSU (here we assume Sm == salinitySurfaceValue and account for salinity flux in surfaceTracerFlux array) ! surfaceBuoyancyForcing(iCell) = thermalExpansionCoeff (1,iCell) * & (surfaceTracerFlux(indexTempFlux,iCell) + penetrativeTemperatureFlux(iCell) - penetrativeTemperatureFluxOBL(iCell)) & @@ -1247,14 +1228,14 @@ subroutine computeKPPInputFields(statePool, forcingPool, meshPool, diagnosticsPo ! zero the bulk Richardson number within the ocean surface layer ! this prevent CVMix/KPP from mis-diagnosing the OBL to be within the surface layer - bulkRichardsonNumberBuoy (:,iCell) = 1.0e8 - bulkRichardsonNumberShear(:,iCell) = 1.0 + bulkRichardsonNumberBuoy (:,iCell) = 1.0e8_RKIND + bulkRichardsonNumberShear(:,iCell) = 1.0_RKIND ! loop over vertical to compute bulk Richardson number do k=1,maxLevelCell(iCell) ! 
find deltaVelocitySquared defined at cell centers based on velocity at levels 1 and k - deltaVelocitySquared = 0.0 + deltaVelocitySquared = 0.0_RKIND do i = 1, nEdgesOnCell(iCell) iEdge = edgesOnCell(i, iCell) factor = 0.5 * dcEdge(iEdge) * dvEdge(iEdge) * invAreaCell @@ -1263,34 +1244,19 @@ subroutine computeKPPInputFields(statePool, forcingPool, meshPool, diagnosticsPo enddo buoyContribution = gravity * (density(k,iCell) - densitySurfaceDisplaced(k,iCell)) / config_density0 - shearContribution = max(deltaVelocitySquared,1.0e-10) + shearContribution = max(deltaVelocitySquared,1.0e-15_RKIND) - ! compute bulk Richardson number - ! we estimate the bulk Richardson number here, but its value will be updated - ! in the ocn_vmix_coefs_cvmix_build when we have access to the turbulent velocity scale and unresolved shear + ! store the buoyancy and resolved shear contributions to bulk Richardson number bulkRichardsonNumberBuoy(k,iCell) = buoyContribution bulkRichardsonNumberShear(k,iCell) = shearContribution - enddo - - ! remove 2dz mode from bulkRichardsonNumber{Buoy,Shear} - buoySmoothed(:) = 0.0 - shearSmoothed(:) = 0.0 - do k=2,maxLevelCell(iCell)-1 - buoySmoothed(k) = (bulkRichardsonNumberBuoy(k-1,iCell) + 2*bulkRichardsonNumberBuoy(k,iCell) + bulkRichardsonNumberBuoy(k+1,iCell)) / 4.0 - shearSmoothed(k) = (bulkRichardsonNumberShear(k-1,iCell) + 2*bulkRichardsonNumberShear(k,iCell) + bulkRichardsonNumberShear(k+1,iCell)) / 4.0 - enddo - buoySmoothed(1) = buoySmoothed(2) - shearSmoothed(1) = shearSmoothed(2) - buoySmoothed(maxLevelCell(iCell))=buoySmoothed(maxLevelCell(iCell)-1) - shearSmoothed(maxLevelCell(iCell))=shearSmoothed(maxLevelCell(iCell)-1) - - bulkRichardsonNumberBuoy(1:maxLevelCell(iCell),iCell) = buoySmoothed(1:maxLevelCell(iCell)) - bulkRichardsonNumberShear(1:maxLevelCell(iCell),iCell) = shearSmoothed(1:maxLevelCell(iCell)) + enddo ! do k=1,maxLevelCell(iCell) - ! 
bulkRichardsonNumberBuoy to a negative value within surface layer to prevent CVMix/KPP from + ! set bulkRichardsonNumberBuoy to a negative value within surface layer to prevent CVMix/KPP from ! incorrectly diagnosing OBL to be within surface layer - bulkRichardsonNumberBuoy(1:int(indexSurfaceLayerDepth(iCell)),iCell) = -1.0 + ! require boundary layer to be below the top layer + k=max(int(indexSurfaceLayerDepth(iCell)),1) + bulkRichardsonNumberBuoy(1:k,iCell) = 0.0_RKIND enddo diff --git a/src/core_ocean/shared/mpas_ocn_diagnostics_routines.F b/src/core_ocean/shared/mpas_ocn_diagnostics_routines.F index d69309f4ff..02bc956bcf 100644 --- a/src/core_ocean/shared/mpas_ocn_diagnostics_routines.F +++ b/src/core_ocean/shared/mpas_ocn_diagnostics_routines.F @@ -28,8 +28,6 @@ module ocn_diagnostics_routines private save - type (timer_node), pointer :: diagEOSTimer - !-------------------------------------------------------------------- ! ! Public parameters diff --git a/src/core_ocean/shared/mpas_ocn_equation_of_state.F b/src/core_ocean/shared/mpas_ocn_equation_of_state.F index d6b0de7ab5..f8df5baa78 100644 --- a/src/core_ocean/shared/mpas_ocn_equation_of_state.F +++ b/src/core_ocean/shared/mpas_ocn_equation_of_state.F @@ -128,8 +128,8 @@ subroutine ocn_equation_of_state_density(statePool, diagnosticsPool, meshPool, k if (linearEos) then - call ocn_equation_of_state_linear_density(meshPool, indexT, indexS, tracers, density, err, & - thermalExpansionCoeff, salineContractionCoeff) + call ocn_equation_of_state_linear_density(meshPool, k_displaced, displacement_type, indexT, indexS, tracers, density, err, & + tracersSurfaceValue, thermalExpansionCoeff, salineContractionCoeff) elseif (jmEos) then diff --git a/src/core_ocean/shared/mpas_ocn_equation_of_state_linear.F b/src/core_ocean/shared/mpas_ocn_equation_of_state_linear.F index 8104c79219..7de9681a47 100644 --- a/src/core_ocean/shared/mpas_ocn_equation_of_state_linear.F +++ 
b/src/core_ocean/shared/mpas_ocn_equation_of_state_linear.F @@ -20,9 +20,12 @@ module ocn_equation_of_state_linear + use mpas_kind_types use mpas_derived_types use mpas_pool_routines use ocn_constants + use mpas_dmpar + use mpas_io_units implicit none private @@ -72,51 +75,109 @@ module ocn_equation_of_state_linear ! !----------------------------------------------------------------------- - subroutine ocn_equation_of_state_linear_density(meshPool, indexT, indexS, tracers, density, err, & - thermalExpansionCoeff, salineContractionCoeff)!{{{ + subroutine ocn_equation_of_state_linear_density(meshPool, k_displaced, displacement_type, indexT, indexS, tracers, density, err, & + tracersSurfaceLayerValue, thermalExpansionCoeff, salineContractionCoeff)!{{{ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! This module contains routines necessary for computing the density ! from model temperature and salinity using an equation of state. ! ! Input: mesh - mesh metadata ! s - state: tracers - ! k_displaced - ! If k_displaced<=0, state % density is returned with no displaced - ! If k_displaced>0,the state % densityDisplaced is returned, and is for - ! a parcel adiabatically displaced from its original level to level - ! k_displaced. This does not effect the linear EOS. ! ! Output: s - state: computed density + ! + ! + !> While somewhat unnecessary, we make the interface and capability + !> of linear eos to be identical to nonlinear eos + !> + !> Density can be computed in-situ using k_displaced=0 and + !> displacement_type = 'relative'. + !> + !> Potential density (referenced to top layer) can be computed + !> using k_displaced=1 and displacement_type = 'absolute'. + !> + !> The density of SST/SSS after adiabatic displacement to each layer + !> can be computed using displacement_type = 'surfaceDisplaced'. + !> + !> When using displacement_type = 'surfaceDisplaced', k_displaced is + !> ignored and tracersSurfaceLayerValue must be present. 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! implicit none type (mpas_pool_type), intent(in) :: meshPool - integer, intent(in) :: indexT, indexS + character(len=*), intent(in) :: displacement_type + integer, intent(in) :: k_displaced, indexT, indexS real (kind=RKIND), dimension(:,:,:), intent(in) :: tracers real (kind=RKIND), dimension(:,:), intent(inout) :: density integer, intent(out) :: err + real (kind=RKIND), dimension(:,:), intent(in), optional :: tracersSurfaceLayerValue real (kind=RKIND), dimension(:,:), intent(out), optional :: & thermalExpansionCoeff, &! Thermal expansion coefficient (alpha), defined as $-1/\rho d\rho/dT$ (note negative sign) salineContractionCoeff ! Saline contraction coefficient (beta), defined as $1/\rho d\rho/dS$ integer, dimension(:), pointer :: maxLevelCell - integer :: iCell, k - integer, pointer :: nCells + integer :: iCell, k, k_displaced_local, k_ref + integer, pointer :: nCells, nVertLevels + character(len=60) :: displacement_type_local type (dm_info) :: dminfo call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) call mpas_pool_get_dimension(meshPool, 'nCells', nCells) + call mpas_pool_get_dimension(meshPool, 'nVertLevels', nVertLevels) err = 0 - do iCell = 1, nCells - do k = 1, maxLevelCell(iCell) - ! Linear equation of state - density(k,iCell) = config_eos_linear_densityref & - - config_eos_linear_alpha * (tracers(indexT,k,iCell) - config_eos_linear_Tref) & - + config_eos_linear_beta * (tracers(indexS,k,iCell) - config_eos_linear_Sref) + ! copy some intent(in) into local work space + displacement_type_local = trim(displacement_type) + k_displaced_local = k_displaced + + ! test of request to address out of bounds + if (displacement_type_local == 'absolute' .and. & + (k_displaced_local <= 0 .or. 
k_displaced_local > nVertLevels) ) then + write (stderrUnit,*) 'Abort: In equation_of_state_jm', & + ' k_displaced must be between 1 and nVertLevels for ', & + 'displacement_type = absolute' + call mpas_dmpar_abort(dminfo) + endif + + ! if surfaceDisplaced, then compute density at all levels based on surface values + if (displacement_type_local == 'surfaceDisplaced') then + do iCell = 1, nCells + do k = 1, maxLevelCell(iCell) + ! Linear equation of state + density(k,iCell) = config_eos_linear_densityref & + - config_eos_linear_alpha * (tracersSurfaceLayerValue(indexT,iCell) - config_eos_linear_Tref) & + + config_eos_linear_beta * (tracersSurfaceLayerValue(indexS,iCell) - config_eos_linear_Sref) + end do end do - end do + endif + + + ! if absolute, then compute density at all levels based on pressure of k_displaced value + ! but since linear EOS does not (at present) have a pressure dependency, this just returns density + if (displacement_type_local == 'absolute') then + do iCell = 1, nCells + do k = 1, maxLevelCell(iCell) + ! Linear equation of state + density(k,iCell) = config_eos_linear_densityref & + - config_eos_linear_alpha * (tracers(indexT,k,iCell) - config_eos_linear_Tref) & + + config_eos_linear_beta * (tracers(indexS,k,iCell) - config_eos_linear_Sref) + end do + end do + endif + + ! if relative, then compute density at all levels based on k+k_displaced pressure value + ! but since (at present) linear EOS has not dependence on pressure, it returns density + if (displacement_type_local == 'relative') then + do iCell = 1, nCells + do k = 1, maxLevelCell(iCell) + ! 
Linear equation of state + density(k,iCell) = config_eos_linear_densityref & + - config_eos_linear_alpha * (tracers(indexT,k,iCell) - config_eos_linear_Tref) & + + config_eos_linear_beta * (tracers(indexS,k,iCell) - config_eos_linear_Sref) + end do + end do + endif if (present(thermalExpansionCoeff)) then do iCell = 1, nCells diff --git a/src/core_ocean/shared/mpas_ocn_forcing.F b/src/core_ocean/shared/mpas_ocn_forcing.F index de93e9207a..fed8a95417 100644 --- a/src/core_ocean/shared/mpas_ocn_forcing.F +++ b/src/core_ocean/shared/mpas_ocn_forcing.F @@ -25,6 +25,7 @@ module ocn_forcing use mpas_timekeeping use mpas_io_units use mpas_dmpar + use mpas_abort, only : mpas_dmpar_global_abort use ocn_forcing_bulk use ocn_forcing_restoring use ocn_constants @@ -47,7 +48,7 @@ module ocn_forcing public :: ocn_forcing_build_arrays, & ocn_forcing_init, & - ocn_forcing_build_transmission_array, & + ocn_forcing_build_fraction_absorbed_array, & ocn_forcing_transmission !-------------------------------------------------------------------- @@ -206,18 +207,18 @@ end subroutine ocn_forcing_init!}}} !*********************************************************************** ! -! routine ocn_forcing_build_transmission_array +! routine ocn_forcing_build_fraction_absorbed_array ! -!> \brief Transmission coefficient array for surface forcing. +!> \brief fraction absorbed coefficient array for surface forcing. !> \author Doug Jacobsen !> \date 10/03/2013 !> \details -!> This subroutine builds the transmission coefficient array for use in +!> This subroutine builds the fractionAbsorbed coefficient array for use in !> applying surface fluxes deeper than the surface layer. ! 
!----------------------------------------------------------------------- - subroutine ocn_forcing_build_transmission_array(meshPool, statePool, forcingPool, err, timeLevelIn)!{{{ + subroutine ocn_forcing_build_fraction_absorbed_array(meshPool, statePool, forcingPool, err, timeLevelIn)!{{{ type (mpas_pool_type), intent(in) :: meshPool !< Input: Mesh information type (mpas_pool_type), intent(in) :: statePool !< Input: State information type (mpas_pool_type), intent(inout) :: forcingPool !< Input/Output: Forcing information @@ -232,7 +233,7 @@ subroutine ocn_forcing_build_transmission_array(meshPool, statePool, forcingPool real (kind=RKIND) :: zTop, zBot, transmissionCoeffTop, transmissionCoeffBot - real (kind=RKIND), dimension(:,:), pointer :: layerThickness, transmissionCoefficients + real (kind=RKIND), dimension(:,:), pointer :: layerThickness, fractionAbsorbed integer :: iCell, k, timeLevel integer, pointer :: nCells @@ -253,7 +254,7 @@ subroutine ocn_forcing_build_transmission_array(meshPool, statePool, forcingPool call mpas_pool_get_array(statePool, 'layerThickness', layerThickness, timeLevel) - call mpas_pool_get_array(forcingPool, 'transmissionCoefficients', transmissionCoefficients) + call mpas_pool_get_array(forcingPool, 'fractionAbsorbed', fractionAbsorbed) do iCell = 1, nCells zTop = 0.0_RKIND @@ -262,14 +263,14 @@ subroutine ocn_forcing_build_transmission_array(meshPool, statePool, forcingPool zBot = zTop - layerThickness(k,iCell) transmissionCoeffBot = ocn_forcing_transmission(zBot) - transmissionCoefficients(k, iCell) = transmissionCoeffTop - transmissionCoeffBot + fractionAbsorbed(k, iCell) = transmissionCoeffTop - transmissionCoeffBot zTop = zBot transmissionCoeffTop = transmissionCoeffBot end do end do - end subroutine ocn_forcing_build_transmission_array!}}} + end subroutine ocn_forcing_build_fraction_absorbed_array!}}} !*********************************************************************** ! 
diff --git a/src/core_ocean/shared/mpas_ocn_forcing_bulk.F b/src/core_ocean/shared/mpas_ocn_forcing_bulk.F index 9dd73307a5..bce3cb0794 100644 --- a/src/core_ocean/shared/mpas_ocn_forcing_bulk.F +++ b/src/core_ocean/shared/mpas_ocn_forcing_bulk.F @@ -51,6 +51,8 @@ module ocn_forcing_bulk ! !-------------------------------------------------------------------- + real (kind=RKIND) :: refDensity + !*********************************************************************** contains @@ -114,7 +116,7 @@ subroutine ocn_forcing_bulk_build_arrays(meshPool, forcingPool, err)!{{{ real (kind=RKIND), dimension(:), pointer :: rainFlux real (kind=RKIND), dimension(:), pointer :: seaSurfacePressure, iceFraction - real (kind=RKIND), dimension(:), pointer :: surfaceMassFlux, surfaceWindStress, surfaceWindStressMagnitude + real (kind=RKIND), dimension(:), pointer :: surfaceThicknessFlux, surfaceWindStress, surfaceWindStressMagnitude real (kind=RKIND), dimension(:,:), pointer :: surfaceTracerFlux err = 0 @@ -151,7 +153,7 @@ subroutine ocn_forcing_bulk_build_arrays(meshPool, forcingPool, err)!{{{ call mpas_pool_get_array(forcingPool, 'seaSurfacePressure', seaSurfacePressure) call mpas_pool_get_array(forcingPool, 'iceFraction', iceFraction) - call mpas_pool_get_array(forcingPool, 'surfaceMassFlux', surfaceMassFlux) + call mpas_pool_get_array(forcingPool, 'surfaceThicknessFlux', surfaceThicknessFlux) call mpas_pool_get_array(forcingPool, 'surfaceTracerFlux', surfaceTracerFlux) call mpas_pool_get_array(forcingPool, 'penetrativeTemperatureFlux', penetrativeTemperatureFlux) @@ -175,7 +177,7 @@ subroutine ocn_forcing_bulk_build_arrays(meshPool, forcingPool, err)!{{{ surfaceTracerFlux(index_salinity_flux, iCell) = seaIceSalinityFlux(iCell) * sflux_factor - surfaceMassFlux(iCell) = snowFlux(iCell) + rainFlux(iCell) + evaporationFlux(iCell) + seaIceFreshWaterFlux(iCell) + iceRunoffFlux(iCell) + riverRunoffFlux(iCell) + surfaceThicknessFlux(iCell) = ( snowFlux(iCell) + rainFlux(iCell) + 
evaporationFlux(iCell) + seaIceFreshWaterFlux(iCell) + iceRunoffFlux(iCell) + riverRunoffFlux(iCell) ) / refDensity end do penetrativeTemperatureFlux = shortWaveHeatFlux * hflux_factor @@ -198,8 +200,15 @@ subroutine ocn_forcing_bulk_init(err)!{{{ integer, intent(out) :: err !< Output: error flag + real (kind=RKIND), pointer :: config_density0 + err = 0 + + call mpas_pool_get_config(ocnConfigs, 'config_density0', config_density0) + + refDensity = config_density0 + end subroutine ocn_forcing_bulk_init!}}} !*********************************************************************** diff --git a/src/core_ocean/shared/mpas_ocn_forcing_restoring.F b/src/core_ocean/shared/mpas_ocn_forcing_restoring.F index 54663ccfb7..b7377d105d 100644 --- a/src/core_ocean/shared/mpas_ocn_forcing_restoring.F +++ b/src/core_ocean/shared/mpas_ocn_forcing_restoring.F @@ -115,19 +115,19 @@ subroutine ocn_forcing_restoring_build_arrays(meshPool, indexT, indexS, indexTFl !----------------------------------------------------------------- integer :: iCell, k - integer, pointer :: nCellsSolve + integer, pointer :: nCells real (kind=RKIND) :: invTemp, invSalinity err = 0 - call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(meshPool, 'nCells', nCells) invTemp = 1.0 / (temperatureTimeScale * 86400.0) invSalinity = 1.0 / (salinityTimeScale * 86400.0) k = 1 ! 
restoring only in top layer - do iCell=1,nCellsSolve + do iCell=1,nCells surfaceTracerFluxes(indexTFlux, iCell) = - temperatureLengthScale * (tracers(indexT, k, iCell) - temperatureRestoring(iCell)) * invTemp surfaceTracerFluxes(indexSFlux, iCell) = - salinityLengthScale * (tracers(indexS, k, iCell) - salinityRestoring(iCell)) * invSalinity enddo diff --git a/src/core_ocean/shared/mpas_ocn_global_diagnostics.F b/src/core_ocean/shared/mpas_ocn_global_diagnostics.F deleted file mode 100644 index fcfffb0a3c..0000000000 --- a/src/core_ocean/shared/mpas_ocn_global_diagnostics.F +++ /dev/null @@ -1,961 +0,0 @@ -! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) -! and the University Corporation for Atmospheric Research (UCAR). -! -! Unless noted otherwise source code is licensed under the BSD license. -! Additional copyright and license information can be found in the LICENSE file -! distributed with this code, or at http://mpas-dev.github.com/license.html -! -!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| -! -! ocn_global_diagnostics -! -!> \brief MPAS ocean statistics for the full domain -!> \author Mark Petersen and Xylar Asay-Davis -!> \date 15 April 2013 -!> \details -!> This module contains routines to compute global statistics such as -!> minimum, maximum, and mean of variables, taken over the full domain. -! -!----------------------------------------------------------------------- -module ocn_global_diagnostics - - use mpas_derived_types - use mpas_pool_routines - use mpas_constants - use mpas_dmpar - use mpas_timer - - implicit none - save - public - - type (timer_node), pointer :: diagBlockTimer, diagMPITimer - - contains - -!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| -! -! ocn_compute_global_diagnostics -! 
-!> \brief MPAS ocean statistics for the full domain -!> \author Mark Petersen and Xylar Asay-Davis -!> \date 15 April 2013 -!> \details -!> This routines to computes and writes global statistics such as -!> minimum, maximum, and mean of variables, taken over the full domain. -! -!----------------------------------------------------------------------- - - subroutine ocn_compute_global_diagnostics(domain, timeLevel, timeIndex, dt)!{{{ - - ! Note: this routine assumes that there is only one block per processor. No looping - ! is preformed over blocks. - ! dminfo is the domain info needed for global communication - ! state contains the state variables needed to compute global diagnostics - ! mesh conains the meta data about the mesh - ! timeIndex is the current time step counter - ! dt is the duration of each time step - ! - ! Sums of variables at vertices are not weighted by thickness (since layerThickness is not known at - ! vertices as it is at cell centers and at edges). - ! - ! RMS here is volume-weighted root mean square, i.e. - ! rms = sqrt( sum( T_i^2*v_i) / sum(v_i) ) - ! where T is the field of interest - ! and v is the volume of the cell. - - implicit none - - type (domain_type), intent(inout) :: domain !< Input/Output: domain information - integer, intent(in) :: timeIndex - real (kind=RKIND), intent(in) :: dt - integer, intent(in) :: timeLevel - - type (block_type), pointer :: block - type (dm_info), pointer :: dminfo - type (mpas_pool_type), pointer :: statePool - type (mpas_pool_type), pointer :: meshPool - type (mpas_pool_type), pointer :: diagnosticsPool - - integer :: nCellsGlobal, nEdgesGlobal, nVerticesGlobal, iTracer - integer :: elementIndex, variableIndex, nVariables, nSums, nMaxes, nMins - integer :: k, i, fileID - integer :: timeYYYY, timeMM, timeDD, timeH, timeM, timeS - integer, pointer :: nVertLevels, nCellsSolve, nEdgesSolve, nVerticesSolve, num_tracers - character*1 timeChar - integer, parameter :: kMaxVariables = 1024 ! 
this must be a little more than double the number of variables to be reduced - integer, dimension(:), pointer :: maxLevelCell, maxLevelEdgeTop, maxLevelVertexBot - - real (kind=RKIND) :: localCFL, localSum, time_days - real (kind=RKIND), pointer :: volumeCellGlobal, volumeEdgeGlobal, CFLNumberGlobal, areaCellGlobal, areaEdgeGlobal, areaTriangleGlobal - real (kind=RKIND), dimension(:), pointer :: areaCell, dcEdge, dvEdge, areaTriangle, areaEdge - real (kind=RKIND), dimension(:,:), pointer :: layerThickness, normalVelocity, tangentialVelocity, layerThicknessEdge, relativeVorticity, kineticEnergyCell, & - normalizedRelativeVorticityEdge, normalizedPlanetaryVorticityEdge, pressure, montgomeryPotential, vertAleTransportTop, vertVelocityTop, & - lowFreqDivergence, highFreqThickness, density - real (kind=RKIND), dimension(:,:,:), pointer :: tracers - - real (kind=RKIND), dimension(kMaxVariables) :: sums, sumSquares, mins, maxes, averages, rms, verticalSumMins, verticalSumMaxes, reductions - real (kind=RKIND), dimension(kMaxVariables) :: sums_tmp, sumSquares_tmp, mins_tmp, maxes_tmp, averages_tmp, verticalSumMins_tmp, verticalSumMaxes_tmp - - real (kind=RKIND), dimension(:,:), allocatable :: enstrophy, normalizedAbsoluteVorticity, workArray - - character (len=StrKIND), pointer :: xtime - - block => domain % blocklist - dminfo => domain % dminfo - - sums = 0.0 - sumSquares = 0.0 - mins = 1.0e34 - maxes = -1.0e34 - averages = 0.0 - verticalSumMins = 1.0e34 - verticalSumMaxes = -1.0e34 - reductions = 0.0 - - call mpas_timer_start("diagnostic block loop", .false., diagBlockTimer) - do while (associated(block)) - call mpas_pool_get_subpool(block % structs, 'state', statePool) - call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) - call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) - - call mpas_pool_get_dimension(statePool, 'num_tracers', num_tracers) - - call mpas_pool_get_dimension(meshPool, 'nVertLevels', nVertLevels) - call 
mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsSolve) - call mpas_pool_get_dimension(meshPool, 'nEdgesSolve', nEdgesSolve) - call mpas_pool_get_dimension(meshPool, 'nVerticesSolve', nVerticesSolve) - - call mpas_pool_get_array(meshPool, 'areaCell', areaCell) - call mpas_pool_get_array(meshPool, 'dcEdge', dcEdge) - call mpas_pool_get_array(meshPool, 'dvEdge', dvEdge) - call mpas_pool_get_array(meshPool, 'areaTriangle', areaTriangle) - call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) - call mpas_pool_get_array(meshPool, 'maxLevelEdgeTop', maxLevelEdgeTop) - call mpas_pool_get_array(meshPool, 'maxLevelVertexBot', maxLevelVertexBot) - - allocate(areaEdge(1:nEdgesSolve)) - areaEdge = dcEdge(1:nEdgesSolve)*dvEdge(1:nEdgesSolve) - - call mpas_pool_get_array(statePool, 'layerThickness', layerThickness, timeLevel) - call mpas_pool_get_array(statePool, 'normalVelocity', normalVelocity, timeLevel) - call mpas_pool_get_array(statePool, 'tracers', tracers, timeLevel) - call mpas_pool_get_array(statePool, 'lowFreqDivergence', lowFreqDivergence, timeLevel) - call mpas_pool_get_array(statePool, 'highFreqThickness', highFreqThickness, timeLevel) - - call mpas_pool_get_array(diagnosticsPool, 'density', density) - call mpas_pool_get_array(diagnosticsPool, 'montgomeryPotential', montgomeryPotential) - call mpas_pool_get_array(diagnosticsPool, 'pressure', pressure) - call mpas_pool_get_array(diagnosticsPool, 'relativeVorticity', relativeVorticity) - call mpas_pool_get_array(diagnosticsPool, 'normalizedRelativeVorticityEdge', normalizedRelativeVorticityEdge) - call mpas_pool_get_array(diagnosticsPool, 'normalizedPlanetaryVorticityEdge', normalizedPlanetaryVorticityEdge) - call mpas_pool_get_array(diagnosticsPool, 'vertAleTransportTop', vertAleTransportTop) - call mpas_pool_get_array(diagnosticsPool, 'vertVelocityTop', vertVelocityTop) - call mpas_pool_get_array(diagnosticsPool, 'tangentialVelocity', tangentialVelocity) - call mpas_pool_get_array(diagnosticsPool, 
'layerThicknessEdge', layerThicknessEdge) - call mpas_pool_get_array(diagnosticsPool, 'kineticEnergyCell', kineticEnergyCell) - - allocate(workArray(nVertLevels,nCellsSolve)) - - variableIndex = 0 - ! layerThickness - variableIndex = variableIndex + 1 - call ocn_compute_field_area_weighted_local_stats_max_level(dminfo, nVertLevels, nCellsSolve, maxLevelCell(1:nCellsSolve), areaCell(1:nCellsSolve), layerThickness(:,1:nCellsSolve), & - sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - - ! 
normalVelocity - variableIndex = variableIndex + 1 - call ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nEdgesSolve, maxLevelEdgeTop(1:nEdgesSolve), areaEdge(1:nEdgesSolve), layerThicknessEdge(:,1:nEdgesSolve), & - normalVelocity(:,1:nEdgesSolve), sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), & - verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - - ! tangentialVelocity - variableIndex = variableIndex + 1 - call ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nEdgesSolve, maxLevelEdgeTop(1:nEdgesSolve), areaEdge(1:nEdgesSolve), layerThicknessEdge(:,1:nEdgesSolve), & - tangentialVelocity(:,1:nEdgesSolve), sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), & - verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - - ! 
layerThicknessEdge - variableIndex = variableIndex + 1 - call ocn_compute_field_area_weighted_local_stats_max_level(dminfo, nVertLevels, nEdgesSolve, maxLevelEdgeTop(1:nEdgesSolve), areaEdge(1:nEdgesSolve), layerThicknessEdge(:,1:nEdgesSolve), & - sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - - ! relativeVorticity - variableIndex = variableIndex + 1 - call ocn_compute_field_area_weighted_local_stats_max_level(dminfo, nVertLevels, nVerticesSolve, maxLevelVertexBot(1:nVerticesSolve), areaTriangle(1:nVerticesSolve), relativeVorticity(:,1:nVerticesSolve), & - sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - - ! 
enstrophy - allocate(enstrophy(nVertLevels,nVerticesSolve)) - enstrophy(:,:)=relativeVorticity(:,1:nVerticesSolve)**2 - variableIndex = variableIndex + 1 - call ocn_compute_field_area_weighted_local_stats_max_level(dminfo, nVertLevels, nVerticesSolve, maxLevelVertexBot(1:nVerticesSolve), areaTriangle(1:nVerticesSolve), & - enstrophy(:,:), sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), & - verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex)) - deallocate(enstrophy) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - - ! 
kineticEnergyCell - variableIndex = variableIndex + 1 - call ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nCellsSolve, maxLevelCell(1:nCellsSolve), areaCell(1:nCellsSolve), layerThickness(:,1:nCellsSolve), & - kineticEnergyCell(:,1:nCellsSolve), sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), & - verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - - ! normalizedAbsoluteVorticity - allocate(normalizedAbsoluteVorticity(nVertLevels,nEdgesSolve)) - normalizedAbsoluteVorticity(:,:) = normalizedRelativeVorticityEdge(:,1:nEdgesSolve) + normalizedPlanetaryVorticityEdge(:,1:nEdgesSolve) - variableIndex = variableIndex + 1 - call ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nEdgesSolve, maxLevelEdgeTop(1:nEdgesSolve), areaEdge(1:nEdgesSolve), layerThicknessEdge(:,1:nEdgesSolve), & - normalizedAbsoluteVorticity(:,1:nEdgesSolve), sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), & - verticalSumMaxes_tmp(variableIndex)) - deallocate(normalizedAbsoluteVorticity) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = 
max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - - ! pressure - variableIndex = variableIndex + 1 - call ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nCellsSolve, maxLevelCell(1:nCellsSolve), areaCell(1:nCellsSolve), layerThickness(:,1:nCellsSolve), & - pressure(:,1:nCellsSolve), sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), & - verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - - ! 
montgomeryPotential - variableIndex = variableIndex + 1 - call ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nCellsSolve, maxLevelCell(1:nCellsSolve), areaCell(1:nCellsSolve), layerThickness(:,1:nCellsSolve), & - montgomeryPotential(:,1:nCellsSolve), sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), & - verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - - ! vertVelocityTop vertical velocity - variableIndex = variableIndex + 1 - workArray = vertVelocityTop(1:nVertLevels,1:nCellsSolve) - call ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nCellsSolve, maxLevelCell(1:nCellsSolve), areaCell(1:nCellsSolve), layerThickness(:,1:nCellsSolve), & - workArray, sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), & - verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), 
verticalSumMaxes_tmp(variableIndex)) - - ! vertAleTransportTop vertical velocity - variableIndex = variableIndex + 1 - workArray = vertAleTransportTop(1:nVertLevels,1:nCellsSolve) - call ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nCellsSolve, maxLevelCell(1:nCellsSolve), areaCell(1:nCellsSolve), layerThickness(:,1:nCellsSolve), & - workArray, sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), & - verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - - ! 
lowFreqDivergence - variableIndex = variableIndex + 1 - if (associated(lowFreqDivergence)) then - call ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nCellsSolve, maxLevelCell(1:nCellsSolve), areaCell(1:nCellsSolve), layerThickness(:,1:nCellsSolve), & - lowFreqDivergence(:,1:nCellsSolve), sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), & - verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - end if - - ! 
highFreqThickness - variableIndex = variableIndex + 1 - if (associated(highFreqThickness)) then - call ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nCellsSolve, maxLevelCell(1:nCellsSolve), areaCell(1:nCellsSolve), layerThickness(:,1:nCellsSolve), & - highFreqThickness(:,1:nCellsSolve), sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), & - verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - end if - - ! 
Tracers - do iTracer = 1, num_tracers - variableIndex = variableIndex + 1 - workArray = Tracers(iTracer,:,1:nCellsSolve) - call ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nCellsSolve, maxLevelCell(1:nCellsSolve), areaCell(1:nCellsSolve), layerThickness(:,1:nCellsSolve), & - workArray, sums_tmp(variableIndex), sumSquares_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), & - verticalSumMaxes_tmp(variableIndex)) - sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex) - sumSquares(variableIndex) = sumSquares(variableIndex) + sumSquares_tmp(variableIndex) - mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex)) - maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex)) - verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex)) - verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex)) - enddo - deallocate(workArray) - - nVariables = variableIndex - nSums = nVariables - nMins = nVariables - nMaxes = nVariables - - nSums = nSums + 1 - sums(nSums) = sums(nSums) + sum(areaCell(1:nCellsSolve)) - - nSums = nSums + 1 - sums(nSums) = sums(nSums) + sum(dcEdge(1:nEdgesSolve)*dvEdge(1:nEdgesSolve)) - - nSums = nSums + 1 - sums(nSums) = sums(nSums) + sum(areaTriangle(1:nVerticesSolve)) - - nSums = nSums + 1 - sums(nSums) = sums(nSums) + nCellsSolve - - nSums = nSums + 1 - sums(nSums) = sums(nSums) + nEdgesSolve - - nSums = nSums + 1 - sums(nSums) = sums(nSums) + nVerticesSolve - - localCFL = 0.0 - do elementIndex = 1,nEdgesSolve - localCFL = max(localCFL, maxval(dt*normalVelocity(:,elementIndex)/dcEdge(elementIndex))) - end do - nMaxes = nMaxes + 1 - maxes(nMaxes) = localCFL - - do i = 1, nVariables - mins(nMins+i) = min(mins(nMins+i),verticalSumMins_tmp(i)) - maxes(nMaxes+i) = max(maxes(nMaxes+i),verticalSumMaxes_tmp(i)) - end do - - nMins = 
nMins + nVariables - nMaxes = nMaxes + nVariables - - deallocate(areaEdge) - - block => block % next - end do - call mpas_timer_stop("diagnostic block loop", diagBlockTimer) - call mpas_timer_start("diagnostics mpi", .false., diagMPITimer) - - call mpas_pool_get_subpool(domain % blocklist % structs, 'diagnostics', diagnosticsPool) - - call mpas_pool_get_array(diagnosticsPool, 'areaCellGlobal', areaCellGlobal) - call mpas_pool_get_array(diagnosticsPool, 'areaEdgeGlobal', areaEdgeGlobal) - call mpas_pool_get_array(diagnosticsPool, 'areaTriangleGlobal', areaTriangleGlobal) - call mpas_pool_get_array(diagnosticsPool, 'volumeCellGlobal', volumeCellGlobal) - call mpas_pool_get_array(diagnosticsPool, 'volumeEdgeGlobal', volumeEdgeGlobal) - call mpas_pool_get_array(diagnosticsPool, 'CFLNumberGlobal', CFLNumberGlobal) - call mpas_pool_get_array(diagnosticsPool, 'xtime', xtime) - - ! global reduction of the 5 arrays (packed into 3 to minimize global communication) - call mpas_dmpar_sum_real_array(dminfo, nSums, sums(1:nSums), reductions(1:nSums)) - sums(1:nVariables) = reductions(1:nVariables) - areaCellGlobal = reductions(nVariables+1) - areaEdgeGlobal = reductions(nVariables+2) - areaTriangleGlobal = reductions(nVariables+3) - nCellsGlobal = int(reductions(nVariables+4)) - nEdgesGlobal = int(reductions(nVariables+5)) - nVerticesGlobal = int(reductions(nVariables+6)) - call mpas_dmpar_sum_real_array(dminfo, nVariables, sumSquares(1:nVariables), reductions(1:nVariables)) - sumSquares(1:nVariables) = reductions(1:nVariables) - - call mpas_dmpar_min_real_array(dminfo, nMins, mins(1:nMins), reductions(1:nMins)) - mins(1:nVariables) = reductions(1:nVariables) - verticalSumMins(1:nVariables) = reductions(nMins-nVariables+1:nMins) - - call mpas_dmpar_max_real_array(dminfo, nMaxes, maxes(1:nMaxes), reductions(1:nMaxes)) - maxes(1:nVariables) = reductions(1:nVariables) - CFLNumberGlobal = reductions(nVariables+1) - verticalSumMaxes(1:nVariables) = 
reductions(nMaxes-nVariables+1:nMaxes) - - volumeCellGlobal = sums(1) - volumeEdgeGlobal = sums(4) - - - ! compute the averages (slightly different depending on how the sum was computed) - variableIndex = 0 - - ! time, in days, using a 360 day calendar - read (xtime, '(i4,10(a1,i2))') timeYYYY, timeChar, timeMM, timeChar, timeDD, timeChar, timeH, timeChar, timeM, timeChar, timeS - ! subtract 31.0 because calendar starts on 00-01-01 - time_days = timeYYYY*360.0 + timeMM*30.0 + timeDD + (timeH + (timeM + timeS/60.0)/60.0)/24.0 - 31.0 - - ! layerThickness - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/(areaCellGlobal*nVertLevels) - rms(variableIndex) = sqrt(sumSquares(variableIndex)/(areaCellGlobal*nVertLevels)) - - ! normalVelocity - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/volumeEdgeGlobal - rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeEdgeGlobal) - - ! tangentialVelocity - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/volumeEdgeGlobal - rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeEdgeGlobal) - - ! layerThicknessEdge - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/(areaEdgeGlobal*nVertLevels) - rms(variableIndex) = sqrt(sumSquares(variableIndex)/(areaEdgeGlobal*nVertLevels)) - - ! relativeVorticity - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/(areaTriangleGlobal*nVertLevels) - rms(variableIndex) = sqrt(sumSquares(variableIndex)/(areaTriangleGlobal*nVertLevels)) - - ! enstrophy - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/(areaTriangleGlobal*nVertLevels) - rms(variableIndex) = sqrt(sumSquares(variableIndex)/(areaTriangleGlobal*nVertLevels)) - - ! 
kineticEnergyCell - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/volumeCellGlobal - rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeCellGlobal) - - ! normalizedAbsoluteVorticity - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/volumeEdgeGlobal - rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeEdgeGlobal) - - ! pressure - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/volumeCellGlobal - rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeCellGlobal) - - ! montgomeryPotential - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/volumeCellGlobal - rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeCellGlobal) - - ! vertVelocityTop vertical velocity - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/volumeCellGlobal - rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeCellGlobal) - - ! vertAleTransportTop vertical velocity - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/volumeCellGlobal - rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeCellGlobal) - - if (associated(lowFreqDivergence) .and. associated(highFreqThickness)) then - ! lowFreqDivergence - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/volumeCellGlobal - rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeCellGlobal) - - ! highFreqThickness - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/volumeCellGlobal - rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeCellGlobal) - else - ! lowFreqDivergence - variableIndex = variableIndex + 1 - averages(variableIndex) = 0.0_RKIND - rms(variableIndex) = 0.0_RKIND - - ! highFreqThickness - variableIndex = variableIndex + 1 - averages(variableIndex) = 0.0_RKIND - rms(variableIndex) = 0.0_RKIND - end if - - ! 
Tracers - do iTracer = 1, num_tracers - variableIndex = variableIndex + 1 - averages(variableIndex) = sums(variableIndex)/volumeCellGlobal - rms(variableIndex) = sqrt(sumSquares(variableIndex)/volumeCellGlobal) - enddo - - call mpas_timer_stop("diagnostics mpi", diagMPITimer) - - call mpas_pool_get_subpool(domain % blocklist % structs, 'diagnostics', diagnosticsPool) - call mpas_pool_get_array(diagnosticsPool, 'xtime', xtime) - - ! write out the data to files - if (dminfo % my_proc_id == IO_NODE) then - fileID = getFreeUnit() - open(fileID,file='stats_min.txt',STATUS='UNKNOWN', POSITION='append') - write (fileID,'(100es24.14)') time_days, mins(1:nVariables) - close (fileID) - open(fileID,file='stats_max.txt',STATUS='UNKNOWN', POSITION='append') - write (fileID,'(100es24.14)') time_days, maxes(1:nVariables) - close (fileID) - open(fileID,file='stats_sum.txt',STATUS='UNKNOWN', POSITION='append') - write (fileID,'(100es24.14)') time_days, sums(1:nVariables) - close (fileID) - open(fileID,file='stats_rms.txt',STATUS='UNKNOWN', POSITION='append') - write (fileID,'(100es24.14)') time_days, rms(1:nVariables) - close (fileID) - open(fileID,file='stats_avg.txt',STATUS='UNKNOWN', POSITION='append') - write (fileID,'(100es24.14)') time_days, averages(1:nVariables) - close (fileID) - open(fileID,file='stats_time.txt',STATUS='UNKNOWN', POSITION='append') - write (fileID,'(i10,10x,a,100es24.14)') timeIndex, & - trim(xtime), dt, & - CFLNumberGlobal - close (fileID) - open(fileID,file='stats_colmin.txt',STATUS='UNKNOWN', POSITION='append') - write (fileID,'(100es24.14)') verticalSumMins(1:nVariables) - close (fileID) - open(fileID,file='stats_colmax.txt',STATUS='UNKNOWN', POSITION='append') - write (fileID,'(100es24.14)') verticalSumMaxes(1:nVariables) - close (fileID) - end if - - end subroutine ocn_compute_global_diagnostics!}}} - - integer function getFreeUnit()!{{{ - implicit none - - integer :: index - logical :: isOpened - - getFreeUnit = 0 - do index = 1,99 - if((index /= 
5) .and. (index /= 6)) then - inquire(unit = index, opened = isOpened) - if( .not. isOpened) then - getFreeUnit = index - return - end if - end if - end do - end function getFreeUnit!}}} - - subroutine ocn_compute_field_local_stats(dminfo, nVertLevels, nElements, field, localSum, localMin, localMax, localVertSumMin, &!{{{ - localVertSumMax) - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: localSum, localMin, localMax, localVertSumMin, & - localVertSumMax - - localSum = sum(field) - localMin = minval(field) - localMax = maxval(field) - localVertSumMin = minval(sum(field,1)) - localVertSumMax = maxval(sum(field,1)) - - end subroutine ocn_compute_field_local_stats!}}} - - subroutine ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nElements, areas, field, localSum, localMin, &!{{{ - localMax, localVertSumMin, localVertSumMax) - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nElements), intent(in) :: areas - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: localSum, localMin, localMax, localVertSumMin, & - localVertSumMax - - integer :: elementIndex - - localSum = 0.0 - do elementIndex = 1, nElements - localSum = localSum + areas(elementIndex) * sum(field(:,elementIndex)) - end do - - localMin = minval(field) - localMax = maxval(field) - localVertSumMin = minval(sum(field,1)) - localVertSumMax = maxval(sum(field,1)) - - end subroutine ocn_compute_field_area_weighted_local_stats!}}} - - subroutine ocn_compute_field_area_weighted_local_stats_max_level(dminfo, nVertLevels, nElements, maxLevel, areas, field, &!{{{ - localSum, localRMS, localMin, localMax, localVertSumMin, localVertSumMax) - - implicit none - - type (dm_info), intent(in) :: 
dminfo - integer, intent(in) :: nVertLevels, nElements - integer, dimension(nElements), intent(in) :: maxLevel - real (kind=RKIND), dimension(nElements), intent(in) :: areas - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: localSum, localRMS, localMin, localMax, localVertSumMin, & - localVertSumMax - - integer :: elementIndex - real (kind=RKIND) :: colSum, colRMS, colSumAbs - - localSum = 0.0 - localRMS = 0.0 - localMin = 1.0e34 - localMax = -1.0e34 - localVertSumMin = 1.0e34 - localVertSumMax = -1.0e34 - - do elementIndex = 1, nElements - colSum = sum(field(1:maxLevel(elementIndex),elementIndex)) - localSum = localSum + areas(elementIndex) * colSum - colRMS = sum(field(1:maxLevel(elementIndex),elementIndex)**2) - localRMS = localRMS + areas(elementIndex) * colRMS - localMin = min(localMin,minval(field(1:maxLevel(elementIndex),elementIndex))) - localMax = max(localMax,maxval(field(1:maxLevel(elementIndex),elementIndex))) - localVertSumMin = min(localVertSumMin,colSum) - localVertSumMax = max(localVertSumMax,colSum) - end do - - end subroutine ocn_compute_field_area_weighted_local_stats_max_level!}}} - - subroutine ocn_compute_field_thickness_weighted_local_stats(dminfo, nVertLevels, nElements, h, field, &!{{{ - localSum, localMin, localMax, localVertSumMin, localVertSumMax) - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: h - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: localSum, localMin, localMax, localVertSumMin, & - localVertSumMax - - real (kind=RKIND), dimension(nVertLevels, nElements) :: hTimesField - - integer :: elementIndex - - localSum = sum(h*field) - localMin = minval(field) - localMax = maxval(field) - localVertSumMin = minval(sum(h*field,1)) - localVertSumMax = maxval(sum(h*field,1)) - - end 
subroutine ocn_compute_field_thickness_weighted_local_stats!}}} - - subroutine ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nElements, areas, layerThickness, field, &!{{{ - localSum, localMin, localMax, localVertSumMin, localVertSumMax) - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nElements), intent(in) :: areas - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: layerThickness - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: localSum, localMin, localMax, localVertSumMin, & - localVertSumMax - - real (kind=RKIND), dimension(nVertLevels, nElements) :: hTimesField - - integer :: elementIndex - - localSum = 0.0 - do elementIndex = 1, nElements - localSum = localSum + areas(elementIndex) * sum(layerThickness(:,elementIndex)*field(:,elementIndex)) - end do - - localMin = minval(field) - localMax = maxval(field) - localVertSumMin = minval(sum(layerThickness*field,1)) - localVertSumMax = maxval(sum(layerThickness*field,1)) - - end subroutine ocn_compute_field_volume_weighted_local_stats!}}} - - subroutine ocn_compute_field_volume_weighted_local_stats_max_level(dminfo, nVertLevels, nElements, maxLevel, areas, layerThickness, field, &!{{{ - localSum, localRMS, localMin, localMax, localVertSumMin, localVertSumMax) - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - integer, dimension(nElements), intent(in) :: maxLevel - real (kind=RKIND), dimension(nElements), intent(in) :: areas - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: layerThickness - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: localSum, localRMS, localMin, localMax, localVertSumMin, & - localVertSumMax - - integer :: elementIndex - real (kind=RKIND) :: thicknessWeightedColSum, 
thicknessWeightedColRMS, thicknessWeightedColSumAbs - real (kind=RKIND), dimension(nVertLevels, nElements) :: hTimesField - - localSum = 0.0 - localRMS = 0.0 - localMin = 1.0e34 - localMax = -1.0e34 - localVertSumMin = 1.0e34 - localVertSumMax = -1.0e34 - - do elementIndex = 1, nElements - thicknessWeightedColSum = sum(layerThickness(1:maxLevel(elementIndex),elementIndex)*field(1:maxLevel(elementIndex),elementIndex)) - localSum = localSum + areas(elementIndex) * thicknessWeightedColSum - thicknessWeightedColRMS = sum(layerThickness(1:maxLevel(elementIndex),elementIndex)*field(1:maxLevel(elementIndex),elementIndex)**2) - localRMS = localRMS + areas(elementIndex) * thicknessWeightedColRMS - localMin = min(localMin,minval(field(1:maxLevel(elementIndex),elementIndex))) - localMax = max(localMax,maxval(field(1:maxLevel(elementIndex),elementIndex))) - localVertSumMin = min(localVertSumMin,thicknessWeightedColSum) - localVertSumMax = max(localVertSumMax,thicknessWeightedColSum) - end do - - end subroutine ocn_compute_field_volume_weighted_local_stats_max_level!}}} - - subroutine ocn_compute_global_sum(dminfo, nVertLevels, nElements, field, globalSum)!{{{ - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: globalSum - - real (kind=RKIND) :: localSum - - localSum = sum(field) - call mpas_dmpar_sum_real(dminfo, localSum, globalSum) - - end subroutine ocn_compute_global_sum!}}} - - subroutine ocn_compute_area_weighted_global_sum(dminfo, nVertLevels, nElements, areas, field, globalSum)!{{{ - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nElements), intent(in) :: areas - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: globalSum - - integer :: elementIndex - real 
(kind=RKIND) :: localSum - - localSum = 0. - do elementIndex = 1, nElements - localSum = localSum + areas(elementIndex) * sum(field(:,elementIndex)) - end do - - call mpas_dmpar_sum_real(dminfo, localSum, globalSum) - - end subroutine ocn_compute_area_weighted_global_sum!}}} - - subroutine ocn_compute_volume_weighted_global_sum(dminfo, nVertLevels, nElements, areas, h, field, globalSum)!{{{ - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nElements), intent(in) :: areas - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: h - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: globalSum - - real (kind=RKIND), dimension(nVertLevels, nElements) :: hTimesField - - hTimesField = h*field - - call ocn_compute_area_weighted_global_sum(dminfo, nVertLevels, nElements, areas, hTimesField, globalSum) - - end subroutine ocn_compute_volume_weighted_global_sum!}}} - - subroutine ocn_compute_global_min(dminfo, nVertLevels, nElements, field, globalMin)!{{{ - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: globalMin - - real (kind=RKIND) :: localMin - - localMin = minval(field) - call mpas_dmpar_min_real(dminfo, localMin, globalMin) - - end subroutine ocn_compute_global_min!}}} - - subroutine ocn_compute_global_max(dminfo, nVertLevels, nElements, field, globalMax)!{{{ - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: globalMax - - real (kind=RKIND) :: localMax - - localMax = maxval(field) - call mpas_dmpar_max_real(dminfo, localMax, globalMax) - - end subroutine ocn_compute_global_max!}}} 
- - subroutine ocn_compute_global_vert_sum_horiz_min(dminfo, nVertLevels, nElements, field, globalMin)!{{{ - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: globalMin - - real (kind=RKIND) :: localMin - - localMin = minval(sum(field,1)) - call mpas_dmpar_min_real(dminfo, localMin, globalMin) - - end subroutine ocn_compute_global_vert_sum_horiz_min!}}} - - subroutine ocn_compute_global_vert_sum_horiz_max(dminfo, nVertLevels, nElements, field, globalMax)!{{{ - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field - real (kind=RKIND), intent(out) :: globalMax - - real (kind=RKIND) :: localMax - - localMax = maxval(sum(field,1)) - call mpas_dmpar_max_real(dminfo, localMax, globalMax) - - end subroutine ocn_compute_global_vert_sum_horiz_max!}}} - - subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_min(dminfo, nVertLevels, nElements, h, field, globalMin)!{{{ - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: h, field - real (kind=RKIND), intent(out) :: globalMin - - real (kind=RKIND) :: localMin - - localMin = minval(sum(h*field,1)) - call mpas_dmpar_min_real(dminfo, localMin, globalMin) - - end subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_min!}}} - - subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_max(dminfo, nVertLevels, nElements, h, field, globalMax)!{{{ - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(in) :: nVertLevels, nElements - real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: h, field - real (kind=RKIND), intent(out) :: globalMax - - real (kind=RKIND) 
:: localMax - - localMax = maxval(sum(h*field,1)) - call mpas_dmpar_max_real(dminfo, localMax, globalMax) - - end subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_max!}}} - - subroutine ocn_global_diagnostics_init(dminfo,err)!{{{ - - ! Create stats_readme.txt file listing variable names - - implicit none - - type (dm_info), intent(in) :: dminfo - integer, intent(out) :: err - integer :: fileID, i - - err = 0 - - if (dminfo % my_proc_id == IO_NODE) then - fileID = getFreeUnit() - open(fileID,file='stats_readme.txt',STATUS='UNKNOWN', POSITION='rewind') - - write (fileID,'(a)') 'readme file for MPAS-Ocean global statistics' - write (fileID,'(/,a)') 'stats_time.txt. contains: timeIndex, timestamp, dt, CFLNumberGlobal' - write (fileID,'(/,a)') 'All other stats_*.txt. contain the following columns. Rows correspond to timestamps in rows of stats_time.txt' - write (fileID,'(a)') "See user's guide for units associated with these variables." - - i=1 - write (fileID,'(i5,a)') i,'. time, in days, using a 360 day calendar'; i=i+1 - write (fileID,'(i5,a)') i,'. layerThickness'; i=i+1 - write (fileID,'(i5,a)') i,'. normalVelocity'; i=i+1 - write (fileID,'(i5,a)') i,'. tangentialVelocity'; i=i+1 - write (fileID,'(i5,a)') i,'. layerThicknessEdge'; i=i+1 - write (fileID,'(i5,a)') i,'. relativeVorticity'; i=i+1 - write (fileID,'(i5,a)') i,'. enstrophy = relativeVorticity**2'; i=i+1 - write (fileID,'(i5,a)') i,'. kineticEnergyCell'; i=i+1 - write (fileID,'(i5,a)') i,'. normalizedAbsoluteVorticity = (relative vorticity + planetary vorticity)/layer thickness'; i=i+1 - write (fileID,'(i5,a)') i,'. pressure'; i=i+1 - write (fileID,'(i5,a)') i,'. montgomeryPotential'; i=i+1 - write (fileID,'(i5,a)') i,'. vertVelocityTop vertical velocity'; i=i+1 - write (fileID,'(i5,a)') i,'. vertAleTransportTop vertical transport'; i=i+1 - write (fileID,'(i5,a)') i,'. lowFreqDivergence'; i=i+1 - write (fileID,'(i5,a)') i,'. highFreqThickness'; i=i+1 - write (fileID,'(i5,a)') i,'. 
Tracers: usually T, S, then others in remaining columns' - - write (fileID,'(/,a)') 'A chain of simple unix commands may be used to access a specific part of the data. For example,' - write (fileID,'(a)') 'to view the last three values of column seven in the global average, use:' - write (fileID,'(a)') "cat stats_avg.txt | awk '{print $7}' | tail -n3" - - close (fileID) - endif - - end subroutine ocn_global_diagnostics_init!}}} - - -end module ocn_global_diagnostics diff --git a/src/core_ocean/shared/mpas_ocn_gm.F b/src/core_ocean/shared/mpas_ocn_gm.F index e229db81f6..c06c9f1ef6 100644 --- a/src/core_ocean/shared/mpas_ocn_gm.F +++ b/src/core_ocean/shared/mpas_ocn_gm.F @@ -9,7 +9,6 @@ module ocn_gm use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_timer use mpas_constants @@ -47,7 +46,7 @@ module ocn_gm logical, pointer :: config_use_standardGM logical, pointer :: config_disable_redi_k33 - real, parameter :: epsGM = 1.0e-12 + real (kind=RKIND), parameter :: epsGM = 1.0e-12 !*********************************************************************** @@ -186,12 +185,12 @@ subroutine ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) gradZMidEdge(:,:) = huge(0D0) gradZMidTopOfEdge(:,:) = huge(0D0) - relativeSlopeTopOfEdge(:,:) = 0.0 - relativeSlopeTopOfCell(:,:) = 0.0 - relativeSlopeTapering(:,:) = 0.0 - relativeSlopeTaperingCell(:,:) = 0.0 - k33(:,:) = 0.0 - normalGMBolusVelocity(:,:) = 0.0 + relativeSlopeTopOfEdge(:,:) = 0.0_RKIND + relativeSlopeTopOfCell(:,:) = 0.0_RKIND + relativeSlopeTapering(:,:) = 0.0_RKIND + relativeSlopeTaperingCell(:,:) = 0.0_RKIND + k33(:,:) = 0.0_RKIND + normalGMBolusVelocity(:,:) = 0.0_RKIND !-------------------------------------------------------------------- ! @@ -212,8 +211,8 @@ subroutine ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) ! Approximation of dDensityDzTopOfCell on the top and bottom interfaces through the idea of having ! 
ghost cells above the top and below the bottom layers of the same depths and density. ! Essentially, this enforces the boundary condition (d density)/dz = 0 at the top and bottom. - dDensityDzTopOfCell(1,iCell) = 0.0 - dDensityDzTopOfCell(maxLevelCell(iCell)+1,iCell) = 0.0 + dDensityDzTopOfCell(1,iCell) = 0.0_RKIND + dDensityDzTopOfCell(maxLevelCell(iCell)+1,iCell) = 0.0_RKIND end do ! Interpolate dDensityDzTopOfCell to edge and layer interface @@ -221,7 +220,7 @@ subroutine ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) do k = 1, maxLevelEdgeTop(iEdge)+1 cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) - dDensityDzTopOfEdge(k,iEdge) = 0.5 * (dDensityDzTopOfCell(k,cell1) + dDensityDzTopOfCell(k,cell2)) + dDensityDzTopOfEdge(k,iEdge) = 0.5_RKIND * (dDensityDzTopOfCell(k,cell1) + dDensityDzTopOfCell(k,cell2)) end do end do @@ -290,7 +289,7 @@ subroutine ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) ! Compute relativeSlopeTopOfEdge at edge and layer interface ! set relativeSlopeTopOfEdge to zero for horizontal land/water edges. - relativeSlopeTopOfEdge = 0.0 + relativeSlopeTopOfEdge = 0.0_RKIND do iEdge = 1, nEdges ! Beside a full land cell (e.g. missing cell) maxLevelEdgeTop=0, so relativeSlopeTopOfEdge at that edge will remain zero. @@ -309,22 +308,22 @@ subroutine ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) ! slope can be unbounded in regions of neutral stability, reset to the large, but bounded, value ! values is hardwrite to 1.0, this is equivalent to a slope of 45 degrees - where(relativeSlopeTopOfEdge < -1.0) relativeSlopeTopOfEdge = -1.0 - where(relativeSlopeTopOfEdge > 1.0) relativeSlopeTopOfEdge = 1.0 + where(relativeSlopeTopOfEdge < -1.0_RKIND) relativeSlopeTopOfEdge = -1.0_RKIND + where(relativeSlopeTopOfEdge > 1.0_RKIND) relativeSlopeTopOfEdge = 1.0_RKIND ! average relative slope to cell centers ! 
do this by computing (relative slope)^2, then taking sqrt - areaCellSum = 1.0e-34 + areaCellSum = 1.0e-34_RKIND do iEdge = 1, nEdges cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) ! contribution of cell area from this edge: - areaEdge = 0.25 * dcEdge(iEdge) * dvEdge(iEdge) + areaEdge = 0.25_RKIND * dcEdge(iEdge) * dvEdge(iEdge) do k = 1, maxLevelEdgeTop(iEdge) ! only one component is summed (thus the weighting by a factor of 2.0) - rtmp = 2.0 * areaEdge * relativeSlopeTopOfEdge(k,iEdge)**2 + rtmp = 2.0_RKIND * areaEdge * relativeSlopeTopOfEdge(k,iEdge)**2 relativeSlopeTopOfCell(k,cell1) = relativeSlopeTopOfCell(k,cell1) + rtmp relativeSlopeTopOfCell(k,cell2) = relativeSlopeTopOfCell(k,cell2) + rtmp @@ -341,10 +340,10 @@ subroutine ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) ! Compute tapering function ! Compute k33 at cell center and layer interface - k33(:,:) = 0.0 + k33(:,:) = 0.0_RKIND do iCell=1,nCells do k = 2, maxLevelCell(iCell) - relativeSlopeTaperingCell(k,iCell) = min(1.0, config_max_relative_slope**2 / (relativeSlopeTopOfCell(k,iCell)**2+epsGM)) + relativeSlopeTaperingCell(k,iCell) = min(1.0_RKIND, config_max_relative_slope**2 / (relativeSlopeTopOfCell(k,iCell)**2+epsGM)) k33(k,iCell) = relativeSlopeTaperingCell(k,iCell) * (relativeSlopeTopOfCell(k,iCell))**2 end do end do @@ -354,7 +353,7 @@ subroutine ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) do k = 2, maxLevelEdgeTop(iEdge) - relativeSlopeTapering(k,iEdge) = 0.5 * (relativeSlopeTaperingCell(k,cell1) + relativeSlopeTaperingCell(k,cell2)) + relativeSlopeTapering(k,iEdge) = 0.5_RKIND * (relativeSlopeTaperingCell(k,cell1) + relativeSlopeTaperingCell(k,cell2)) enddo enddo @@ -363,7 +362,7 @@ subroutine ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) k33 = config_Redi_kappa * k33 ! 
allow disabling of K33 for testing - if(config_disable_redi_k33) k33=0.0 + if(config_disable_redi_k33) k33=0.0_RKIND !-------------------------------------------------------------------- ! @@ -371,7 +370,7 @@ subroutine ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) ! !-------------------------------------------------------------------- - gmStreamFuncTopOfEdge(:,:) = 0.0 + gmStreamFuncTopOfEdge(:,:) = 0.0_RKIND c = config_gravWaveSpeed_trunc**2 do iEdge = 1, nEdges @@ -382,28 +381,28 @@ subroutine ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) if (maxLevelEdgeTop(iEdge) .GE. 3) then ! First row k = 2 - BruntVaisalaFreqTopEdge = 0.5 * (BruntVaisalaFreqTop(k,cell1) + BruntVaisalaFreqTop(k,cell2)) - BruntVaisalaFreqTopEdge = max(BruntVaisalaFreqTopEdge, 0.0) - tridiagB(k-1) = - 2.*config_gravWaveSpeed_trunc**2/(layerThicknessEdge(k-1,iEdge)*layerThicknessEdge(k,iEdge)) - BruntVaisalaFreqTopEdge - tridiagC(k-1) = 2.*config_gravWaveSpeed_trunc**2/layerThicknessEdge(k,iEdge)/(layerThicknessEdge(k-1,iEdge)+layerThicknessEdge(k,iEdge)) + BruntVaisalaFreqTopEdge = 0.5_RKIND * (BruntVaisalaFreqTop(k,cell1) + BruntVaisalaFreqTop(k,cell2)) + BruntVaisalaFreqTopEdge = max(BruntVaisalaFreqTopEdge, 0.0_RKIND) + tridiagB(k-1) = - 2.0_RKIND * config_gravWaveSpeed_trunc**2/(layerThicknessEdge(k-1,iEdge)*layerThicknessEdge(k,iEdge)) - BruntVaisalaFreqTopEdge + tridiagC(k-1) = 2.0_RKIND * config_gravWaveSpeed_trunc**2/layerThicknessEdge(k,iEdge)/(layerThicknessEdge(k-1,iEdge)+layerThicknessEdge(k,iEdge)) rightHandSide(k-1) = config_standardGM_tracer_kappa * gravity / config_density0 * gradDensityConstZTopOfEdge(k,iEdge) ! 
Second to next to the last rows - do k = 3, maxLevelEdgeTop(iEdge)-1 - BruntVaisalaFreqTopEdge = 0.5 * (BruntVaisalaFreqTop(k,cell1) + BruntVaisalaFreqTop(k,cell2)) - BruntVaisalaFreqTopEdge = max(BruntVaisalaFreqTopEdge, 0.0) - tridiagA(k-2) = 2.*config_gravWaveSpeed_trunc**2/layerThicknessEdge(k-1,iEdge)/(layerThicknessEdge(k-1,iEdge)+layerThicknessEdge(k,iEdge)) - tridiagB(k-1) = - 2.*config_gravWaveSpeed_trunc**2/(layerThicknessEdge(k-1,iEdge)*layerThicknessEdge(k,iEdge)) - BruntVaisalaFreqTopEdge - tridiagC(k-1) = 2.*config_gravWaveSpeed_trunc**2/layerThicknessEdge(k,iEdge)/(layerThicknessEdge(k-1,iEdge)+layerThicknessEdge(k,iEdge)) + do k = 3, maxLevelEdgeTop(iEdge)-1 + BruntVaisalaFreqTopEdge = 0.5_RKIND * (BruntVaisalaFreqTop(k,cell1) + BruntVaisalaFreqTop(k,cell2)) + BruntVaisalaFreqTopEdge = max(BruntVaisalaFreqTopEdge, 0.0_RKIND) + tridiagA(k-2) = 2.0_RKIND * config_gravWaveSpeed_trunc**2/layerThicknessEdge(k-1,iEdge)/(layerThicknessEdge(k-1,iEdge)+layerThicknessEdge(k,iEdge)) + tridiagB(k-1) = - 2.0_RKIND * config_gravWaveSpeed_trunc**2/(layerThicknessEdge(k-1,iEdge)*layerThicknessEdge(k,iEdge)) - BruntVaisalaFreqTopEdge + tridiagC(k-1) = 2.0_RKIND * config_gravWaveSpeed_trunc**2/layerThicknessEdge(k,iEdge)/(layerThicknessEdge(k-1,iEdge)+layerThicknessEdge(k,iEdge)) rightHandSide(k-1) = config_standardGM_tracer_kappa * gravity / config_density0 * gradDensityConstZTopOfEdge(k,iEdge) end do ! 
Last row k = maxLevelEdgeTop(iEdge) - BruntVaisalaFreqTopEdge = 0.5 * (BruntVaisalaFreqTop(k,cell1) + BruntVaisalaFreqTop(k,cell2)) - BruntVaisalaFreqTopEdge = max(BruntVaisalaFreqTopEdge, 0.0) - tridiagA(k-2) = 2.*config_gravWaveSpeed_trunc**2/layerThicknessEdge(k-1,iEdge)/(layerThicknessEdge(k-1,iEdge)+layerThicknessEdge(k,iEdge)) - tridiagB(k-1) = - 2.0*config_gravWaveSpeed_trunc**2/(layerThicknessEdge(k-1,iEdge)*layerThicknessEdge(k,iEdge)) - BruntVaisalaFreqTopEdge + BruntVaisalaFreqTopEdge = 0.5_RKIND * (BruntVaisalaFreqTop(k,cell1) + BruntVaisalaFreqTop(k,cell2)) + BruntVaisalaFreqTopEdge = max(BruntVaisalaFreqTopEdge, 0.0_RKIND) + tridiagA(k-2) = 2.0_RKIND * config_gravWaveSpeed_trunc**2/layerThicknessEdge(k-1,iEdge)/(layerThicknessEdge(k-1,iEdge)+layerThicknessEdge(k,iEdge)) + tridiagB(k-1) = - 2.0_RKIND * config_gravWaveSpeed_trunc**2/(layerThicknessEdge(k-1,iEdge)*layerThicknessEdge(k,iEdge)) - BruntVaisalaFreqTopEdge rightHandSide(k-1) = config_standardGM_tracer_kappa * gravity / config_density0 * gradDensityConstZTopOfEdge(k,iEdge) ! Total number of rows @@ -423,14 +422,14 @@ subroutine ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) end do ! 
Interpolate gmStreamFuncTopOfEdge to cell centers for visualization - gmStreamFuncTopOfCell(:,:) = 0.0 + gmStreamFuncTopOfCell(:,:) = 0.0_RKIND do iEdge = 1, nEdges cell1 = cellsOnEdge(1,iEdge) cell2 = cellsOnEdge(2,iEdge) - areaEdge = 0.25 * dcEdge(iEdge) * dvEdge(iEdge) + areaEdge = 0.25_RKIND * dcEdge(iEdge) * dvEdge(iEdge) do k = 1, maxLevelEdgeTop(iEdge) - rtmp = 0.5 * ( gmStreamFuncTopOfEdge(k,iEdge) + gmStreamFuncTopOfEdge(k+1,iEdge) ) * areaEdge + rtmp = 0.5_RKIND * ( gmStreamFuncTopOfEdge(k,iEdge) + gmStreamFuncTopOfEdge(k+1,iEdge) ) * areaEdge gmStreamFuncTopOfCell(k,cell1) = gmStreamFuncTopOfCell(k,cell1) + rtmp gmStreamFuncTopOfCell(k,cell2) = gmStreamFuncTopOfCell(k,cell2) + rtmp end do diff --git a/src/core_ocean/shared/mpas_ocn_init.F b/src/core_ocean/shared/mpas_ocn_init_routines.F similarity index 70% rename from src/core_ocean/shared/mpas_ocn_init.F rename to src/core_ocean/shared/mpas_ocn_init_routines.F index 46e66fc483..3dfab0cfb2 100644 --- a/src/core_ocean/shared/mpas_ocn_init.F +++ b/src/core_ocean/shared/mpas_ocn_init_routines.F @@ -13,18 +13,33 @@ !> \author Mark Petersen !> \date December 2013 !> \details -!> This module contains routines to initialize variables at the +!> This module contains routines to initialize variables at the !> beginning of an MPAS-Ocean simulation, or when starting the !> ocean analysis core. ! !----------------------------------------------------------------------- -module ocn_init +module ocn_init_routines - use mpas_framework + use mpas_kind_types + use mpas_derived_types + use mpas_pool_routines + use mpas_timer use mpas_dmpar + use mpas_constants + + use mpas_rbf_interpolation + use mpas_vector_operations + use mpas_vector_reconstruction + use mpas_tracer_advection_helpers + + use ocn_time_average + use ocn_diagnostics + use ocn_gm use ocn_constants + private + !-------------------------------------------------------------------- ! ! Public parameters @@ -37,11 +52,12 @@ module ocn_init ! 
!-------------------------------------------------------------------- - public :: & - ocn_compute_max_level, & - ocn_compute_mesh_scaling, & - ocn_setup_sign_and_index_fields, & - ocn_init_vert_coord + public :: & + ocn_init_routines_compute_max_level, & + ocn_init_routines_compute_mesh_scaling, & + ocn_init_routines_setup_sign_and_index_fields, & + ocn_init_routines_vert_coord, & + ocn_init_routines_block !-------------------------------------------------------------------- ! @@ -55,24 +71,18 @@ module ocn_init !*********************************************************************** ! -! routine ocn_compute_max_level +! routine ocn_init_routines_compute_max_level ! !> \brief initialize max level and boundary mask variables !> \author Doug Jacobsen, Mark Petersen, Todd Ringler !> \date September 2011 -!> \details +!> \details !> This routine initializes max level and boundary mask variables ! !----------------------------------------------------------------------- -subroutine ocn_compute_max_level(domain)!{{{ +subroutine ocn_init_routines_compute_max_level(domain)!{{{ ! Initialize maxLevel and boundary mesh variables. - use mpas_derived_types - use mpas_pool_routines - use mpas_constants - - implicit none - type (domain_type), intent(inout) :: domain type (mpas_pool_type), pointer :: meshPool @@ -119,7 +129,7 @@ subroutine ocn_compute_max_level(domain)!{{{ maxLevelEdgeTop(iEdge) = & min( maxLevelCell(cellsOnEdge(1,iEdge)), & maxLevelCell(cellsOnEdge(2,iEdge)) ) - end do + end do maxLevelEdgeTop(nEdges+1) = 0 ! maxLevelEdgeBot is the maximum (deepest) of the surrounding cells @@ -127,7 +137,7 @@ subroutine ocn_compute_max_level(domain)!{{{ maxLevelEdgeBot(iEdge) = & max( maxLevelCell(cellsOnEdge(1,iEdge)), & maxLevelCell(cellsOnEdge(2,iEdge)) ) - end do + end do maxLevelEdgeBot(nEdges+1) = 0 ! 
maxLevelVertexBot is the maximum (deepest) of the surrounding cells @@ -138,7 +148,7 @@ subroutine ocn_compute_max_level(domain)!{{{ max( maxLevelVertexBot(iVertex), & maxLevelCell(cellsOnVertex(i,iVertex))) end do - end do + end do maxLevelVertexBot(nVertices+1) = 0 ! maxLevelVertexTop is the minimum (shallowest) of the surrounding cells @@ -149,7 +159,7 @@ subroutine ocn_compute_max_level(domain)!{{{ min( maxLevelVertexTop(iVertex), & maxLevelCell(cellsOnVertex(i,iVertex))) end do - end do + end do maxLevelVertexTop(nVertices+1) = 0 ! set boundary edge @@ -158,7 +168,7 @@ subroutine ocn_compute_max_level(domain)!{{{ do iEdge = 1, nEdges boundaryEdge(1:maxLevelEdgeTop(iEdge),iEdge)=0 edgeMask(1:maxLevelEdgeTop(iEdge),iEdge)=1 - end do + end do ! ! Find cells and vertices that have an edge on the boundary @@ -200,21 +210,21 @@ subroutine ocn_compute_max_level(domain)!{{{ ! Note: We do not update halos on maxLevel* variables. I want the ! outside edge of a halo to be zero on each processor. -end subroutine ocn_compute_max_level!}}} +end subroutine ocn_init_routines_compute_max_level!}}} !*********************************************************************** ! -! routine ocn_setup_sign_and_index_fields +! routine ocn_init_routines_setup_sign_and_index_fields ! !> \brief set up sign and index fields !> \author Doug Jacobsen, Mark Petersen, Todd Ringler !> \date September 2011 -!> \details -!> This routine initializes edgeSignOnCell, edgeSignOnVertex, and +!> \details +!> This routine initializes edgeSignOnCell, edgeSignOnVertex, and !> kiteIndexOnCell. ! 
!----------------------------------------------------------------------- - subroutine ocn_setup_sign_and_index_fields(meshPool)!{{{ + subroutine ocn_init_routines_setup_sign_and_index_fields(meshPool)!{{{ type (mpas_pool_type), intent(inout) :: meshPool @@ -246,7 +256,7 @@ subroutine ocn_setup_sign_and_index_fields(meshPool)!{{{ kiteIndexOnCell = 0.0_RKIND do iCell = 1, nCells - do i = 1, nEdgesOnCell(iCell) + do i = 1, nEdgesOnCell(iCell) iEdge = edgesOnCell(i, iCell) iVertex = verticesOnCell(i, iCell) @@ -278,26 +288,21 @@ subroutine ocn_setup_sign_and_index_fields(meshPool)!{{{ end do end do - end subroutine ocn_setup_sign_and_index_fields!}}} + end subroutine ocn_init_routines_setup_sign_and_index_fields!}}} !*********************************************************************** ! -! routine ocn_compute_mesh_scaling +! routine ocn_init_routines_compute_mesh_scaling ! !> \brief set up mesh scaling variables !> \author Doug Jacobsen, Mark Petersen, Todd Ringler !> \date September 2011 -!> \details +!> \details !> This routine initializes meshScaling, meshScalingDel2, and !> meshScalingDel4 ! !----------------------------------------------------------------------- - subroutine ocn_compute_mesh_scaling(meshPool, scaleHmixWithMesh, maxMeshDensity)!{{{ - - use mpas_derived_types - use mpas_pool_routines - - implicit none + subroutine ocn_init_routines_compute_mesh_scaling(meshPool, scaleHmixWithMesh, maxMeshDensity)!{{{ type (mpas_pool_type), intent(inout) :: meshPool logical, intent(in) :: scaleHmixWithMesh @@ -332,29 +337,23 @@ subroutine ocn_compute_mesh_scaling(meshPool, scaleHmixWithMesh, maxMeshDensity) end do end if - end subroutine ocn_compute_mesh_scaling!}}} + end subroutine ocn_init_routines_compute_mesh_scaling!}}} !*********************************************************************** ! -! routine ocn_init_vert_coord +! routine ocn_init_routines_vert_coord ! 
!> \brief initialize vertical coordinate variables !> \author Doug Jacobsen, Mark Petersen, Todd Ringler !> \date September 2011 -!> \details +!> \details !> This routine initializes vertical coordinate variables ! !----------------------------------------------------------------------- - subroutine ocn_init_vert_coord(domain)!{{{ + subroutine ocn_init_routines_vert_coord(domain)!{{{ ! Initialize zlevel-type variables and adjust initial conditions for ! partial bottom cells. - use mpas_derived_types - use mpas_pool_routines - use mpas_configure - - implicit none - type (domain_type), intent(inout) :: domain type (mpas_pool_type), pointer :: statePool @@ -372,7 +371,7 @@ subroutine ocn_init_vert_coord(domain)!{{{ real (kind=RKIND), dimension(:), pointer :: refBottomDepth, & refBottomDepthTopOfCell, vertCoordMovementWeights, bottomDepth, refZMid, refLayerThickness real (kind=RKIND), dimension(:), allocatable :: minBottomDepth, minBottomDepthMid, zMidZLevel - + real (kind=RKIND), dimension(:,:), pointer :: layerThickness, restingThickness real (kind=RKIND), dimension(:,:,:), pointer :: tracers integer, pointer :: nVertLevels, nCells, num_tracers @@ -442,7 +441,7 @@ subroutine ocn_init_vert_coord(domain)!{{{ ! bottomDepth variable and h,T,S variables for full thickness cells. ! If running with pbcs, set config_alter_ICs_for_pbc='zlevel_pbcs_on'. Then thin pbc cells ! will be changed, and h,T,S will be altered to match the pbcs. - ! If running without pbcs, set config_alter_ICs_for_pbc='zlevel_pbcs_off'. Then + ! If running without pbcs, set config_alter_ICs_for_pbc='zlevel_pbcs_off'. Then ! bottomDepth will be altered so it is full cells everywhere. ! If your input file does not include bottomDepth, the false option will ! initialize bottomDepth correctly for a non-pbc run. @@ -459,7 +458,7 @@ subroutine ocn_init_vert_coord(domain)!{{{ ! min_pbc_fraction restricts pbcs from being too small. ! 
A typical value is 10%, so pbcs must occupy at least 10% of the cell thickness. ! If min_pbc_fraction = 0.0, bottomDepth gives the actual depth for that cell. - ! If min_pbc_fraction = 1.0, bottomDepth reverts to discrete z-level depths, same + ! If min_pbc_fraction = 1.0, bottomDepth reverts to discrete z-level depths, same ! as partial_bottom_cells = .false. minBottomDepth(1) = (1.0-config_min_pbc_fraction)*refBottomDepth(1) @@ -487,7 +486,7 @@ subroutine ocn_init_vert_coord(domain)!{{{ k = maxLevelCell(iCell) ! Alter thickness of bottom level to account for PBC - layerThickness(k,iCell) = bottomDepth(iCell) - refBottomDepthTopOfCell(k) + layerThickness(k,iCell) = bottomDepth(iCell) - refBottomDepthTopOfCell(k) ! Linearly interpolate the initial T&S for new location of bottom cell for PBCs zMidPBC = -0.5*(bottomDepth(iCell) + refBottomDepthTopOfCell(k)) @@ -499,7 +498,7 @@ subroutine ocn_init_vert_coord(domain)!{{{ *(zMidPBC - zMidZLevel(k)) enddo - enddo + enddo deallocate(minBottomDepth,zMidZLevel) @@ -566,8 +565,172 @@ subroutine ocn_init_vert_coord(domain)!{{{ block => block % next end do - end subroutine ocn_init_vert_coord!}}} + end subroutine ocn_init_routines_vert_coord!}}} + +!*********************************************************************** +! +! routine ocn_init_routines_block +! +!> \brief Initialize blocks within MPAS-Ocean core +!> \author Doug Jacobsen, Mark Petersen, Todd Ringler +!> \date September 2011 +!> \details +!> This routine calls all block-level initializations required to begin a +!> simulation with MPAS-Ocean +! 
+!----------------------------------------------------------------------- + + subroutine ocn_init_routines_block(block, dt, err)!{{{ + + type (block_type), intent(inout) :: block + real (kind=RKIND), intent(in) :: dt + integer, intent(out) :: err + + type (mpas_pool_type), pointer :: meshPool, averagePool, statePool + type (mpas_pool_type), pointer :: forcingPool, diagnosticsPool, scratchPool + integer :: i, iEdge, iCell, k + integer :: err1 + + integer, dimension(:), pointer :: nAdvCellsForEdge, maxLevelCell + integer, dimension(:), pointer :: maxLevelEdgeBot, maxLevelEdgeTop + integer, dimension(:,:), pointer :: advCellsForEdge, highOrderAdvectionMask, boundaryCell + real (kind=RKIND), dimension(:), pointer :: areaCell, boundaryLayerDepth + real (kind=RKIND), dimension(:,:), pointer :: advCoefs, advCoefs3rd, normalTransportVelocity + real (kind=RKIND), dimension(:,:), pointer :: layerThickness + real (kind=RKIND), dimension(:,:), pointer :: normalVelocity, normalGMBolusVelocity, edgeTangentVectors + real (kind=RKIND), dimension(:,:), pointer :: velocityX, velocityY, velocityZ + real (kind=RKIND), dimension(:,:), pointer :: velocityZonal, velocityMeridional + real (kind=RKIND), dimension(:,:,:), pointer :: derivTwo + + real (kind=RKIND), dimension(:,:,:), pointer :: tracers + + integer, pointer :: nCells, nEdges, nVertices, nVertLevels + integer, pointer :: config_horiz_tracer_adv_order + logical, pointer :: config_hmix_scaleWithMesh, config_do_restart + logical, pointer :: config_use_standardGM + real (kind=RKIND), pointer :: config_maxMeshDensity + + call mpas_pool_get_dimension(block % dimensions, 'nCells', nCells) + call mpas_pool_get_dimension(block % dimensions, 'nEdges', nEdges) + call mpas_pool_get_dimension(block % dimensions, 'nVertices', nVertices) + call mpas_pool_get_dimension(block % dimensions, 'nVertLevels', nVertLevels) + + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block % structs, 'state', 
statePool) + call mpas_pool_get_subpool(block % structs, 'forcing', forcingPool) + call mpas_pool_get_subpool(block % structs, 'diagnostics', diagnosticsPool) + call mpas_pool_get_subpool(block % structs, 'scratch', scratchPool) + call mpas_pool_get_subpool(block % structs, 'average', averagePool) + + call mpas_pool_get_array(meshPool, 'derivTwo', derivTwo) + call mpas_pool_get_array(meshPool, 'advCoefs', advCoefs) + call mpas_pool_get_array(meshPool, 'advCoefs3rd', advCoefs3rd) + call mpas_pool_get_array(meshPool, 'nAdvCellsForEdge', nAdvCellsForEdge) + call mpas_pool_get_array(meshPool, 'advCellsForEdge', advCellsForEdge) + call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) + call mpas_pool_get_array(meshPool, 'highOrderAdvectionMask', highOrderAdvectionMask) + call mpas_pool_get_array(meshPool, 'boundaryCell', boundaryCell) + call mpas_pool_get_array(meshPool, 'edgeTangentVectors', edgeTangentVectors) + call mpas_pool_get_array(meshPool, 'areaCell', areaCell) + call mpas_pool_get_array(meshPool, 'boundaryCell', boundaryCell) + call mpas_pool_get_array(meshPool, 'maxLevelEdgeBot', maxLevelEdgeBot) + call mpas_pool_get_array(meshPool, 'maxLevelEdgeTop', maxLevelEdgeTop) + + call mpas_pool_get_array(diagnosticsPool, 'normalTransportVelocity', normalTransportVelocity) + call mpas_pool_get_array(diagnosticsPool, 'normalGMBolusVelocity', normalGMBolusVelocity) + call mpas_pool_get_array(diagnosticsPool, 'velocityX', velocityX) + call mpas_pool_get_array(diagnosticsPool, 'velocityY', velocityY) + call mpas_pool_get_array(diagnosticsPool, 'velocityZ', velocityZ) + call mpas_pool_get_array(diagnosticsPool, 'velocityZonal', velocityZonal) + call mpas_pool_get_array(diagnosticsPool, 'velocityMeridional', velocityMeridional) + call mpas_pool_get_array(diagnosticsPool, 'boundaryLayerDepth', boundaryLayerDepth) + + call mpas_pool_get_array(statePool, 'normalVelocity', normalVelocity, 1) + call mpas_pool_get_array(statePool, 'layerThickness', layerThickness, 1) + 
call mpas_pool_get_array(statePool, 'tracers', tracers, 1) + + call mpas_pool_get_config(block % configs, 'config_horiz_tracer_adv_order', config_horiz_tracer_adv_order) + call mpas_pool_get_config(block % configs, 'config_hmix_scaleWithMesh', config_hmix_scaleWithMesh) + call mpas_pool_get_config(block % configs, 'config_maxMeshDensity', config_maxMeshDensity) + call mpas_pool_get_config(block % configs, 'config_use_standardGM', config_use_standardGM) + call mpas_pool_get_config(block % configs, 'config_do_restart', config_do_restart) + + call ocn_init_routines_setup_sign_and_index_fields(meshPool) + call mpas_initialize_deriv_two(meshPool, derivTwo, err) + call mpas_tracer_advection_coefficients(meshPool, & + config_horiz_tracer_adv_order, derivTwo, advCoefs, & + advCoefs3rd, nAdvCellsForEdge, advCellsForEdge, & + err1, maxLevelCell, highOrderAdvectionMask, & + boundaryCell) + err = ior(err, err1) + + call ocn_time_average_init(averagePool) + + if (.not. config_do_restart) then + do iCell=1,nCells + boundaryLayerDepth(iCell) = layerThickness(1, iCell) * 0.5 + end do + end if + + call mpas_timer_start("diagnostic solve") + call ocn_diagnostic_solve(dt, statePool, forcingPool, meshPool, diagnosticsPool, scratchPool) + call mpas_timer_stop("diagnostic solve") + + ! initialize velocities and tracers on land to be zero. + areaCell(nCells+1) = -1.0e34 + + layerThickness(:, nCells+1) = 0.0 + + do iEdge=1, nEdges + normalVelocity(maxLevelEdgeTop(iEdge)+1:maxLevelEdgeBot(iEdge), iEdge) = 0.0 + + normalVelocity(maxLevelEdgeBot(iEdge)+1:nVertLevels,iEdge) = -1.0e34 + end do + + do iCell=1,nCells + tracers(:, maxLevelCell(iCell)+1:nVertLevels,iCell) = -1.0e34 + end do + + ! ------------------------------------------------------------------ + ! Accumulating various parametrizations of the transport velocity + ! ------------------------------------------------------------------ + normalTransportVelocity(:,:) = normalVelocity(:,:) + + ! 
Compute normalGMBolusVelocity, relativeSlope and RediDiffVertCoef if respective flags are turned on + if (config_use_standardGM) then + call ocn_gm_compute_Bolus_velocity(diagnosticsPool, meshPool, scratchPool) + end if + + if (config_use_standardGM) then + normalTransportVelocity(:,:) = normalTransportVelocity(:,:) + normalGMBolusVelocity(:,:) + end if + ! ------------------------------------------------------------------ + ! End: Accumulating various parametrizations of the transport velocity + ! ------------------------------------------------------------------ + + call ocn_init_routines_compute_mesh_scaling(meshPool, config_hmix_scaleWithMesh, config_maxMeshDensity) + + call mpas_rbf_interp_initialize(meshPool) + call mpas_initialize_tangent_vectors(meshPool, edgeTangentVectors) + + call mpas_init_reconstruct(meshPool) + + call mpas_reconstruct(meshPool, normalVelocity, & + velocityX, & + velocityY, & + velocityZ, & + velocityZonal, & + velocityMeridional & + ) + + if (config_use_standardGM) then + call ocn_reconstruct_gm_vectors(diagnosticsPool, meshPool) + end if + + call mpas_pool_initialize_time_levels(statePool) + + end subroutine ocn_init_routines_block!}}} -end module ocn_init +end module ocn_init_routines ! vim: foldmethod=marker diff --git a/src/core_ocean/shared/mpas_ocn_tendency.F b/src/core_ocean/shared/mpas_ocn_tendency.F index 0ab585aaf9..1d0168a50e 100644 --- a/src/core_ocean/shared/mpas_ocn_tendency.F +++ b/src/core_ocean/shared/mpas_ocn_tendency.F @@ -50,10 +50,6 @@ module ocn_tendency private save - type (timer_node), pointer :: thickHadvTimer, thickVadvTimer - type (timer_node), pointer :: velCorTimer, velVadvTimer, velPgradTimer, velHmixTimer, velForceTimer - type (timer_node), pointer :: tracerHadvTimer, tracerVadvTimer, tracerHmixTimer, tracerRestoringTimer - !-------------------------------------------------------------------- ! ! 
Public parameters @@ -104,9 +100,9 @@ subroutine ocn_tend_thick(tendPool, forcingPool, diagnosticsPool, meshPool)!{{{ type (mpas_pool_type), intent(in) :: diagnosticsPool !< Input: Diagnostics information type (mpas_pool_type), intent(in) :: meshPool !< Input: Mesh information - real (kind=RKIND), dimension(:), pointer :: surfaceMassFlux + real (kind=RKIND), dimension(:), pointer :: surfaceThicknessFlux real (kind=RKIND), dimension(:,:), pointer :: layerThickness, layerThicknessEdge, & - vertAleTransportTop, tend_layerThickness, normalTransportVelocity, transmissionCoefficients + vertAleTransportTop, tend_layerThickness, normalTransportVelocity, fractionAbsorbed integer :: err @@ -122,8 +118,8 @@ subroutine ocn_tend_thick(tendPool, forcingPool, diagnosticsPool, meshPool)!{{{ call mpas_pool_get_array(tendPool, 'layerThickness', tend_layerThickness) - call mpas_pool_get_array(forcingPool, 'surfaceMassFlux', surfaceMassFlux) - call mpas_pool_get_array(forcingPool, 'transmissionCoefficients', transmissionCoefficients) + call mpas_pool_get_array(forcingPool, 'surfaceThicknessFlux', surfaceThicknessFlux) + call mpas_pool_get_array(forcingPool, 'fractionAbsorbed', fractionAbsorbed) ! ! height tendency: start accumulating tendency terms @@ -140,23 +136,23 @@ subroutine ocn_tend_thick(tendPool, forcingPool, diagnosticsPool, meshPool)!{{{ ! ! QC Comment (3/15/12): need to make sure that uTranport is the right ! transport velocity here. - call mpas_timer_start("hadv", .false., thickHadvTimer) + call mpas_timer_start("hadv") call ocn_thick_hadv_tend(meshPool, normalTransportVelocity, layerThicknessEdge, tend_layerThickness, err) - call mpas_timer_stop("hadv", thickHadvTimer) + call mpas_timer_stop("hadv") ! ! height tendency: vertical advection term -d/dz(hw) ! 
- call mpas_timer_start("vadv", .false., thickVadvTimer) + call mpas_timer_start("vadv") call ocn_thick_vadv_tend(meshPool, vertAleTransportTop, tend_layerThickness, err) - call mpas_timer_stop("vadv", thickVadvTimer) + call mpas_timer_stop("vadv") ! ! surface flux tendency ! - call mpas_timer_start("surface flux", .false.) + call mpas_timer_start("surface flux") - call ocn_thick_surface_flux_tend(meshPool, transmissionCoefficients, layerThickness, surfaceMassFlux, tend_layerThickness, err) + call ocn_thick_surface_flux_tend(meshPool, fractionAbsorbed, layerThickness, surfaceThicknessFlux, tend_layerThickness, err) call mpas_timer_stop("surface flux") call mpas_timer_stop("ocn_tend_thick") @@ -250,22 +246,22 @@ subroutine ocn_tend_vel(tendPool, statePool, forcingPool, diagnosticsPool, meshP ! velocity tendency: nonlinear Coriolis term and grad of kinetic energy ! - call mpas_timer_start("coriolis", .false., velCorTimer) + call mpas_timer_start("coriolis") call ocn_vel_coriolis_tend(meshPool, normalizedRelativeVorticityEdge, normalizedPlanetaryVorticityEdge, layerThicknessEdge, & normalVelocity, kineticEnergyCell, tend_normalVelocity, err) - call mpas_timer_stop("coriolis", velCorTimer) + call mpas_timer_stop("coriolis") ! ! velocity tendency: vertical advection term -w du/dz ! - call mpas_timer_start("vadv", .false., velVadvTimer) + call mpas_timer_start("vadv") call ocn_vel_vadv_tend(meshPool, normalVelocity, layerThicknessEdge, vertAleTransportTop, tend_normalVelocity, err) - call mpas_timer_stop("vadv", velVadvTimer) + call mpas_timer_stop("vadv") ! ! velocity tendency: pressure gradient ! - call mpas_timer_start("pressure grad", .false., velPgradTimer) + call mpas_timer_start("pressure grad") if (config_pressure_gradient_type.eq.'Jacobian_from_TS') then ! only pass EOS derivatives if needed. 
call mpas_pool_get_array(diagnosticsPool, 'inSituThermalExpansionCoeff',inSituThermalExpansionCoeff) @@ -278,25 +274,25 @@ subroutine ocn_tend_vel(tendPool, statePool, forcingPool, diagnosticsPool, meshP indexTemperature, indexSalinity, tracers, tend_normalVelocity, err, & inSituThermalExpansionCoeff,inSituSalineContractionCoeff) endif - call mpas_timer_stop("pressure grad", velPgradTimer) + call mpas_timer_stop("pressure grad") ! ! velocity tendency: del2 dissipation, \nu_2 \nabla^2 u ! computed as \nu( \nabla divergence + k \times \nabla relativeVorticity ) ! strictly only valid for config_mom_del2 == constant ! - call mpas_timer_start("hmix", .false., velHmixTimer) + call mpas_timer_start("hmix") call ocn_vel_hmix_tend(meshPool, divergence, relativeVorticity, normalVelocity, tangentialVelocity, viscosity, & tend_normalVelocity, scratchPool, err) - call mpas_timer_stop("hmix", velHmixTimer) + call mpas_timer_stop("hmix") ! ! velocity tendency: forcing and bottom drag ! - call mpas_timer_start("forcings", .false., velForceTimer) + call mpas_timer_start("forcings") call ocn_vel_forcing_tend(meshPool, normalVelocity, surfaceWindStress, layerThicknessEdge, tend_normalVelocity, err) - call mpas_timer_stop("forcings", velForceTimer) + call mpas_timer_stop("forcings") ! ! 
velocity tendency: vertical mixing d/dz( nu_v du/dz)) @@ -331,7 +327,7 @@ subroutine ocn_tend_tracer(tendPool, statePool, forcingPool, diagnosticsPool, me real (kind=RKIND), dimension(:), pointer :: penetrativeTemperatureFlux real (kind=RKIND), dimension(:,:), pointer :: & normalTransportVelocity, layerThickness,vertAleTransportTop, layerThicknessEdge, vertDiffTopOfCell, & - tend_layerThickness, normalThicknessFlux, surfaceTracerFlux, transmissionCoefficients, zMid, relativeSlopeTopOfEdge, & + tend_layerThickness, normalThicknessFlux, surfaceTracerFlux, fractionAbsorbed, zMid, relativeSlopeTopOfEdge, & relativeSlopeTapering, relativeSlopeTaperingCell real (kind=RKIND), dimension(:,:,:), pointer :: & tracers, tend_tr, vertNonLocalFlux @@ -368,7 +364,7 @@ subroutine ocn_tend_tracer(tendPool, statePool, forcingPool, diagnosticsPool, me call mpas_pool_get_array(forcingPool, 'penetrativeTemperatureFlux', penetrativeTemperatureFlux) call mpas_pool_get_array(forcingPool, 'surfaceTracerFlux', surfaceTracerFlux) - call mpas_pool_get_array(forcingPool, 'transmissionCoefficients', transmissionCoefficients) + call mpas_pool_get_array(forcingPool, 'fractionAbsorbed', fractionAbsorbed) call mpas_pool_get_array(tendPool, 'tracers', tend_tr) call mpas_pool_get_array(tendPool, 'layerThickness', tend_layerThickness) @@ -399,29 +395,29 @@ subroutine ocn_tend_tracer(tendPool, statePool, forcingPool, diagnosticsPool, me ! ! Monotonoic Advection, or standard advection - call mpas_timer_start("adv", .false., tracerHadvTimer) + call mpas_timer_start("adv") call ocn_tracer_advection_tend(tracers, normalThicknessFlux, vertAleTransportTop, layerThickness, layerThickness, dt, meshPool, tend_layerThickness, tend_tr) - call mpas_timer_stop("adv", tracerHadvTimer) + call mpas_timer_stop("adv") ! ! tracer tendency: del2 horizontal tracer diffusion, div(h \kappa_2 \nabla \phi) ! 
- call mpas_timer_start("hmix", .false., tracerHmixTimer) + call mpas_timer_start("hmix") call ocn_tracer_hmix_tend(meshPool, scratchPool, layerThickness, layerThicknessEdge, zMid, tracers, & relativeSlopeTopOfEdge, relativeSlopeTapering, relativeSlopeTaperingCell, tend_tr, err) - call mpas_timer_stop("hmix", tracerHmixTimer) + call mpas_timer_stop("hmix") ! ! Perform forcing from surface fluxes ! - call mpas_timer_start("surface_flux", .false.) - call ocn_tracer_surface_flux_tend(meshPool, transmissionCoefficients, layerThickness, surfaceTracerFlux, tend_tr, err) + call mpas_timer_start("surface_flux") + call ocn_tracer_surface_flux_tend(meshPool, fractionAbsorbed, layerThickness, surfaceTracerFlux, tend_tr, err) call mpas_timer_stop("surface_flux") ! ! Performing shortwave absorption ! - call mpas_timer_start("short wave", .false.) + call mpas_timer_start("short wave") call ocn_tracer_short_wave_absorption_tend(meshPool, indexTemperature, layerThickness, penetrativeTemperatureFlux, tend_tr, err) call mpas_timer_stop("short wave") @@ -429,7 +425,7 @@ subroutine ocn_tend_tracer(tendPool, statePool, forcingPool, diagnosticsPool, me ! Compute tracer tendency due to non-local flux computed in KPP ! if (config_use_cvmix_kpp) then - call mpas_timer_start("non-local flux from KPP", .false.) + call mpas_timer_start("non-local flux from KPP") call ocn_tracer_nonlocalflux_tend(meshPool, vertNonLocalFlux, surfaceTracerFlux, tend_tr, err) call mpas_timer_stop("non-local flux from KPP") endif @@ -558,9 +554,9 @@ subroutine ocn_tend_freq_filtered_thickness(tendPool, statePool, diagnosticsPool ! ! high frequency thickness tendency: del2 horizontal hhf diffusion, div(\kappa_{hf} \nabla h^{hf}) ! 
- call mpas_timer_start("hmix", .false., tracerHmixTimer) + call mpas_timer_start("hmix") call ocn_high_freq_thickness_hmix_del2_tend(meshPool, highFreqThickness, tend_highFreqThickness, err) - call mpas_timer_stop("hmix", tracerHmixTimer) + call mpas_timer_stop("hmix") call mpas_timer_stop("ocn_tend_freq_filtered_thickness") diff --git a/src/core_ocean/shared/mpas_ocn_test.F b/src/core_ocean/shared/mpas_ocn_test.F index dcf4e71712..0a1aae428e 100644 --- a/src/core_ocean/shared/mpas_ocn_test.F +++ b/src/core_ocean/shared/mpas_ocn_test.F @@ -57,8 +57,6 @@ module ocn_test !-------------------------------------------------------------------- logical :: hmixOn - type (timer_node), pointer :: del2Timer, del2TensorTimer, leithTimer, del4Timer, del4TensorTimer - !*********************************************************************** diff --git a/src/core_ocean/shared/mpas_ocn_thick_surface_flux.F b/src/core_ocean/shared/mpas_ocn_thick_surface_flux.F index fc747e24bf..8593715cd2 100644 --- a/src/core_ocean/shared/mpas_ocn_thick_surface_flux.F +++ b/src/core_ocean/shared/mpas_ocn_thick_surface_flux.F @@ -51,9 +51,7 @@ module ocn_thick_surface_flux ! !-------------------------------------------------------------------- - logical :: surfaceMassFluxOn - real (kind=RKIND) :: refDensity - + logical :: surfaceThicknessFluxOn !*********************************************************************** @@ -72,7 +70,7 @@ module ocn_thick_surface_flux ! !----------------------------------------------------------------------- - subroutine ocn_thick_surface_flux_tend(meshPool, transmissionCoefficients, layerThickness, surfaceMassFlux, tend, err)!{{{ + subroutine ocn_thick_surface_flux_tend(meshPool, transmissionCoefficients, layerThickness, surfaceThicknessFlux, tend, err)!{{{ !----------------------------------------------------------------- ! ! 
input variables @@ -89,7 +87,7 @@ subroutine ocn_thick_surface_flux_tend(meshPool, transmissionCoefficients, layer layerThickness !< Input: Layer thickness real (kind=RKIND), dimension(:), intent(in) :: & - surfaceMassFlux !< Input: surface flux of mass + surfaceThicknessFlux !< Input: surface flux of thickness !----------------------------------------------------------------- @@ -124,7 +122,7 @@ subroutine ocn_thick_surface_flux_tend(meshPool, transmissionCoefficients, layer err = 0 - if (.not. surfaceMassFluxOn) return + if (.not. surfaceThicknessFluxOn) return call mpas_pool_get_array(meshPool, 'maxLevelCell', maxLevelCell) call mpas_pool_get_array(meshPool, 'cellMask', cellMask) @@ -136,11 +134,11 @@ subroutine ocn_thick_surface_flux_tend(meshPool, transmissionCoefficients, layer do k = 1, maxLevelCell(iCell) remainingFlux = remainingFlux - transmissionCoefficients(k, iCell) - tend(k, iCell) = tend(k, iCell) + cellMask(k, iCell) * (surfaceMassFlux(iCell) / refDensity) * transmissionCoefficients(k, iCell) + tend(k, iCell) = tend(k, iCell) + cellMask(k, iCell) * surfaceThicknessFlux(iCell) * transmissionCoefficients(k, iCell) end do if(maxLevelCell(iCell) > 0 .and. 
remainingFlux > 0.0_RKIND) then - tend(maxLevelCell(iCell), iCell) = tend(maxLevelCell(iCell), iCell) + cellMask(maxLevelCell(iCell), iCell) * remainingFlux * surfaceMassFlux(iCell) / refDensity + tend(maxLevelCell(iCell), iCell) = tend(maxLevelCell(iCell), iCell) + cellMask(maxLevelCell(iCell), iCell) * remainingFlux * surfaceThicknessFlux(iCell) end if end do @@ -175,24 +173,20 @@ subroutine ocn_thick_surface_flux_init(err)!{{{ logical, pointer :: config_disable_thick_sflux character (len=StrKIND), pointer :: config_forcing_type - real (kind=RKIND), pointer :: config_density0 err = 0 call mpas_pool_get_config(ocnConfigs, 'config_disable_thick_sflux', config_disable_thick_sflux) call mpas_pool_get_config(ocnConfigs, 'config_forcing_type', config_forcing_type) - call mpas_pool_get_config(ocnConfigs, 'config_density0', config_density0) - - refDensity = config_density0 - surfaceMassFluxOn = .true. + surfaceThicknessFluxOn = .true. if (config_disable_thick_sflux) then - surfaceMassFluxOn = .false. + surfaceThicknessFluxOn = .false. end if if (config_forcing_type == trim('off')) then - surfaceMassFluxOn = .false. + surfaceThicknessFluxOn = .false. 
end if diff --git a/src/core_ocean/shared/mpas_ocn_tracer_hmix.F b/src/core_ocean/shared/mpas_ocn_tracer_hmix.F index eac606e100..a7cfbd8b88 100644 --- a/src/core_ocean/shared/mpas_ocn_tracer_hmix.F +++ b/src/core_ocean/shared/mpas_ocn_tracer_hmix.F @@ -55,8 +55,6 @@ module ocn_tracer_hmix !-------------------------------------------------------------------- logical :: tracerHmixOn - type (timer_node), pointer :: del2Timer, del4Timer - !*********************************************************************** @@ -139,13 +137,13 @@ subroutine ocn_tracer_hmix_tend(meshPool, scratchPool, layerThickness, layerThic if(.not.tracerHmixOn) return - call mpas_timer_start("del2", .false., del2Timer) + call mpas_timer_start("del2") call ocn_tracer_hmix_del2_tend(meshPool, scratchPool, layerThickness, layerThicknessEdge, zMid, tracers, & relativeSlopeTopOfEdge, relativeSlopeTapering, relativeSlopeTaperingCell, tend, err1) - call mpas_timer_stop("del2", del2Timer) - call mpas_timer_start("del4", .false., del4Timer) + call mpas_timer_stop("del2") + call mpas_timer_start("del4") call ocn_tracer_hmix_del4_tend(meshPool, layerThicknessEdge, tracers, tend, err2) - call mpas_timer_stop("del4", del4Timer) + call mpas_timer_stop("del4") err = ior(err1, err2) diff --git a/src/core_ocean/shared/mpas_ocn_tracer_nonlocalflux.F b/src/core_ocean/shared/mpas_ocn_tracer_nonlocalflux.F index fa8ee97159..e44d7512f6 100644 --- a/src/core_ocean/shared/mpas_ocn_tracer_nonlocalflux.F +++ b/src/core_ocean/shared/mpas_ocn_tracer_nonlocalflux.F @@ -23,7 +23,6 @@ module ocn_tracer_nonlocalflux use mpas_derived_types use mpas_pool_routines - use mpas_configure use ocn_constants implicit none diff --git a/src/core_ocean/shared/mpas_ocn_tracer_short_wave_absorption_jerlov.F b/src/core_ocean/shared/mpas_ocn_tracer_short_wave_absorption_jerlov.F index 4a40616437..4496f78126 100644 --- a/src/core_ocean/shared/mpas_ocn_tracer_short_wave_absorption_jerlov.F +++ 
b/src/core_ocean/shared/mpas_ocn_tracer_short_wave_absorption_jerlov.F @@ -148,7 +148,7 @@ subroutine ocn_tracer_short_wave_absorption_jerlov_tend(meshPool, index_temperat depth = depth + refBottomDepth(k) call ocn_get_jerlov_fraction(depth, weights(k+1)) - tend(index_temperature, k, iCell) = tend(index_temperature, k, iCell) + penetrativeTemperatureFlux(iCell)*(weights(k) - weights(k+1)) / layerThickness(k, iCell) + tend(index_temperature, k, iCell) = tend(index_temperature, k, iCell) + penetrativeTemperatureFlux(iCell)*(weights(k) - weights(k+1)) end do end do else @@ -158,7 +158,7 @@ subroutine ocn_tracer_short_wave_absorption_jerlov_tend(meshPool, index_temperat depth = depth + layerThickness(k, iCell) call ocn_get_jerlov_fraction(depth, weights(k+1)) - tend(index_temperature, k, iCell) = tend(index_temperature, k, iCell) + penetrativeTemperatureFlux(iCell)*(weights(k) - weights(k+1)) / layerThickness(k, iCell) + tend(index_temperature, k, iCell) = tend(index_temperature, k, iCell) + penetrativeTemperatureFlux(iCell)*(weights(k) - weights(k+1)) end do end do end if diff --git a/src/core_ocean/shared/mpas_ocn_tracer_surface_flux.F b/src/core_ocean/shared/mpas_ocn_tracer_surface_flux.F index 23bd548859..f80dc624fd 100644 --- a/src/core_ocean/shared/mpas_ocn_tracer_surface_flux.F +++ b/src/core_ocean/shared/mpas_ocn_tracer_surface_flux.F @@ -69,7 +69,7 @@ module ocn_tracer_surface_flux ! !----------------------------------------------------------------------- - subroutine ocn_tracer_surface_flux_tend(meshPool, transmissionCoefficients, layerThickness, surfaceTracerFlux, tend, err)!{{{ + subroutine ocn_tracer_surface_flux_tend(meshPool, fractionAbsorbed, layerThickness, surfaceTracerFlux, tend, err)!{{{ !----------------------------------------------------------------- ! ! 
input variables @@ -86,7 +86,7 @@ subroutine ocn_tracer_surface_flux_tend(meshPool, transmissionCoefficients, laye surfaceTracerFlux !< Input: surface tracer fluxes real (kind=RKIND), dimension(:,:), intent(in) :: & - transmissionCoefficients !< Input: Coefficients for the application of surface fluxes + fractionAbsorbed !< Input: Coefficients for the application of surface fluxes !----------------------------------------------------------------- ! @@ -132,10 +132,10 @@ subroutine ocn_tracer_surface_flux_tend(meshPool, transmissionCoefficients, laye do iCell = 1, nCells remainingFlux = 1.0_RKIND do k = 1, maxLevelCell(iCell) - remainingFlux = remainingFlux - transmissionCoefficients(k, iCell) + remainingFlux = remainingFlux - fractionAbsorbed(k, iCell) do iTracer = 1, nTracers - tend(iTracer, k, iCell) = tend(iTracer, k, iCell) + cellMask(k, icell) * surfaceTracerFlux(iTracer, iCell) * transmissionCoefficients(k, iCell) + tend(iTracer, k, iCell) = tend(iTracer, k, iCell) + cellMask(k, icell) * surfaceTracerFlux(iTracer, iCell) * fractionAbsorbed(k, iCell) end do end do diff --git a/src/core_ocean/shared/mpas_ocn_vel_hmix.F b/src/core_ocean/shared/mpas_ocn_vel_hmix.F index 5f85a1a78d..86d2e6e258 100644 --- a/src/core_ocean/shared/mpas_ocn_vel_hmix.F +++ b/src/core_ocean/shared/mpas_ocn_vel_hmix.F @@ -56,8 +56,6 @@ module ocn_vel_hmix !-------------------------------------------------------------------- logical :: hmixOn - type (timer_node), pointer :: del2Timer, del2TensorTimer, leithTimer, del4Timer, del4TensorTimer - !*********************************************************************** @@ -148,29 +146,29 @@ subroutine ocn_vel_hmix_tend(meshPool, divergence, relativeVorticity, normalVelo viscosity = 0.0 err = 0 - call mpas_timer_start("del2", .false., del2Timer) + call mpas_timer_start("del2") call ocn_vel_hmix_del2_tend(meshPool, divergence, relativeVorticity, viscosity, tend, err1) - call mpas_timer_stop("del2", del2Timer) + call mpas_timer_stop("del2") err = 
ior(err1, err) - call mpas_timer_start("del2_tensor", .false., del2TensorTimer) + call mpas_timer_start("del2_tensor") call ocn_vel_hmix_del2_tensor_tend(meshPool, normalVelocity, tangentialVelocity, viscosity, scratchPool, tend, err1) - call mpas_timer_stop("del2_tensor", del2TensorTimer) + call mpas_timer_stop("del2_tensor") err = ior(err1, err) - call mpas_timer_start("leith", .false., leithTimer) + call mpas_timer_start("leith") call ocn_vel_hmix_leith_tend(meshPool, divergence, relativeVorticity, viscosity, tend, err1) - call mpas_timer_stop("leith", leithTimer) + call mpas_timer_stop("leith") err = ior(err1, err) - call mpas_timer_start("del4", .false., del4Timer) + call mpas_timer_start("del4") call ocn_vel_hmix_del4_tend(meshPool, divergence, relativeVorticity, tend, err1) - call mpas_timer_stop("del4", del4Timer) + call mpas_timer_stop("del4") err = ior(err1, err) - call mpas_timer_start("del4_tensor", .false., del4TensorTimer) + call mpas_timer_start("del4_tensor") call ocn_vel_hmix_del4_tensor_tend(meshPool, normalVelocity, tangentialVelocity, viscosity, scratchPool, tend, err1) - call mpas_timer_stop("del4_tensor", del4TensorTimer) + call mpas_timer_stop("del4_tensor") err = ior(err1, err) !-------------------------------------------------------------------- diff --git a/src/core_ocean/shared/mpas_ocn_vmix.F b/src/core_ocean/shared/mpas_ocn_vmix.F index b4ff3bce5f..710ed6fc1e 100644 --- a/src/core_ocean/shared/mpas_ocn_vmix.F +++ b/src/core_ocean/shared/mpas_ocn_vmix.F @@ -547,7 +547,6 @@ subroutine ocn_vmix_init(domain, err)!{{{ call ocn_vmix_coefs_redi_init(err_tmp) err = ior(err, err_tmp) - write(6,*) 'ocn_vmix_init complete' !-------------------------------------------------------------------- end subroutine ocn_vmix_init!}}} diff --git a/src/core_ocean/shared/mpas_ocn_vmix_coefs_redi.F b/src/core_ocean/shared/mpas_ocn_vmix_coefs_redi.F index f208f8be8e..5b1495c8a4 100644 --- a/src/core_ocean/shared/mpas_ocn_vmix_coefs_redi.F +++ 
b/src/core_ocean/shared/mpas_ocn_vmix_coefs_redi.F @@ -17,7 +17,6 @@ module ocn_vmix_coefs_redi use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_timer use ocn_constants diff --git a/src/core_ocean/shared/mpas_ocn_vmix_coefs_rich.F b/src/core_ocean/shared/mpas_ocn_vmix_coefs_rich.F index 524cc0c7b7..1185fcd305 100644 --- a/src/core_ocean/shared/mpas_ocn_vmix_coefs_rich.F +++ b/src/core_ocean/shared/mpas_ocn_vmix_coefs_rich.F @@ -33,8 +33,6 @@ module ocn_vmix_coefs_rich private save - type (timer_node), pointer :: richEOSTimer - !-------------------------------------------------------------------- ! ! Public parameters @@ -154,7 +152,7 @@ subroutine ocn_vmix_coefs_rich_build(meshPool, statePool, diagnosticsPool, err, call mpas_pool_get_array(statePool, 'layerThickness', layerThickness, timeLevel) call mpas_pool_get_array(statePool, 'tracers', tracers, timeLevel) - call mpas_timer_start("eos rich", .false., richEOSTimer) + call mpas_timer_start("eos rich") ! compute in-place density call ocn_equation_of_state_density(statePool, diagnosticsPool, meshPool, 0, 'relative', density, err, timeLevelIn=timeLevel) @@ -163,7 +161,7 @@ subroutine ocn_vmix_coefs_rich_build(meshPool, statePool, diagnosticsPool, err, ! That is, layer k has been displaced to the depth of layer k+1. 
call ocn_equation_of_state_density(statePool, diagnosticsPool, meshPool, 1, 'relative', displacedDensity, err, timeLevelIn=timeLevel) - call mpas_timer_stop("eos rich", richEOSTimer) + call mpas_timer_stop("eos rich") call ocn_vmix_get_rich_numbers(meshPool, indexT, indexS, normalVelocity, layerThickness, layerThicknessEdge, & density, displacedDensity, tracers, RiTopOfEdge, RiTopOfCell, err1) diff --git a/src/core_ocean/shared/mpas_ocn_vmix_cvmix.F b/src/core_ocean/shared/mpas_ocn_vmix_cvmix.F index 00a1b89573..caf54b5e12 100644 --- a/src/core_ocean/shared/mpas_ocn_vmix_cvmix.F +++ b/src/core_ocean/shared/mpas_ocn_vmix_cvmix.F @@ -131,20 +131,23 @@ subroutine ocn_vmix_coefs_cvmix_build(meshPool, statePool, diagnosticsPool, err, vertViscTopOfCell, vertDiffTopOfCell, layerThickness, & zMid, zTop, density, displacedDensity, potentialDensity, & bulkRichardsonNumber, RiTopOfCell, BruntVaisalaFreqTop, & - bulkRichardsonNumberBuoy, bulkRichardsonNumberShear + bulkRichardsonNumberBuoy, bulkRichardsonNumberShear, unresolvedShear real (kind=RKIND), dimension(:,:,:), pointer :: vertNonLocalFlux integer, pointer :: index_vertNonLocalFluxTemp - integer, pointer :: config_cvmix_kpp_niterate logical, pointer :: config_use_cvmix_shear, config_use_cvmix_convection, config_use_cvmix_kpp + logical, pointer :: config_use_cvmix_fixed_boundary_layer + real (kind=RKIND), pointer :: config_cvmix_kpp_stop_OBL_search, config_cvmix_kpp_criticalBulkRichardsonNumber + real (kind=RKIND), pointer :: config_cvmix_kpp_boundary_layer_depth character (len=StrKIND), pointer :: config_cvmix_shear_mixing_scheme, config_cvmix_kpp_matching - integer :: k, iCell, jCell, iNeighbor, iter, timeLevel + integer :: k, iCell, jCell, iNeighbor, iter, timeLevel, kIndexOBL integer, pointer :: nVertLevels, nCells - real (kind=RKIND) :: r, layerSum + real (kind=RKIND) :: r, layerSum, bulkRichardsonNumberStop real (kind=RKIND), dimension(:), allocatable :: sigma, Nsqr_iface, turbulentScalarVelocityScale, tmp real 
(kind=RKIND), dimension(:), allocatable, target :: RiSmoothed, BVFSmoothed + logical :: bulkRichardsonFlag real (kind=RKIND), pointer :: config_cvmix_background_viscosity, config_cvmix_background_diffusion @@ -167,7 +170,6 @@ subroutine ocn_vmix_coefs_cvmix_build(meshPool, statePool, diagnosticsPool, err, timeLevel = 1 end if - ! write(stdoutUnit,*) 'TDR: ocn_vmix_coefs_cvmix_build enter' ! ! only build up viscosity/diffusivity if CVMix is turned on ! @@ -178,12 +180,15 @@ subroutine ocn_vmix_coefs_cvmix_build(meshPool, statePool, diagnosticsPool, err, ! call mpas_pool_get_config(ocnConfigs, 'config_cvmix_background_viscosity', config_cvmix_background_viscosity) call mpas_pool_get_config(ocnConfigs, 'config_cvmix_background_diffusion', config_cvmix_background_diffusion) - call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_niterate', config_cvmix_kpp_niterate) + call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_stop_OBL_search', config_cvmix_kpp_stop_OBL_search) + call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_criticalBulkRichardsonNumber', config_cvmix_kpp_criticalBulkRichardsonNumber) call mpas_pool_get_config(ocnConfigs, 'config_use_cvmix_shear', config_use_cvmix_shear) call mpas_pool_get_config(ocnConfigs, 'config_use_cvmix_convection', config_use_cvmix_convection) call mpas_pool_get_config(ocnConfigs, 'config_use_cvmix_kpp', config_use_cvmix_kpp) call mpas_pool_get_config(ocnConfigs, 'config_cvmix_shear_mixing_scheme', config_cvmix_shear_mixing_scheme) call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_matching', config_cvmix_kpp_matching) + call mpas_pool_get_config(ocnConfigs, 'config_use_cvmix_fixed_boundary_layer', config_use_cvmix_fixed_boundary_layer) + call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_boundary_layer_depth', config_cvmix_kpp_boundary_layer_depth) call mpas_pool_get_dimension(meshPool, 'nVertLevels', nVertLevels) call mpas_pool_get_dimension(meshPool, 'nCells', nCells) @@ -214,6 +219,7 @@ subroutine 
ocn_vmix_coefs_cvmix_build(meshPool, statePool, diagnosticsPool, err, call mpas_pool_get_array(diagnosticsPool, 'displacedDensity', displacedDensity) call mpas_pool_get_array(diagnosticsPool, 'potentialDensity', potentialDensity) call mpas_pool_get_array(diagnosticsPool, 'bulkRichardsonNumber', bulkRichardsonNumber) + call mpas_pool_get_array(diagnosticsPool, 'unresolvedShear', unresolvedShear) call mpas_pool_get_array(diagnosticsPool, 'boundaryLayerDepth', boundaryLayerDepth) call mpas_pool_get_array(diagnosticsPool, 'RiTopOfCell', RiTopOfCell) call mpas_pool_get_array(diagnosticsPool, 'BruntVaisalaFreqTop',BruntVaisalaFreqTop) @@ -309,133 +315,142 @@ subroutine ocn_vmix_coefs_cvmix_build(meshPool, statePool, diagnosticsPool, err, cvmix_variables % WaterDensity_cntr => density(:,iCell) cvmix_variables % AdiabWaterDensity_cntr => displacedDensity(:,iCell) - ! eliminate 2dz mode from Ri + ! fill Ri RiSmoothed(1:nVertLevels) = RiTopOfCell(1:nVertLevels,iCell) RiSmoothed(nVertLevels+1) = RiSmoothed(nVertLevels) do k=2,maxLevelCell(iCell) ! For input to cvmix, Richardson number should be positive. - tmp(k) = max(0.0, (RiSmoothed(k-1)+2*RiSmoothed(k)+RiSmoothed(k+1))/4.0 ) + RiSmoothed(k)=max(0.0_RKIND, RiSmoothed(k)) enddo - k=1 - tmp(k)=tmp(k+1) - k=maxLevelCell(iCell)+1 - tmp(k:nVertLevels+1)=tmp(k-1) - RiSmoothed(:) = tmp(:) cvmix_variables%ShearRichardson_iface => RiSmoothed - ! eliminate 2dz mode from BVF + ! fill BVF BVFSmoothed(1:nVertLevels) = BruntVaisalaFreqTop(1:nVertLevels,iCell) BVFSmoothed(nVertLevels+1) = BVFSmoothed(nVertLevels) - do k=2,maxLevelCell(iCell) - tmp(k) = (BVFSmoothed(k-1)+2*BVFSmoothed(k)+BVFSmoothed(k+1))/4.0 - enddo - k=1 - tmp(k)=tmp(k+1) - k=maxLevelCell(iCell)+1 - tmp(k:nVertLevels+1)=tmp(k-1) - BVFSmoothed(:) = tmp(:) cvmix_variables%SqrBuoyancyFreq_iface => BVFSmoothed ! 
fill the intent(in) KPP cvmix_variables % SurfaceFriction = surfaceFrictionVelocity(iCell) cvmix_variables % SurfaceBuoyancyForcing = surfaceBuoyancyForcing(iCell) - ! call shear-based mixing scheme - if (config_use_cvmix_shear) then - cvmix_variables % Mdiff_iface(:)=0.0 - cvmix_variables % Tdiff_iface(:)=0.0 - call cvmix_coeffs_shear( & - cvmix_variables, & - cvmix_background_params, & - 1, & - .false., & - cvmix_shear_params) - - ! add shear mixing to vertical viscosity/diffusivity - ! at present, shear mixing adds in background values when using PP, but background is accounted for seperately. so remove background from shear mixing values - if(config_cvmix_shear_mixing_scheme=='PP') then - vertViscTopOfCell(:,iCell) = vertViscTopOfCell(:,iCell) + cvmix_variables % Mdiff_iface(:) - config_cvmix_background_viscosity - vertDiffTopOfCell(:,iCell) = vertDiffTopOfCell(:,iCell) + cvmix_variables % Tdiff_iface(:) - config_cvmix_background_diffusion - else - vertViscTopOfCell(:,iCell) = vertViscTopOfCell(:,iCell) + cvmix_variables % Mdiff_iface(:) - vertDiffTopOfCell(:,iCell) = vertDiffTopOfCell(:,iCell) + cvmix_variables % Tdiff_iface(:) - endif - - endif ! if (config_use_cvmix_shear) - - ! - ! put tidal mixing here - ! - - ! - ! put double diffusion mxing here - ! - ! call kpp ocean mixed layer scheme if (cvmixKPPOn) then - ! copy data into cvmix_variables, then iterate + ! copy data into cvmix_variables cvmix_variables % Mdiff_iface(:)=vertViscTopOfCell(:,iCell) cvmix_variables % Tdiff_iface(:)=vertDiffTopOfCell(:,iCell) - cvmix_variables % BoundaryLayerDepth = boundaryLayerDepth(iCell) - cvmix_variables % kOBL_depth = cvmix_kpp_compute_kOBL_depth(cvmix_variables%zw_iface, cvmix_variables%zt_cntr, cvmix_variables%BoundaryLayerDepth) - - do iter=1,config_cvmix_kpp_niterate - if(cvmix_variables % BoundaryLayerDepth .lt. 
layerThickness(1,iCell)/2.0) then - cvmix_variables % BoundaryLayerDepth = layerThickness(1,iCell)/2.0 + 0.01 - endif + if (config_use_cvmix_fixed_boundary_layer) then + cvmix_variables % BoundaryLayerDepth = config_cvmix_kpp_boundary_layer_depth - if(cvmix_variables % BoundaryLayerDepth .gt. abs(cvmix_variables%zw_iface(maxLevelCell(iCell)+1))) then - cvmix_variables % BoundaryLayerDepth = abs(cvmix_variables%zt_cntr(maxLevelCell(iCell))) + 0.01 - endif + else - ! compute ocean boundary layer depth - do k=1,maxLevelCell(iCell) - sigma(k) = -cvmix_variables % zt_cntr(k) / cvmix_variables % BoundaryLayerDepth + ! set stratification + do k=1,maxLevelCell(iCell) Nsqr_iface(k) = BVFSmoothed(k) - enddo - k=maxLevelCell(iCell)+1 - sigma(k:nVertLevels) = sigma(k-1) - Nsqr_iface(k:nVertLevels+1) = Nsqr_iface(k-1) - - ! eliminate 2dz mode from N2 - tmp(:)=0.0 - do k=2,maxLevelCell(iCell) - tmp(k) = (Nsqr_iface(k-1)+2*Nsqr_iface(k)+Nsqr_iface(k+1))/4.0 - enddo - k=maxLevelCell(iCell)+1 - tmp(1)=tmp(2) - tmp(k:nVertLevels+1)=tmp(k-1) - Nsqr_iface(:)=tmp(:) - - ! compute the turbulent scales in order to compute the bulk Richardson number - call cvmix_kpp_compute_turbulent_scales( & - sigma(:), & - cvmix_variables % BoundaryLayerDepth, & - cvmix_variables % SurfaceBuoyancyForcing, & - cvmix_variables % SurfaceFriction, & - w_s = turbulentScalarVelocityScale(:) ) - - cvmix_variables % BulkRichardson_cntr = cvmix_kpp_compute_bulk_Richardson( & - cvmix_variables % zt_cntr(1:nVertLevels), & - bulkRichardsonNumberBuoy(1:nVertLevels,iCell), & - bulkRichardsonNumberShear(1:nVertLevels,iCell), & - ws_cntr = turbulentScalarVelocityScale(:), & - Nsqr_iface = Nsqr_iface(1:nVertLevels+1) ) - - ! compute the boundary layer depth based on model state at current time step - call cvmix_kpp_compute_OBL_depth( CVmix_vars = cvmix_variables) - - ! 
given current estimate of OBL and vertical profile of visc/diff, compute boundary layer mixing - call cvmix_coeffs_kpp( CVmix_vars = cvmix_variables ) - - end do ! iterate + enddo + k=min(maxLevelCell(iCell)+1,nVertLevels) + Nsqr_iface(k:nVertLevels+1) = Nsqr_iface(k-1) + + ! compute bulk Richardson number + ! assume boundary layer depth is at bottom of every kIndexOBL cell + bulkRichardsonNumberStop = config_cvmix_kpp_stop_OBL_search * config_cvmix_kpp_criticalBulkRichardsonNumber + bulkRichardsonNumber(:,iCell) = bulkRichardsonNumberStop - 1.0 + kIndexOBL=1 + bulkRichardsonFlag = .false. + do while (.not.bulkRichardsonFlag) + + ! set OBL at bottome of kIndexOBL cell for computation of bulk Richardson number + cvmix_variables % BoundaryLayerDepth = cvmix_variables % zw_iface(kIndexOBL+1) + + ! define sigma based on assumption of where OBL bottom resides + do k=1,maxLevelCell(iCell) + sigma(k) = -cvmix_variables % zt_cntr(k) / cvmix_variables % BoundaryLayerDepth + enddo + do k=maxLevelCell(iCell)+1,nVertLevels + sigma(k) = sigma(maxLevelCell(iCell)) + enddo + + ! 
compute the turbulent scales in order to compute the bulk Richardson number + call cvmix_kpp_compute_turbulent_scales( & + sigma_coord = sigma(1:nVertLevels), & + OBL_depth = cvmix_variables % BoundaryLayerDepth, & + surf_buoy_force = cvmix_variables % SurfaceBuoyancyForcing, & + surf_fric_vel = cvmix_variables % SurfaceFriction, & + w_s = turbulentScalarVelocityScale(1:nVertLevels)) + + cvmix_variables % BulkRichardson_cntr = cvmix_kpp_compute_bulk_Richardson( & + zt_cntr = cvmix_variables % zt_cntr(1:nVertLevels), & + delta_buoy_cntr = bulkRichardsonNumberBuoy(1:nVertLevels,iCell), & + delta_Vsqr_cntr = bulkRichardsonNumberShear(1:nVertLevels,iCell), & + ws_cntr = turbulentScalarVelocityScale(:), & + Nsqr_iface = Nsqr_iface(1:nVertLevels+1) ) + + unresolvedShear(:,iCell) = cvmix_kpp_compute_unresolved_shear( & + zt_cntr = cvmix_variables % zt_cntr(1:nVertLevels), & + ws_cntr = turbulentScalarVelocityScale(1:nVertLevels), & + Nsqr_iface = Nsqr_iface(1:nVertLevels+1)) + + ! each level of bulk Richardson is computed as if OBL resided at bottom of that level + bulkRichardsonNumber(kIndexOBL,iCell) = cvmix_variables % BulkRichardson_cntr(kIndexOBL) + + ! test to see if search should be ended + if(kIndexOBL.eq.maxLevelCell(iCell)) bulkRichardsonFlag=.true. + if(bulkRichardsonNumber(kIndexOBL,iCell).gt.bulkRichardsonNumberStop) bulkRichardsonFlag=.true. + + ! move downward one level + kIndexOBL = kIndexOBL + 1 + + enddo ! do while (.not.bulkRichardsonFlag) + + call cvmix_kpp_compute_OBL_depth( & + Ri_bulk = bulkRichardsonNumber(1:nVertLevels,iCell), & + zw_iface = cvmix_variables % zw_iface(1:nVertLevels+1), & + OBL_depth = cvmix_variables % BoundaryLayerDepth, & + kOBL_depth = cvmix_variables % kOBL_depth, & + zt_cntr = cvmix_variables % zt_cntr(1:nVertLevels), & + surf_fric = cvmix_variables % SurfaceFriction, & + surf_buoy = cvmix_variables % SurfaceBuoyancyForcing, & + Coriolis = cvmix_variables % Coriolis) + + endif ! 
if (config_use_cvmix_fixed_boundary_layer) then + + ! apply minimum limit to OBL + if(cvmix_variables % BoundaryLayerDepth .lt. layerThickness(1,iCell)/2.0) then + cvmix_variables % BoundaryLayerDepth = layerThickness(1,iCell)/2.0 + endif + + ! apply maximum limit to OBL + if(cvmix_variables % BoundaryLayerDepth .gt. abs(cvmix_variables%zt_cntr(maxLevelCell(iCell)))) then + cvmix_variables % BoundaryLayerDepth = abs(cvmix_variables%zt_cntr(maxLevelCell(iCell))) + endif + + cvmix_variables % kOBL_depth = cvmix_kpp_compute_kOBL_depth( & + zw_iface = cvmix_variables%zw_iface(1:nVertLevels+1), & + zt_cntr = cvmix_variables%zt_cntr(1:nVertLevels), & + OBL_depth = cvmix_variables % BoundaryLayerDepth ) + + call cvmix_coeffs_kpp( & + Mdiff_out = cvmix_variables % Mdiff_iface(1:nVertLevels+1), & + Tdiff_out = cvmix_variables % Tdiff_iface(1:nVertLevels+1), & + Sdiff_out = cvmix_variables % Sdiff_iface(1:nVertLevels+1), & + zw = cvmix_variables%zw_iface(1:nVertLevels+1), & + zt = cvmix_variables%zt_cntr(1:nVertLevels), & + old_Mdiff = cvmix_variables%Mdiff_iface(1:nVertLevels+1), & + old_Tdiff = cvmix_variables%Tdiff_iface(1:nVertLevels+1), & + old_Sdiff = cvmix_variables%Sdiff_iface(1:nVertLevels+1), & + OBL_depth = cvmix_variables%BoundaryLayerDepth, & + kOBL_depth = cvmix_variables%kOBL_depth, & + Tnonlocal = cvmix_variables%kpp_Tnonlocal_iface(1:nVertLevels+1), & + Snonlocal = cvmix_variables%kpp_Snonlocal_iface(1:nVertLevels+1), & + surf_fric = cvmix_variables%SurfaceFriction, & + surf_buoy = cvmix_variables%SurfaceBuoyancyForcing, & + nlev = maxLevelCell(iCell), & + max_nlev = nVertLevels) ! intent out of BoundaryLayerDepth is boundary layer depth measured in meters and vertical index boundaryLayerDepth(iCell) = cvmix_variables % BoundaryLayerDepth indexBoundaryLayerDepth(iCell) = cvmix_variables % kOBL_depth - bulkRichardsonNumber(:,iCell) = cvmix_variables % BulkRichardson_cntr(:) ! 
if using KPP with "MatchBoth" matching, then the output from KPP is the full viscosity/diffusivity ! if using KPP with "SimpleShape" matching, then the output from KPP needs to be added to current viscosity/diffusivity @@ -476,6 +491,42 @@ subroutine ocn_vmix_coefs_cvmix_build(meshPool, statePool, diagnosticsPool, err, endif endif ! if (config_use_cvmix_convection) + ! call shear-based mixing scheme + if (config_use_cvmix_shear) then + cvmix_variables % Mdiff_iface(:)=0.0 + cvmix_variables % Tdiff_iface(:)=0.0 + call cvmix_coeffs_shear( & + cvmix_variables, & + cvmix_shear_params) + + ! add shear mixing to vertical viscosity/diffusivity + ! at present, shear mixing adds in background values when using PP, but background is accounted for seperately. so remove background from shear mixing values + if(config_cvmix_shear_mixing_scheme=='PP') then + cvmix_variables % Mdiff_iface(:) = cvmix_variables % Mdiff_iface(:) - config_cvmix_background_viscosity + cvmix_variables % Tdiff_iface(:) = cvmix_variables % Tdiff_iface(:) - config_cvmix_background_diffusion + endif + + if(config_use_cvmix_kpp) then + do k = int(indexBoundaryLayerDepth(iCell)) + 1, maxLevelCell(iCell) + vertViscTopOfCell(k,iCell) = vertViscTopOfCell(k,iCell) + cvmix_variables % Mdiff_iface(k) + vertDiffTopOfCell(k,iCell) = vertDiffTopOfCell(k,iCell) + cvmix_variables % Tdiff_iface(k) + enddo + else + vertViscTopOfCell(:,iCell) = vertViscTopOfCell(:,iCell) + cvmix_variables % Mdiff_iface(:) + vertDiffTopOfCell(:,iCell) = vertDiffTopOfCell(:,iCell) + cvmix_variables % Tdiff_iface(:) + endif + + endif ! if (config_use_cvmix_shear) + + ! + ! put tidal mixing here + ! + + ! + ! put double diffusion mxing here + ! + + ! computation of viscosity/diffusivity complete ! 
impose no-flux boundary conditions at top and bottom by zero viscosity/diffusivity vertViscTopOfCell(1,iCell) = 0.0 @@ -570,7 +621,8 @@ subroutine ocn_vmix_cvmix_init(domain,err)!{{{ character (len=StrKIND), pointer :: config_cvmix_kpp_matching, config_cvmix_kpp_interpolationOMLType logical, pointer :: config_cvmix_kpp_EkmanOBL, config_cvmix_kpp_MonObOBL real (kind=RKIND), pointer :: config_cvmix_kpp_criticalBulkRichardsonNumber, & - config_cvmix_kpp_surface_layer_extent + config_cvmix_kpp_surface_layer_extent, & + config_cvmix_kpp_stop_OBL_search ! ! assume no errors during initialization and set to 1 when error is encountered ! @@ -595,6 +647,7 @@ subroutine ocn_vmix_cvmix_init(domain,err)!{{{ call mpas_pool_get_config(ocnConfigs, 'config_use_cvmix_double_diffusion', config_use_cvmix_double_diffusion) call mpas_pool_get_config(ocnConfigs, 'config_use_cvmix_kpp', config_use_cvmix_kpp) call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_criticalBulkRichardsonNumber', config_cvmix_kpp_criticalBulkRichardsonNumber) + call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_stop_OBL_search', config_cvmix_kpp_stop_OBL_search) call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_interpolationOMLType', config_cvmix_kpp_interpolationOMLType) call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_interpolationOMLType', config_cvmix_kpp_interpolationOMLType) call mpas_pool_get_config(ocnConfigs, 'config_cvmix_kpp_EkmanOBL', config_cvmix_kpp_EkmanOBL) @@ -639,8 +692,8 @@ subroutine ocn_vmix_cvmix_init(domain,err)!{{{ ! if (config_use_cvmix_background .or. 
config_use_cvmix_shear) then call cvmix_init_bkgnd( & - bkgnd_diff = config_cvmix_background_diffusion, & - bkgnd_visc = config_cvmix_background_viscosity, & + bkgnd_Tdiff = config_cvmix_background_diffusion, & + bkgnd_Mdiff = config_cvmix_background_viscosity, & CVmix_bkgnd_params_user = cvmix_background_params) endif @@ -711,7 +764,7 @@ subroutine ocn_vmix_cvmix_init(domain,err)!{{{ lEkman = config_cvmix_kpp_EkmanOBL, & lMonOb = config_cvmix_kpp_MonObOBL, & MatchTechnique = config_cvmix_kpp_matching, & - surf_layer_ext = config_cvmix_kpp_surface_layer_extent) + surf_layer_ext = config_cvmix_kpp_surface_layer_extent) endif diff --git a/src/core_sw/Registry.xml b/src/core_sw/Registry.xml index b6840f0dcc..401bbaf3fc 100644 --- a/src/core_sw/Registry.xml +++ b/src/core_sw/Registry.xml @@ -1,5 +1,5 @@ - + diff --git a/src/core_sw/mpas_sw_advection.F b/src/core_sw/mpas_sw_advection.F index d7375ac6f4..84d92f71c0 100644 --- a/src/core_sw/mpas_sw_advection.F +++ b/src/core_sw/mpas_sw_advection.F @@ -10,7 +10,6 @@ module sw_advection use mpas_kind_types use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_constants diff --git a/src/core_sw/mpas_sw_core.F b/src/core_sw/mpas_sw_core.F index 77d253d0c5..743a118c42 100644 --- a/src/core_sw/mpas_sw_core.F +++ b/src/core_sw/mpas_sw_core.F @@ -17,7 +17,6 @@ module sw_core function sw_core_init(domain, startTimeStamp) result(iErr) - use mpas_configure use mpas_derived_types use mpas_pool_routines use mpas_stream_manager diff --git a/src/core_sw/mpas_sw_core_interface.F b/src/core_sw/mpas_sw_core_interface.F index b4ae937304..0d963b9559 100644 --- a/src/core_sw/mpas_sw_core_interface.F +++ b/src/core_sw/mpas_sw_core_interface.F @@ -12,6 +12,7 @@ module sw_core_interface use mpas_dmpar use mpas_constants use mpas_io_units + use mpas_attlist use sw_core public @@ -42,6 +43,7 @@ subroutine sw_setup_core(core)!{{{ core % get_mesh_stream => sw_get_mesh_stream core % setup_immutable_streams => 
sw_setup_immutable_streams core % setup_derived_dimensions => sw_setup_derived_dimensions + core % setup_decomposed_dimensions => sw_setup_decomposed_dimensions core % setup_block => sw_setup_block core % setup_namelist => sw_setup_namelists @@ -86,7 +88,7 @@ end subroutine sw_setup_domain!}}} !> *not* allocated until after this routine is called. ! !----------------------------------------------------------------------- - function sw_setup_packages(configPool, packagePool) result(ierr)!{{{ + function sw_setup_packages(configPool, packagePool, iocontext) result(ierr)!{{{ use mpas_derived_types @@ -94,6 +96,7 @@ function sw_setup_packages(configPool, packagePool) result(ierr)!{{{ type (mpas_pool_type), intent(inout) :: configPool type (mpas_pool_type), intent(inout) :: packagePool + type (mpas_io_context_type), intent(inout) :: iocontext integer :: ierr ierr = 0 diff --git a/src/core_sw/mpas_sw_global_diagnostics.F b/src/core_sw/mpas_sw_global_diagnostics.F index aa7b067b09..ba6b3ef7b8 100644 --- a/src/core_sw/mpas_sw_global_diagnostics.F +++ b/src/core_sw/mpas_sw_global_diagnostics.F @@ -9,7 +9,6 @@ module sw_global_diagnostics use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_constants use mpas_dmpar diff --git a/src/core_sw/mpas_sw_test_cases.F b/src/core_sw/mpas_sw_test_cases.F index 29756bcbb1..5e703ff396 100644 --- a/src/core_sw/mpas_sw_test_cases.F +++ b/src/core_sw/mpas_sw_test_cases.F @@ -9,7 +9,6 @@ module sw_test_cases use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_constants diff --git a/src/core_sw/mpas_sw_time_integration.F b/src/core_sw/mpas_sw_time_integration.F index bd77a265a0..c0969fcf79 100644 --- a/src/core_sw/mpas_sw_time_integration.F +++ b/src/core_sw/mpas_sw_time_integration.F @@ -10,7 +10,6 @@ module sw_time_integration use mpas_vector_reconstruction use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_constants use mpas_dmpar @@ -98,9 +97,6 @@ subroutine 
sw_rk4(domain, dt) real (kind=RKIND), dimension(:,:), pointer :: uReconstructX, uReconstructY, uReconstructZ, uReconstructZonal, uReconstructMeridional real (kind=RKIND), dimension(:,:,:), pointer :: tracersOld, tracersNew, tracersProvis, tracersTend - type (field2DReal), pointer :: pvEdgeField, divergenceField, vorticityField, uField, hField - type (field3DReal), pointer :: tracersField - integer, pointer :: config_test_case real (kind=RKIND), pointer :: config_h_mom_eddy_visc4 @@ -197,17 +193,11 @@ subroutine sw_rk4(domain, dt) do rk_step = 1, 4 ! --- update halos for diagnostic variables - call mpas_pool_get_subpool(domain % blocklist % structs, 'provis_state', provisStatePool) - - call mpas_pool_get_field(provisStatePool, 'pv_edge', pvEdgeField, 1) - - call mpas_dmpar_exch_halo_field(pvEdgeField) + call mpas_dmpar_field_halo_exch(domain, 'pv_edge', timeLevel=1) if (config_h_mom_eddy_visc4 > 0.0) then - call mpas_pool_get_field(statePool, 'divergence', divergenceField, 2) - call mpas_pool_get_field(statePool, 'vorticity', vorticityField, 2) - call mpas_dmpar_exch_halo_field(divergenceField) - call mpas_dmpar_exch_halo_field(vorticityField) + call mpas_dmpar_field_halo_exch(domain, 'divergence', timeLevel=2) + call mpas_dmpar_field_halo_exch(domain, 'vorticity', timeLevel=2) end if ! --- compute tendencies @@ -226,15 +216,9 @@ subroutine sw_rk4(domain, dt) ! --- update halos for prognostic variables - call mpas_pool_get_subpool(domain % blocklist % structs, 'tend', tendPool) - - call mpas_pool_get_field(tendPool, 'u', uField) - call mpas_pool_get_field(tendPool, 'h', hField) - call mpas_pool_get_field(tendPool, 'tracers', tracersField) - - call mpas_dmpar_exch_halo_field(uField) - call mpas_dmpar_exch_halo_field(hField) - call mpas_dmpar_exch_halo_field(tracersField) + call mpas_dmpar_field_halo_exch(domain, 'tend_u') + call mpas_dmpar_field_halo_exch(domain, 'tend_h') + call mpas_dmpar_field_halo_exch(domain, 'tend_tracers') ! 
--- compute next substep state diff --git a/src/core_test/Makefile b/src/core_test/Makefile index f867eb7888..d784b4fcef 100644 --- a/src/core_test/Makefile +++ b/src/core_test/Makefile @@ -1,6 +1,12 @@ .SUFFIXES: .F .o -OBJS = mpas_test_core.o mpas_test_core_interface.o +OBJS = mpas_test_core.o \ + mpas_test_core_interface.o \ + mpas_test_core_halo_exch.o \ + mpas_test_core_streams.o \ + mpas_test_core_field_tests.o \ + mpas_test_core_timekeeping_tests.o \ + mpas_test_core_sorting.o all: core_test @@ -25,9 +31,17 @@ post_build: cp default_inputs/* $(ROOT_DIR)/default_inputs/. ( cd $(ROOT_DIR)/default_inputs; for FILE in `ls -1`; do if [ ! -e ../$$FILE ]; then cp $$FILE ../.; fi; done ) -mpas_test_core_interface: mpas_test_core.o +mpas_test_core_interface.o: mpas_test_core.o -mpas_test_core.o: +mpas_test_core.o: mpas_test_core_halo_exch.o mpas_test_core_streams.o mpas_test_core_field_tests.o mpas_test_core_timekeeping_tests.o mpas_test_core_sorting.o + +mpas_test_core_halo_exch.o: + +mpas_test_core_field_tests.o: + +mpas_test_core_streams.o: + +mpas_test_core_sorting.o: clean: $(RM) *.o *.mod *.f90 libdycore.a diff --git a/src/core_test/Registry.xml b/src/core_test/Registry.xml index 16b7a849a5..9e969559ec 100644 --- a/src/core_test/Registry.xml +++ b/src/core_test/Registry.xml @@ -1,5 +1,5 @@ - + @@ -76,12 +76,16 @@ - - + + + + + + - + @@ -115,4 +119,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/core_test/mpas_test_core.F b/src/core_test/mpas_test_core.F index b971969e3c..c02f889589 100644 --- a/src/core_test/mpas_test_core.F +++ b/src/core_test/mpas_test_core.F @@ -10,6 +10,11 @@ module test_core use mpas_framework use mpas_timekeeping + use test_core_halo_exch + use test_core_field_tests + use test_core_timekeeping_tests + use mpas_stream_manager + type (MPAS_Clock_type), pointer :: clock contains @@ -29,7 +34,6 @@ module test_core 
!----------------------------------------------------------------------- function test_core_init(domain, startTimeStamp) result(iErr)!{{{ - use mpas_configure use mpas_derived_types implicit none @@ -38,6 +42,8 @@ function test_core_init(domain, startTimeStamp) result(iErr)!{{{ character(len=*), intent(out) :: startTimeStamp type (MPAS_Time_Type) :: startTime + type (mpas_pool_type), pointer :: modelPool + character (len=StrKIND), pointer :: xtime integer :: iErr @@ -54,6 +60,13 @@ function test_core_init(domain, startTimeStamp) result(iErr)!{{{ startTime = mpas_get_clock_time(clock, MPAS_START_TIME, iErr) call mpas_get_time(startTime, dateTimeString=startTimeStamp) + call mpas_pool_get_subpool(domain % blocklist % structs, 'model', modelPool) + call mpas_pool_get_array(modelPool, 'xtime', xtime) + xtime = startTimeStamp + + call mpas_stream_mgr_read(domain % streamManager, ierr=iErr) + call mpas_stream_mgr_reset_alarms(domain % streamManager, direction=MPAS_STREAM_INPUT, ierr=iErr) + end function test_core_init!}}} @@ -75,18 +88,86 @@ function test_core_run(domain) result(iErr)!{{{ use mpas_derived_types use mpas_kind_types use mpas_timer + use mpas_vector_operations + use mpas_geometry_utils + use test_core_streams, only : test_core_streams_test + use test_core_sorting, only : test_core_test_sorting implicit none type (domain_type), intent(inout) :: domain integer :: iErr + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: pool type (mpas_pool_iterator_type) :: itr + integer :: numThreads, threadLimit, maxThreads + integer, dimension(:), allocatable :: threadErrs + maxThreads = mpas_threading_get_max_threads() + allocate(threadErrs(maxThreads)) + threadErrs(:) = 0 + + ! + ! Test performance of framework sorting routines + ! 
+ call test_core_test_sorting(domain, iErr) + if (iErr == 0) then + write(stderrUnit,*) ' * Sorting tests: SUCCESS' + else + write(stderrUnit,*) ' * Sorting tests: FAILURE' + end if iErr = 0 + call mpas_unit_test_fix_periodicity(iErr) + call mpas_unit_test_triangle_signed_area_sphere(iErr) + + call mpas_unit_test_velocity_conversion(iErr) + call mpas_unit_test_wachspress_hexagon(iErr) + call mpas_unit_test_wachspress_triangle(iErr) + + !$omp parallel default(firstprivate) shared(domain, threadErrs) + call test_core_halo_exch_test(domain, threadErrs, iErr) + !$omp end parallel + if ( iErr == 0 ) then + write(stderrUnit, *) ' * Halo Exchange Test: SUCCESS' + else + write(stderrUnit, *) ' * Halo Exchange Test: FAILURE' + end if + + !$omp parallel default(firstprivate) shared(domain, threadErrs) + call test_core_test_fields(domain, threadErrs, ierr) + if ( iErr == 0 ) then + write(stderrUnit, *) ' * Field Tests: SUCCESS' + else + write(stderrUnit, *) ' * Field Tests: FAILURE' + end if + !$omp end parallel + + call test_core_streams_test(domain, threadErrs, iErr) + if ( iErr == 0 ) then + write(stderrUnit, *) 'Stream I/O tests: SUCCESS' + else + write(stderrUnit, *) 'Stream I/O tests: FAILURE' + end if + + + call test_core_test_intervals(domain, threadErrs, iErr) + + ! Test writing of block write streams, which have the prefix 'block_' + block => domain % blocklist + do while (associated(block)) + call mpas_stream_mgr_reset_alarms(domain % streamManager, streamID="block_.*") + call mpas_stream_mgr_block_write(domain % streamManager, block, streamID="block_.*", forceWriteNow=.true.) + block => block % next + end do + + call mpas_stream_mgr_write(domain % streamManager, forceWriteNow=.true.) 
+ + deallocate(threadErrs) + end function test_core_run!}}} !*********************************************************************** diff --git a/src/core_test/mpas_test_core_field_tests.F b/src/core_test/mpas_test_core_field_tests.F new file mode 100644 index 0000000000..63247a7843 --- /dev/null +++ b/src/core_test/mpas_test_core_field_tests.F @@ -0,0 +1,171 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! + +!#define HALO_EXCH_DEBUG + +module test_core_field_tests + + use mpas_derived_types + use mpas_pool_routines + use mpas_field_routines + use mpas_dmpar + use mpas_threading + use mpas_io_units + use mpas_timer + use mpas_attlist + + implicit none + private + + public :: test_core_test_fields + + contains + + !*********************************************************************** + ! + ! routine test_core_test_fields + ! + !> \brief MPAS Test Core field tests routine + !> \author Doug Jacobsen + !> \date 04/26/2016 + !> \details + !> This routine performs tests related to field types. + ! 
+ !----------------------------------------------------------------------- + subroutine test_core_test_fields(domain, threadErrs, err)!{{{ + + type (domain_type), intent(inout) :: domain + integer, dimension(:), intent(out) :: threadErrs + integer, intent(out) :: err + + integer :: threadNum + integer :: iErr + + err = 0 + + threadNum = mpas_threading_get_thread_num() + + call mpas_timer_start('field tests') + if ( threadNum == 0 ) then + write(stderrUnit, *) ' - Performing attribute list tests' + end if + call test_core_attribute_list_test(domain, threadErrs, iErr) + call mpas_threading_barrier() + if ( threadNum == 0 ) then + write(stderrUnit, *) ' -- Return code: ', iErr + err = ior(err, iErr) + end if + + call mpas_timer_stop('field tests') + + end subroutine test_core_test_fields!}}} + + !*********************************************************************** + ! + ! routine test_core_attribute_list_test + ! + !> \brief MPAS Test Core attribute list tests routine + !> \author Doug Jacobsen + !> \date 04/26/2016 + !> \details + !> This routine performs tests of attribute lists. + ! 
+ !----------------------------------------------------------------------- + subroutine test_core_attribute_list_test(domain, threadErrs, ierr)!{{{ + type (domain_type), intent(inout) :: domain + integer, dimension(:), intent(out) :: threadErrs + integer, intent(out) :: ierr + + type ( att_list_type ), pointer :: srcList, destList + integer :: srcInt, destInt + integer, dimension(:), pointer :: srcIntA, destIntA + real (kind=RKIND) :: srcReal, destReal + real (kind=RKIND), dimension(:), pointer :: srcRealA, destRealA + character (len=StrKIND) :: srcText, destText + + integer :: threadNum + + iErr = 0 + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + allocate(srcList) + nullify(destList) + + nullify(srcList % next) + srcList % attName = '' + srcList % attType = -1 + + allocate(srcIntA(3)) + allocate(srcRealA(5)) + + srcInt = 3 + srcIntA(:) = 4 + srcReal = 5.0_RKIND + srcRealA(:) = 6.0_RKIND + srcText = 'testingString' + + call mpas_add_att(srcList, 'testInt', srcInt) + call mpas_add_att(srcList, 'testIntA', srcIntA) + call mpas_add_att(srcList, 'testReal', srcReal) + call mpas_add_att(srcList, 'testRealA', srcRealA) + call mpas_add_att(srcList, 'testText', srcText) + + call mpas_duplicate_attlist(srcList, destList) + + call mpas_get_att(destList, 'testInt', destInt) + call mpas_get_att(destList, 'testIntA', destIntA) + call mpas_get_att(destList, 'testReal', destReal) + call mpas_get_att(destList, 'testRealA', destRealA) + call mpas_get_att(destList, 'testText', destText) + + destIntA(:) = destIntA(:) - srcIntA(:) + destRealA(:) = destRealA(:) - srcRealA(:) + + if ( srcInt /= destInt ) then + threadErrs( threadNum ) = 1 + write(stderrUnit, *) ' ERROR: Duplicate int does not match' + end if + + if ( sum(destIntA) /= 0 ) then + threadErrs( threadNum ) = 1 + write(stderrUnit, *) ' ERROR: Duplicate int array does not match' + end if + + if ( srcReal /= destReal ) then + threadErrs( threadNum ) = 1 + write(stderrUnit, *) ' ERROR: Duplicate 
real does not match' + end if + + if ( sum(destRealA) /= 0.0_RKIND ) then + threadErrs( threadNum ) = 1 + write(stderrUnit, *) ' ERROR: Duplicate real array does not match' + end if + + if ( trim(srcText) /= trim(destText) ) then + threadErrs( threadNum ) = 1 + write(stderrUnit, *) ' ERROR: Duplicate string does not match' + end if + + call mpas_deallocate_attlist(srcList) + call mpas_deallocate_attlist(destList) + + deallocate(srcIntA) + deallocate(srcRealA) + end if + + call mpas_threading_barrier() + + if ( sum(threadErrs) /= 0 ) then + iErr = 1 + end if + + end subroutine test_core_attribute_list_test!}}} + +end module test_core_field_tests diff --git a/src/core_test/mpas_test_core_halo_exch.F b/src/core_test/mpas_test_core_halo_exch.F new file mode 100644 index 0000000000..9f74b084db --- /dev/null +++ b/src/core_test/mpas_test_core_halo_exch.F @@ -0,0 +1,1617 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! + +!#define HALO_EXCH_DEBUG + +module test_core_halo_exch + + use mpas_derived_types + use mpas_pool_routines + use mpas_field_routines + use mpas_dmpar + use mpas_threading + use mpas_io_units + use mpas_timer + + implicit none + private + + public :: test_core_halo_exch_test + + contains + + !*********************************************************************** + ! + ! routine test_core_halo_exch_test + ! + !> \brief MPAS Test Core halo exchange test + !> \author Doug Jacobsen + !> \date 10/21/2015 + !> \details + !> This routine performs tests related to halo exchanges. + ! 
+ !----------------------------------------------------------------------- + subroutine test_core_halo_exch_test(domain, threadErrs, err)!{{{ + + type (domain_type), intent(inout) :: domain + integer, dimension(:), intent(out) :: threadErrs + integer, intent(out) :: err + + integer :: threadNum + integer :: iErr + + err = 0 + + threadNum = mpas_threading_get_thread_num() + + call mpas_timer_start('halo exch tests') + if ( threadNum == 0 ) then + write(stderrUnit, *) ' - Performing exchange group tests' + end if + call test_core_halo_exch_group_test(domain, threadErrs, iErr) + call mpas_threading_barrier() + if ( threadNum == 0 ) then + write(stderrUnit, *) ' -- Return code: ', iErr + err = ior(err, iErr) + end if + + if ( threadNum == 0 ) then + write(stderrUnit, *) ' - Performing single field halo exchange tests' + end if + call test_core_halo_exch_single_field_test(domain, threadErrs, iErr) + call mpas_threading_barrier() + if ( threadNum == 0 ) then + write(stderrUnit, *) ' -- Return code: ', iErr + err = ior(err, iErr) + end if + + if ( threadNum == 0 ) then + write(stderrUnit, *) ' - Performing old halo exchange tests' + end if + call test_core_halo_exch_full_test(domain, threadErrs, iErr) + call mpas_threading_barrier() + if ( threadNum == 0 ) then + write(stderrUnit, *) ' -- Return code: ', iErr + err = ior(err, iErr) + end if + + call mpas_timer_stop('halo exch tests') + + end subroutine test_core_halo_exch_test!}}} + + !*********************************************************************** + ! + ! routine test_core_halo_exch_full_test + ! + !> \brief MPAS Test Core halo exchange full test + !> \author Doug Jacobsen + !> \date 10/21/2015 + !> \details + !> This routine performs tests related to halo exchanges. It creates + !> fields, and fills their zero halos with testable values. Then performs halo + !> exchanges, and differences the full result with the expect value. If the + !> test passes, it returns a zero, if it fails it returns 1. + ! 
+ !----------------------------------------------------------------------- + subroutine test_core_halo_exch_full_test(domain, threadErrs, err)!{{{ + + type (domain_type), intent(inout) :: domain + integer, dimension(:), intent(out) :: threadErrs + integer, intent(out) :: err + + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: meshPool, haloExchTestPool + + type (field5DReal), pointer :: real5DField + type (field4DReal), pointer :: real4DField + type (field3DReal), pointer :: real3DField + type (field2DReal), pointer :: real2DField + type (field1DReal), pointer :: real1DField + type (field3DInteger), pointer :: int3DField + type (field2DInteger), pointer :: int2DField + type (field1DInteger), pointer :: int1DField + + real (kind=RKIND), dimension(:, :, :, :, :), pointer :: real5D + real (kind=RKIND), dimension(:, :, :, :), pointer :: real4D + real (kind=RKIND), dimension(:, :, :), pointer :: real3D + real (kind=RKIND), dimension(:, :), pointer :: real2D + real (kind=RKIND), dimension(:), pointer :: real1D + + real (kind=RKIND) :: realValue + integer :: integerValue + + integer, dimension(:, :, :), pointer :: int3D + integer, dimension(:, :), pointer :: int2D + integer, dimension(:), pointer :: int1D + + integer :: i, j, k, l, m + integer :: iDim1, iDim2, iDim3, iDim4, iDim5 + integer, pointer :: nCells, nEdges, nVertices + integer, pointer :: nCellsSolve, nEdgesSolve, nVerticesSolve + integer, dimension(:), pointer :: indexToCellID + integer, dimension(:), pointer :: indexToEdgeID + integer, dimension(:), pointer :: indexToVertexID + + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() + 1 + + threadErrs(threadNum) = 0 + + call test_core_halo_exch_setup_fields(domain, threadErrs, err) + + call mpas_threading_barrier() + + call mpas_timer_start('old halo exchanges') + ! Perform halo exchanges + call mpas_pool_get_subpool(domain % blocklist % structs, 'haloExchTest', haloExchTestPool) + + ! 
Exchange persistent cell fields + call mpas_pool_get_field(haloExchTestPool, 'cellPersistReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'cellPersistReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'cellPersistReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'cellPersistReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'cellPersistReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'cellPersistInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'cellPersistInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'cellPersistInt1D', int1DField) + + call mpas_dmpar_exch_halo_field(real5DField) + call mpas_dmpar_exch_halo_field(real4DField) + call mpas_dmpar_exch_halo_field(real3DField) + call mpas_dmpar_exch_halo_field(real2DField) + call mpas_dmpar_exch_halo_field(real1DField) + call mpas_dmpar_exch_halo_field(int3DField) + call mpas_dmpar_exch_halo_field(int2DField) + call mpas_dmpar_exch_halo_field(int1DField) + + ! 
Exchange persistent edge fields + call mpas_pool_get_field(haloExchTestPool, 'edgePersistReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'edgePersistReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'edgePersistReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'edgePersistReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'edgePersistReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'edgePersistInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'edgePersistInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'edgePersistInt1D', int1DField) + + call mpas_dmpar_exch_halo_field(real5DField) + call mpas_dmpar_exch_halo_field(real4DField) + call mpas_dmpar_exch_halo_field(real3DField) + call mpas_dmpar_exch_halo_field(real2DField) + call mpas_dmpar_exch_halo_field(real1DField) + call mpas_dmpar_exch_halo_field(int3DField) + call mpas_dmpar_exch_halo_field(int2DField) + call mpas_dmpar_exch_halo_field(int1DField) + + ! 
Exchange persistent vertex fields + call mpas_pool_get_field(haloExchTestPool, 'vertexPersistReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexPersistReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexPersistReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexPersistReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexPersistReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexPersistInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexPersistInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexPersistInt1D', int1DField) + + call mpas_dmpar_exch_halo_field(real5DField) + call mpas_dmpar_exch_halo_field(real4DField) + call mpas_dmpar_exch_halo_field(real3DField) + call mpas_dmpar_exch_halo_field(real2DField) + call mpas_dmpar_exch_halo_field(real1DField) + call mpas_dmpar_exch_halo_field(int3DField) + call mpas_dmpar_exch_halo_field(int2DField) + call mpas_dmpar_exch_halo_field(int1DField) + + ! 
Exchange scratch cell fields + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchInt1D', int1DField) + + call mpas_dmpar_exch_halo_field(real5DField) + call mpas_dmpar_exch_halo_field(real4DField) + call mpas_dmpar_exch_halo_field(real3DField) + call mpas_dmpar_exch_halo_field(real2DField) + call mpas_dmpar_exch_halo_field(real1DField) + call mpas_dmpar_exch_halo_field(int3DField) + call mpas_dmpar_exch_halo_field(int2DField) + call mpas_dmpar_exch_halo_field(int1DField) + + ! 
Exchange edge scratch fields + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchInt1D', int1DField) + + call mpas_dmpar_exch_halo_field(real5DField) + call mpas_dmpar_exch_halo_field(real4DField) + call mpas_dmpar_exch_halo_field(real3DField) + call mpas_dmpar_exch_halo_field(real2DField) + call mpas_dmpar_exch_halo_field(real1DField) + call mpas_dmpar_exch_halo_field(int3DField) + call mpas_dmpar_exch_halo_field(int2DField) + call mpas_dmpar_exch_halo_field(int1DField) + + ! 
Exchange scratch vertex fields + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchInt1D', int1DField) + + call mpas_dmpar_exch_halo_field(real5DField) + call mpas_dmpar_exch_halo_field(real4DField) + call mpas_dmpar_exch_halo_field(real3DField) + call mpas_dmpar_exch_halo_field(real2DField) + call mpas_dmpar_exch_halo_field(real1DField) + call mpas_dmpar_exch_halo_field(int3DField) + call mpas_dmpar_exch_halo_field(int2DField) + call mpas_dmpar_exch_halo_field(int1DField) + + call mpas_timer_stop('old halo exchanges') + + call mpas_threading_barrier() + + call test_core_halo_exch_validate_fields(domain, threadErrs, err) + + end subroutine test_core_halo_exch_full_test!}}} + + + !*********************************************************************** + ! + ! routine test_core_halo_exch_single_field_test + ! + !> \brief MPAS Test Core halo exchange single field test + !> \author Doug Jacobsen + !> \date 01/12/2016 + !> \details + !> This routine performs tests of single field halo exchanges. It creates + !> fields, and fills their zero halos with testable values. Then performs halo + !> exchanges, and differences the full result with the expect value. If the + !> test passes, it returns a zero, if it fails it returns 1. + !> To perform the halo exchanges, it uses the single field halo exchange + !> routines rather than exchange groups. + ! 
+ !----------------------------------------------------------------------- + subroutine test_core_halo_exch_single_field_test(domain, threadErrs, err)!{{{ + + type (domain_type), intent(inout) :: domain + integer, dimension(:), intent(out) :: threadErrs + integer, intent(out) :: err + + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() + 1 + + threadErrs(threadNum) = 0 + + call test_core_halo_exch_setup_fields(domain, threadErrs, err) + + call mpas_threading_barrier() + + call mpas_timer_start('single halo exchanges') + ! Perform halo exchanges + + ! Exchange persistent cell fields + call mpas_dmpar_field_halo_exch(domain, 'cellPersistReal5D') + call mpas_dmpar_field_halo_exch(domain, 'cellPersistReal4D') + call mpas_dmpar_field_halo_exch(domain, 'cellPersistReal3D') + call mpas_dmpar_field_halo_exch(domain, 'cellPersistReal2D') + call mpas_dmpar_field_halo_exch(domain, 'cellPersistReal1D') + call mpas_dmpar_field_halo_exch(domain, 'cellPersistInt3D') + call mpas_dmpar_field_halo_exch(domain, 'cellPersistInt2D') + call mpas_dmpar_field_halo_exch(domain, 'cellPersistInt1D') + + ! Exchange persistent edge fields + call mpas_dmpar_field_halo_exch(domain, 'edgePersistReal5D') + call mpas_dmpar_field_halo_exch(domain, 'edgePersistReal4D') + call mpas_dmpar_field_halo_exch(domain, 'edgePersistReal3D') + call mpas_dmpar_field_halo_exch(domain, 'edgePersistReal2D') + call mpas_dmpar_field_halo_exch(domain, 'edgePersistReal1D') + call mpas_dmpar_field_halo_exch(domain, 'edgePersistInt3D') + call mpas_dmpar_field_halo_exch(domain, 'edgePersistInt2D') + call mpas_dmpar_field_halo_exch(domain, 'edgePersistInt1D') + + ! 
Exchange persistent vertex fields + call mpas_dmpar_field_halo_exch(domain, 'vertexPersistReal5D') + call mpas_dmpar_field_halo_exch(domain, 'vertexPersistReal4D') + call mpas_dmpar_field_halo_exch(domain, 'vertexPersistReal3D') + call mpas_dmpar_field_halo_exch(domain, 'vertexPersistReal2D') + call mpas_dmpar_field_halo_exch(domain, 'vertexPersistReal1D') + call mpas_dmpar_field_halo_exch(domain, 'vertexPersistInt3D') + call mpas_dmpar_field_halo_exch(domain, 'vertexPersistInt2D') + call mpas_dmpar_field_halo_exch(domain, 'vertexPersistInt1D') + + ! Exchange scratch cell fields + call mpas_dmpar_field_halo_exch(domain, 'cellScratchReal5D') + call mpas_dmpar_field_halo_exch(domain, 'cellScratchReal4D') + call mpas_dmpar_field_halo_exch(domain, 'cellScratchReal3D') + call mpas_dmpar_field_halo_exch(domain, 'cellScratchReal2D') + call mpas_dmpar_field_halo_exch(domain, 'cellScratchReal1D') + call mpas_dmpar_field_halo_exch(domain, 'cellScratchInt3D') + call mpas_dmpar_field_halo_exch(domain, 'cellScratchInt2D') + call mpas_dmpar_field_halo_exch(domain, 'cellScratchInt1D') + + ! Exchange edge scratch fields + call mpas_dmpar_field_halo_exch(domain, 'edgeScratchReal5D') + call mpas_dmpar_field_halo_exch(domain, 'edgeScratchReal4D') + call mpas_dmpar_field_halo_exch(domain, 'edgeScratchReal3D') + call mpas_dmpar_field_halo_exch(domain, 'edgeScratchReal2D') + call mpas_dmpar_field_halo_exch(domain, 'edgeScratchReal1D') + call mpas_dmpar_field_halo_exch(domain, 'edgeScratchInt3D') + call mpas_dmpar_field_halo_exch(domain, 'edgeScratchInt2D') + call mpas_dmpar_field_halo_exch(domain, 'edgeScratchInt1D') + + ! 
Exchange scratch vertex fields + call mpas_dmpar_field_halo_exch(domain, 'vertexScratchReal5D') + call mpas_dmpar_field_halo_exch(domain, 'vertexScratchReal4D') + call mpas_dmpar_field_halo_exch(domain, 'vertexScratchReal3D') + call mpas_dmpar_field_halo_exch(domain, 'vertexScratchReal2D') + call mpas_dmpar_field_halo_exch(domain, 'vertexScratchReal1D') + call mpas_dmpar_field_halo_exch(domain, 'vertexScratchInt3D') + call mpas_dmpar_field_halo_exch(domain, 'vertexScratchInt2D') + call mpas_dmpar_field_halo_exch(domain, 'vertexScratchInt1D') + + call mpas_timer_stop('single halo exchanges') + + call mpas_threading_barrier() + + call test_core_halo_exch_validate_fields(domain, threadErrs, err) + + end subroutine test_core_halo_exch_single_field_test!}}} + + + !*********************************************************************** + ! + ! routine test_core_halo_exch_group_test + ! + !> \brief MPAS Test Core halo exchange group test + !> \author Doug Jacobsen + !> \date 01/05/2016 + !> \details + !> This routine performs tests related to halo exchanges using exchange groups. + ! 
+ !----------------------------------------------------------------------- + subroutine test_core_halo_exch_group_test(domain, threadErrs, err)!{{{ + + type (domain_type), intent(inout) :: domain + integer, dimension(:), intent(out) :: threadErrs + integer, intent(out) :: err + + character (len=StrKIND) :: groupName + character (len=StrKIND) :: fieldName + + call mpas_timer_start('build exchange groups') + + groupName = 'cellPersistGroup' + call mpas_dmpar_exch_group_create(domain, groupName) + + fieldName = 'cellPersistInt1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellPersistInt2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellPersistInt3D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellPersistReal1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellPersistReal2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellPersistReal3D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellPersistReal4D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellPersistReal5D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + groupName = 'edgePersistGroup' + call mpas_dmpar_exch_group_create(domain, groupName) + + fieldName = 'edgePersistInt1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgePersistInt2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgePersistInt3D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgePersistReal1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgePersistReal2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgePersistReal3D' + call 
mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgePersistReal4D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgePersistReal5D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + groupName = 'vertexPersistGroup' + call mpas_dmpar_exch_group_create(domain, groupName) + + fieldName = 'vertexPersistInt1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexPersistInt2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexPersistInt3D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexPersistReal1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexPersistReal2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexPersistReal3D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexPersistReal4D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexPersistReal5D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + groupName = 'cellScratchGroup' + call mpas_dmpar_exch_group_create(domain, groupName) + + fieldName = 'cellScratchInt1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellScratchInt2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellScratchInt3D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellScratchReal1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellScratchReal2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellScratchReal3D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellScratchReal4D' + call 
mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'cellScratchReal5D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + groupName = 'edgeScratchGroup' + call mpas_dmpar_exch_group_create(domain, groupName) + + fieldName = 'edgeScratchInt1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgeScratchInt2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgeScratchInt3D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgeScratchReal1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgeScratchReal2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgeScratchReal3D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgeScratchReal4D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'edgeScratchReal5D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + groupName = 'vertexScratchGroup' + call mpas_dmpar_exch_group_create(domain, groupName) + + fieldName = 'vertexScratchInt1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexScratchInt2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexScratchInt3D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexScratchReal1D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexScratchReal2D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexScratchReal3D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexScratchReal4D' + call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + fieldName = 'vertexScratchReal5D' + call 
mpas_dmpar_exch_group_add_field(domain, groupName, fieldName) + + + call mpas_timer_stop('build exchange groups') + + call test_core_halo_exch_setup_fields(domain, threadErrs, err) + + call mpas_threading_barrier() + + call mpas_timer_start('group halo exchanges') + + groupName = 'cellPersistGroup' + call mpas_threading_barrier() + call mpas_dmpar_exch_group_begin_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_local_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_end_halo_exch(domain, groupName) + + groupName = 'edgePersistGroup' + call mpas_threading_barrier() + call mpas_dmpar_exch_group_begin_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_local_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_end_halo_exch(domain, groupName) + + groupName = 'vertexPersistGroup' + call mpas_threading_barrier() + call mpas_dmpar_exch_group_begin_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_local_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_end_halo_exch(domain, groupName) + + groupName = 'cellScratchGroup' + call mpas_threading_barrier() + call mpas_dmpar_exch_group_begin_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_local_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_end_halo_exch(domain, groupName) + + groupName = 'edgeScratchGroup' + call mpas_threading_barrier() + call mpas_dmpar_exch_group_begin_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_local_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_end_halo_exch(domain, groupName) + + groupName = 'vertexScratchGroup' + call mpas_threading_barrier() + call mpas_dmpar_exch_group_begin_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_local_halo_exch(domain, groupName) + call mpas_dmpar_exch_group_end_halo_exch(domain, groupName) + + call mpas_timer_stop('group halo exchanges') + + call test_core_halo_exch_validate_fields(domain, threadErrs, err) + + end subroutine test_core_halo_exch_group_test!}}} + + + 
!*********************************************************************** + ! + ! routine test_core_halo_exch_setup_fields + ! + !> \brief MPAS Test Core halo exchange field setup routine + !> \author Doug Jacobsen + !> \date 01/06/2016 + !> \details + !> This routine sets up fields for a halo exchange test. It initializes + !> their values based on indexTo*ID, where * is cell, edge, or vertex. + !> Additionally, it allocates the scratch fields and fills them with the + !> correct values. + ! + !----------------------------------------------------------------------- + subroutine test_core_halo_exch_setup_fields(domain, threadErrs, err)!{{{ + + type (domain_type), intent(inout) :: domain + integer, dimension(:), intent(out) :: threadErrs + integer, intent(out) :: err + + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: meshPool, haloExchTestPool + + type (field5DReal), pointer :: real5DField + type (field4DReal), pointer :: real4DField + type (field3DReal), pointer :: real3DField + type (field2DReal), pointer :: real2DField + type (field1DReal), pointer :: real1DField + type (field3DInteger), pointer :: int3DField + type (field2DInteger), pointer :: int2DField + type (field1DInteger), pointer :: int1DField + + real (kind=RKIND), dimension(:, :, :, :, :), pointer :: real5D + real (kind=RKIND), dimension(:, :, :, :), pointer :: real4D + real (kind=RKIND), dimension(:, :, :), pointer :: real3D + real (kind=RKIND), dimension(:, :), pointer :: real2D + real (kind=RKIND), dimension(:), pointer :: real1D + + real (kind=RKIND) :: realValue + integer :: integerValue + + integer, dimension(:, :, :), pointer :: int3D + integer, dimension(:, :), pointer :: int2D + integer, dimension(:), pointer :: int1D + + integer :: i, j, k, l, m + integer :: iDim1, iDim2, iDim3, iDim4, iDim5 + integer, pointer :: nCells, nEdges, nVertices + integer, pointer :: nCellsSolve, nEdgesSolve, nVerticesSolve + integer, dimension(:), pointer :: indexToCellID + integer, 
dimension(:), pointer :: indexToEdgeID + integer, dimension(:), pointer :: indexToVertexID + + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() + 1 + + threadErrs(threadNum) = 0 + + ! Allocate all scratch fields + ! Fill all fields with index values + block => domain % blocklist + do while ( associated(block) ) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block % structs, 'haloExchTest', haloExchTestPool) + + call mpas_pool_get_array(meshPool, 'indexToCellID', indexToCellID) + call mpas_pool_get_array(meshPool, 'indexToEdgeID', indexToEdgeID) + call mpas_pool_get_array(meshPool, 'indexToVertexID', indexToVertexID) + + call mpas_pool_get_dimension(meshPool, 'nCells', nCells) + call mpas_pool_get_dimension(meshPool, 'nEdges', nEdges) + call mpas_pool_get_dimension(meshPool, 'nVertices', nVertices) + call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(meshPool, 'nEdgesSolve', nEdgesSolve) + call mpas_pool_get_dimension(meshPool, 'nVerticesSolve', nVerticesSolve) + + ! 
Fill persistent cell fields + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistInt3D', int3D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistInt2D', int2D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistInt1D', int1D) + + iDim1 = nCellsSolve + iDim2 = size(real5D, dim=4) + iDim3 = size(real5D, dim=3) + iDim4 = size(real5D, dim=2) + iDim5 = size(real5D, dim=1) + + !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue) + do i = 1, iDim1 + realValue = real(indexToCellID(i), kind=RKIND) + integerValue = indexToCellID(i) + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = realValue + end do + real4D(l, k, j, i) = realValue + end do + real3D(k, j, i) = realValue + int3D(k, j, i) = integerValue + end do + real2D(j, i) = realValue + int2D(j, i) = integerValue + end do + real1D(i) = realValue + int1D(i) = integerValue + end do + !$omp end do + + ! 
Fill persistent edge fields + call mpas_pool_get_array(haloExchTestPool, 'edgePersistReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistInt3D', int3D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistInt2D', int2D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistInt1D', int1D) + + iDim1 = nEdgesSolve + iDim2 = size(real5D, dim=4) + iDim3 = size(real5D, dim=3) + iDim4 = size(real5D, dim=2) + iDim5 = size(real5D, dim=1) + + !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue) + do i = 1, iDim1 + realValue = real(indexToEdgeID(i), kind=RKIND) + integerValue = indexToEdgeID(i) + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = realValue + end do + real4D(l, k, j, i) = realValue + end do + real3D(k, j, i) = realValue + int3D(k, j, i) = integerValue + end do + real2D(j, i) = realValue + int2D(j, i) = integerValue + end do + real1D(i) = realValue + int1D(i) = integerValue + end do + !$omp end do + + ! 
Fill persistent vertex fields + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistInt3D', int3D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistInt2D', int2D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistInt1D', int1D) + + iDim1 = nVerticesSolve + iDim2 = size(real5D, dim=4) + iDim3 = size(real5D, dim=3) + iDim4 = size(real5D, dim=2) + iDim5 = size(real5D, dim=1) + + !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue) + do i = 1, iDim1 + realValue = real(indexToVertexID(i), kind=RKIND) + integerValue = indexToVertexID(i) + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = realValue + end do + real4D(l, k, j, i) = realValue + end do + real3D(k, j, i) = realValue + int3D(k, j, i) = integerValue + end do + real2D(j, i) = realValue + int2D(j, i) = integerValue + end do + real1D(i) = realValue + int1D(i) = integerValue + end do + !$omp end do + + ! 
Allocate scratch cell fields + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchInt1D', int1DField) + + call mpas_allocate_scratch_field(real5DField, .true.) + call mpas_allocate_scratch_field(real4DField, .true.) + call mpas_allocate_scratch_field(real3DField, .true.) + call mpas_allocate_scratch_field(real2DField, .true.) + call mpas_allocate_scratch_field(real1DField, .true.) + call mpas_allocate_scratch_field(int3DField, .true.) + call mpas_allocate_scratch_field(int2DField, .true.) + call mpas_allocate_scratch_field(int1DField, .true.) 
+ + call mpas_threading_barrier() + + call mpas_pool_get_array(haloExchTestPool, 'cellScratchReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchInt3D', int3D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchInt2D', int2D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchInt1D', int1D) + + iDim1 = nCellsSolve + iDim2 = size(real5D, dim=4) + iDim3 = size(real5D, dim=3) + iDim4 = size(real5D, dim=2) + iDim5 = size(real5D, dim=1) + + !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue) + do i = 1, iDim1 + realValue = real(indexToCellID(i), kind=RKIND) + integerValue = indexToCellID(i) + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = realValue + end do + real4D(l, k, j, i) = realValue + end do + real3D(k, j, i) = realValue + int3D(k, j, i) = integerValue + end do + real2D(j, i) = realValue + int2D(j, i) = integerValue + end do + real1D(i) = realValue + int1D(i) = integerValue + end do + !$omp end do + + ! 
Allocate scratch edge fields + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchInt1D', int1DField) + + call mpas_allocate_scratch_field(real5DField, .true.) + call mpas_allocate_scratch_field(real4DField, .true.) + call mpas_allocate_scratch_field(real3DField, .true.) + call mpas_allocate_scratch_field(real2DField, .true.) + call mpas_allocate_scratch_field(real1DField, .true.) + call mpas_allocate_scratch_field(int3DField, .true.) + call mpas_allocate_scratch_field(int2DField, .true.) + call mpas_allocate_scratch_field(int1DField, .true.) 
+ + call mpas_threading_barrier() + + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchInt3D', int3D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchInt2D', int2D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchInt1D', int1D) + + iDim1 = nEdgesSolve + iDim2 = size(real5D, dim=4) + iDim3 = size(real5D, dim=3) + iDim4 = size(real5D, dim=2) + iDim5 = size(real5D, dim=1) + + !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue) + do i = 1, iDim1 + realValue = real(indexToEdgeID(i), kind=RKIND) + integerValue = indexToEdgeID(i) + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = realValue + end do + real4D(l, k, j, i) = realValue + end do + real3D(k, j, i) = realValue + int3D(k, j, i) = integerValue + end do + real2D(j, i) = realValue + int2D(j, i) = integerValue + end do + real1D(i) = realValue + int1D(i) = integerValue + end do + !$omp end do + + ! 
Allocate scratch vertex fields + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchInt1D', int1DField) + + call mpas_allocate_scratch_field(real5DField, .true.) + call mpas_allocate_scratch_field(real4DField, .true.) + call mpas_allocate_scratch_field(real3DField, .true.) + call mpas_allocate_scratch_field(real2DField, .true.) + call mpas_allocate_scratch_field(real1DField, .true.) + call mpas_allocate_scratch_field(int3DField, .true.) + call mpas_allocate_scratch_field(int2DField, .true.) + call mpas_allocate_scratch_field(int1DField, .true.) 
+ + call mpas_threading_barrier() + + call mpas_pool_get_array(haloExchTestPool, 'vertexScratchReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'vertexScratchReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'vertexScratchReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'vertexScratchReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'vertexScratchReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'vertexScratchInt3D', int3D) + call mpas_pool_get_array(haloExchTestPool, 'vertexScratchInt2D', int2D) + call mpas_pool_get_array(haloExchTestPool, 'vertexScratchInt1D', int1D) + + iDim1 = nVerticesSolve + iDim2 = size(real5D, dim=4) + iDim3 = size(real5D, dim=3) + iDim4 = size(real5D, dim=2) + iDim5 = size(real5D, dim=1) + + !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue) + do i = 1, iDim1 + realValue = real(indexToVertexID(i), kind=RKIND) + integerValue = indexToVertexID(i) + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = realValue + end do + real4D(l, k, j, i) = realValue + end do + real3D(k, j, i) = realValue + int3D(k, j, i) = integerValue + end do + real2D(j, i) = realValue + int2D(j, i) = integerValue + end do + real1D(i) = realValue + int1D(i) = integerValue + end do + !$omp end do + + block => block % next + end do + + call mpas_threading_barrier() + + end subroutine test_core_halo_exch_setup_fields!}}} + + !*********************************************************************** + ! + ! routine test_core_halo_exch_validate_fields + ! + !> \brief MPAS Test Core halo exchange validate fields routine + !> \author Doug Jacobsen + !> \date 01/06/2016 + !> \details + !> This routine validates the fields after a halo exchange. It checks that + !> every entry in the field has the expected value, when compared with + !> indexTo*ID, when indexTo*ID is cell, edge, or vertex. 
+ !> If the validation fails, a 1 is return, and if the validation passes a 0 is return. + !> Additionally, this routine deallocates scratch fields. + ! + !----------------------------------------------------------------------- + subroutine test_core_halo_exch_validate_fields(domain, threadErrs, err)!{{{ + + type (domain_type), intent(inout) :: domain + integer, dimension(:), intent(out) :: threadErrs + integer, intent(out) :: err + + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: meshPool, haloExchTestPool + + type (field5DReal), pointer :: real5DField + type (field4DReal), pointer :: real4DField + type (field3DReal), pointer :: real3DField + type (field2DReal), pointer :: real2DField + type (field1DReal), pointer :: real1DField + type (field3DInteger), pointer :: int3DField + type (field2DInteger), pointer :: int2DField + type (field1DInteger), pointer :: int1DField + + real (kind=RKIND), dimension(:, :, :, :, :), pointer :: real5D + real (kind=RKIND), dimension(:, :, :, :), pointer :: real4D + real (kind=RKIND), dimension(:, :, :), pointer :: real3D + real (kind=RKIND), dimension(:, :), pointer :: real2D + real (kind=RKIND), dimension(:), pointer :: real1D + + real (kind=RKIND) :: realValue + integer :: integerValue + + integer, dimension(:, :, :), pointer :: int3D + integer, dimension(:, :), pointer :: int2D + integer, dimension(:), pointer :: int1D + + integer :: i, j, k, l, m + integer :: iDim1, iDim2, iDim3, iDim4, iDim5 + integer, pointer :: nCells, nEdges, nVertices + integer, pointer :: nCellsSolve, nEdgesSolve, nVerticesSolve + integer, dimension(:), pointer :: indexToCellID + integer, dimension(:), pointer :: indexToEdgeID + integer, dimension(:), pointer :: indexToVertexID + + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() + 1 + + threadErrs(threadNum) = 0 + + ! 
Validate results + block => domain % blocklist + do while ( associated(block) ) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block % structs, 'haloExchTest', haloExchTestPool) + + call mpas_pool_get_array(meshPool, 'indexToCellID', indexToCellID) + call mpas_pool_get_array(meshPool, 'indexToEdgeID', indexToEdgeID) + call mpas_pool_get_array(meshPool, 'indexToVertexID', indexToVertexID) + + call mpas_pool_get_dimension(meshPool, 'nCells', nCells) + call mpas_pool_get_dimension(meshPool, 'nEdges', nEdges) + call mpas_pool_get_dimension(meshPool, 'nVertices', nVertices) + call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(meshPool, 'nEdgesSolve', nEdgesSolve) + call mpas_pool_get_dimension(meshPool, 'nVerticesSolve', nVerticesSolve) + + indexToCellID( nCells + 1 ) = 0 + indexToEdgeID( nEdges + 1 ) = 0 + indexToVertexID( nVertices + 1 ) = 0 + + ! Compare persistent cell fields + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistInt3D', int3D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistInt2D', int2D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistInt1D', int1D) + + ! 
Subtract index from all peristent cell fields + iDim1 = size(real5D, dim=5) + iDim2 = size(real5D, dim=4) + iDim3 = size(real5D, dim=3) + iDim4 = size(real5D, dim=2) + iDim5 = size(real5D, dim=1) + + !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue) + do i = 1, iDim1 + realValue = real(indexToCellID(i), kind=RKIND) + integerValue = indexToCellID(i) + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = real5D(m, l, k, j, i) - realValue + end do + real4D(l, k, j, i) = real4D(l, k, j, i) - realValue + end do + real3D(k, j, i) = real3D(k, j, i) - realValue + int3D(k, j, i) = int3D(k, j, i) - integerValue + end do + real2D(j, i) = real2D(j, i) - realValue + int2D(j, i) = int2D(j, i) - integerValue + end do + real1D(i) = real1D(i) - realValue + int1D(i) = int1D(i) - integerValue + end do + !$omp end do + + ! Validate that all differences are zero. +#ifdef HALO_EXCH_DEBUG + write(stderrUnit, *) ' -- Testing persistent cell fields' +#endif + if ( sum(real5D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real4D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real3D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real2D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real1D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int3D) /= 0 ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int2D) /= 0 ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int1D) /= 0 ) then + threadErrs(threadNum) = 1 + end if +#ifdef HALO_EXCH_DEBUG + write(stderrUnit, *) ' -- Test result: ', threadErrs(threadNum) +#endif + + ! 
Compare persistent edge fields + call mpas_pool_get_array(haloExchTestPool, 'edgePersistReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistInt3D', int3D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistInt2D', int2D) + call mpas_pool_get_array(haloExchTestPool, 'edgePersistInt1D', int1D) + + ! Subtract index from all peristent edge fields + iDim1 = size(real5D, dim=5) + iDim2 = size(real5D, dim=4) + iDim3 = size(real5D, dim=3) + iDim4 = size(real5D, dim=2) + iDim5 = size(real5D, dim=1) + + !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue) + do i = 1, iDim1 + realValue = real(indexToEdgeID(i), kind=RKIND) + integerValue = indexToEdgeID(i) + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = real5D(m, l, k, j, i) - realValue + end do + real4D(l, k, j, i) = real4D(l, k, j, i) - realValue + end do + real3D(k, j, i) = real3D(k, j, i) - realValue + int3D(k, j, i) = int3D(k, j, i) - integerValue + end do + real2D(j, i) = real2D(j, i) - realValue + int2D(j, i) = int2D(j, i) - integerValue + end do + real1D(i) = real1D(i) - realValue + int1D(i) = int1D(i) - integerValue + end do + !$omp end do + + ! Validate that all differences are zero. 
+#ifdef HALO_EXCH_DEBUG + write(stderrUnit, *) ' -- Testing persistent Edge fields' +#endif + if ( sum(real5D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real4D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real3D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real2D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real1D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int3D) /= 0 ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int2D) /= 0 ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int1D) /= 0 ) then + threadErrs(threadNum) = 1 + end if +#ifdef HALO_EXCH_DEBUG + write(stderrUnit, *) ' -- Test result: ', threadErrs(threadNum) +#endif + + ! Compare persistent vertex fields + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistInt3D', int3D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistInt2D', int2D) + call mpas_pool_get_array(haloExchTestPool, 'vertexPersistInt1D', int1D) + + ! 
Subtract index from all peristent vertex fields + iDim1 = size(real5D, dim=5) + iDim2 = size(real5D, dim=4) + iDim3 = size(real5D, dim=3) + iDim4 = size(real5D, dim=2) + iDim5 = size(real5D, dim=1) + + !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue) + do i = 1, iDim1 + realValue = real(indexToVertexID(i), kind=RKIND) + integerValue = indexToVertexID(i) + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = real5D(m, l, k, j, i) - realValue + end do + real4D(l, k, j, i) = real4D(l, k, j, i) - realValue + end do + real3D(k, j, i) = real3D(k, j, i) - realValue + int3D(k, j, i) = int3D(k, j, i) - integerValue + end do + real2D(j, i) = real2D(j, i) - realValue + int2D(j, i) = int2D(j, i) - integerValue + end do + real1D(i) = real1D(i) - realValue + int1D(i) = int1D(i) - integerValue + end do + !$omp end do + + ! Validate that all differences are zero. +#ifdef HALO_EXCH_DEBUG + write(stderrUnit, *) ' -- Testing persistent Vertex fields' +#endif + if ( sum(real5D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real4D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real3D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real2D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real1D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int3D) /= 0 ) then + threadErrs(threadNum) = 1 + end if +#ifdef HALO_EXCH_DEBUG + write(stderrUnit, *) ' -- Test result: ', threadErrs(threadNum) +#endif + + ! 
Compare scratch cell fields + call mpas_pool_get_array(haloExchTestPool, 'cellScratchReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchInt3D', int3D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchInt2D', int2D) + call mpas_pool_get_array(haloExchTestPool, 'cellScratchInt1D', int1D) + + ! Subtract index from all peristent cell fields + iDim1 = size(real5D, dim=5) + iDim2 = size(real5D, dim=4) + iDim3 = size(real5D, dim=3) + iDim4 = size(real5D, dim=2) + iDim5 = size(real5D, dim=1) + + !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue) + do i = 1, iDim1 + realValue = real(indexToCellID(i), kind=RKIND) + integerValue = indexToCellID(i) + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = real5D(m, l, k, j, i) - realValue + end do + real4D(l, k, j, i) = real4D(l, k, j, i) - realValue + end do + real3D(k, j, i) = real3D(k, j, i) - realValue + int3D(k, j, i) = int3D(k, j, i) - integerValue + end do + real2D(j, i) = real2D(j, i) - realValue + int2D(j, i) = int2D(j, i) - integerValue + end do + real1D(i) = real1D(i) - realValue + int1D(i) = int1D(i) - integerValue + end do + !$omp end do + + ! Validate that all differences are zero. 
+#ifdef HALO_EXCH_DEBUG + write(stderrUnit, *) ' -- Testing scratch cell fields' +#endif + if ( sum(real5D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real4D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real3D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real2D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real1D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int3D) /= 0 ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int2D) /= 0 ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int1D) /= 0 ) then + threadErrs(threadNum) = 1 + end if +#ifdef HALO_EXCH_DEBUG + write(stderrUnit, *) ' -- Test result: ', threadErrs(threadNum) +#endif + + ! Compare scratch edge fields + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchInt3D', int3D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchInt2D', int2D) + call mpas_pool_get_array(haloExchTestPool, 'edgeScratchInt1D', int1D) + + ! 
Subtract index from all scratch edge fields
+      iDim1 = size(real5D, dim=5)
+      iDim2 = size(real5D, dim=4)
+      iDim3 = size(real5D, dim=3)
+      iDim4 = size(real5D, dim=2)
+      iDim5 = size(real5D, dim=1)
+
+      !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue)
+      do i = 1, iDim1
+         realValue = real(indexToEdgeID(i), kind=RKIND)
+         integerValue = indexToEdgeID(i)
+         do j = 1, iDim2
+            do k = 1, iDim3
+               do l = 1, iDim4
+                  do m = 1, iDim5
+                     real5D(m, l, k, j, i) = real5D(m, l, k, j, i) - realValue
+                  end do
+                  real4D(l, k, j, i) = real4D(l, k, j, i) - realValue
+               end do
+               real3D(k, j, i) = real3D(k, j, i) - realValue
+               int3D(k, j, i) = int3D(k, j, i) - integerValue
+            end do
+            real2D(j, i) = real2D(j, i) - realValue
+            int2D(j, i) = int2D(j, i) - integerValue
+         end do
+         real1D(i) = real1D(i) - realValue
+         int1D(i) = int1D(i) - integerValue
+      end do
+      !$omp end do
+
+      ! Validate that all differences are zero.
+#ifdef HALO_EXCH_DEBUG
+      write(stderrUnit, *) '  -- Testing scratch edge fields'
+#endif
+      if ( sum(real5D) /= 0.0_RKIND ) then
+         threadErrs(threadNum) = 1
+      end if
+
+      if ( sum(real4D) /= 0.0_RKIND ) then
+         threadErrs(threadNum) = 1
+      end if
+
+      if ( sum(real3D) /= 0.0_RKIND ) then
+         threadErrs(threadNum) = 1
+      end if
+
+      if ( sum(real2D) /= 0.0_RKIND ) then
+         threadErrs(threadNum) = 1
+      end if
+
+      if ( sum(real1D) /= 0.0_RKIND ) then
+         threadErrs(threadNum) = 1
+      end if
+
+      if ( sum(int3D) /= 0 ) then
+         threadErrs(threadNum) = 1
+      end if
+
+      if ( sum(int2D) /= 0 ) then
+         threadErrs(threadNum) = 1
+      end if
+
+      if ( sum(int1D) /= 0 ) then
+         threadErrs(threadNum) = 1
+      end if
+#ifdef HALO_EXCH_DEBUG
+      write(stderrUnit, *) '  -- Test result: ', threadErrs(threadNum)
+#endif
+
+      !
Compare scratch vertex fields
+      call mpas_pool_get_array(haloExchTestPool, 'vertexScratchReal5D', real5D)
+      call mpas_pool_get_array(haloExchTestPool, 'vertexScratchReal4D', real4D)
+      call mpas_pool_get_array(haloExchTestPool, 'vertexScratchReal3D', real3D)
+      call mpas_pool_get_array(haloExchTestPool, 'vertexScratchReal2D', real2D)
+      call mpas_pool_get_array(haloExchTestPool, 'vertexScratchReal1D', real1D)
+      call mpas_pool_get_array(haloExchTestPool, 'vertexScratchInt3D', int3D)
+      call mpas_pool_get_array(haloExchTestPool, 'vertexScratchInt2D', int2D)
+      call mpas_pool_get_array(haloExchTestPool, 'vertexScratchInt1D', int1D)
+
+      ! Subtract index from all scratch vertex fields
+      iDim1 = size(real5D, dim=5)
+      iDim2 = size(real5D, dim=4)
+      iDim3 = size(real5D, dim=3)
+      iDim4 = size(real5D, dim=2)
+      iDim5 = size(real5D, dim=1)
+
+      !$omp do schedule(runtime) private(j, k, l, m, realValue, integerValue)
+      do i = 1, iDim1
+         realValue = real(indexToVertexID(i), kind=RKIND)
+         integerValue = indexToVertexID(i)
+         do j = 1, iDim2
+            do k = 1, iDim3
+               do l = 1, iDim4
+                  do m = 1, iDim5
+                     real5D(m, l, k, j, i) = real5D(m, l, k, j, i) - realValue
+                  end do
+                  real4D(l, k, j, i) = real4D(l, k, j, i) - realValue
+               end do
+               real3D(k, j, i) = real3D(k, j, i) - realValue
+               int3D(k, j, i) = int3D(k, j, i) - integerValue
+            end do
+            real2D(j, i) = real2D(j, i) - realValue
+            int2D(j, i) = int2D(j, i) - integerValue
+         end do
+         real1D(i) = real1D(i) - realValue
+         int1D(i) = int1D(i) - integerValue
+      end do
+      !$omp end do
+
+      ! Validate that all differences are zero.
+#ifdef HALO_EXCH_DEBUG + write(stderrUnit, *) ' -- Testing scratch vertex fields' +#endif + if ( sum(real5D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real4D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real3D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real2D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(real1D) /= 0.0_RKIND ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int3D) /= 0 ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int2D) /= 0 ) then + threadErrs(threadNum) = 1 + end if + + if ( sum(int1D) /= 0 ) then + threadErrs(threadNum) = 1 + end if +#ifdef HALO_EXCH_DEBUG + write(stderrUnit, *) ' -- Test result: ', threadErrs(threadNum) +#endif + + block => block % next + end do + + call mpas_threading_barrier() + + ! Deallocatae all scratch fields + block => domain % blocklist + do while ( associated(block) ) + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + + ! Allocate scratch cell fields + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'cellScratchInt1D', int1DField) + + call mpas_deallocate_scratch_field(real5DField, .true.) + call mpas_deallocate_scratch_field(real4DField, .true.) + call mpas_deallocate_scratch_field(real3DField, .true.) + call mpas_deallocate_scratch_field(real2DField, .true.) + call mpas_deallocate_scratch_field(real1DField, .true.) 
+ call mpas_deallocate_scratch_field(int3DField, .true.) + call mpas_deallocate_scratch_field(int2DField, .true.) + call mpas_deallocate_scratch_field(int1DField, .true.) + + ! Allocate scratch edge fields + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'edgeScratchInt1D', int1DField) + + call mpas_deallocate_scratch_field(real5DField, .true.) + call mpas_deallocate_scratch_field(real4DField, .true.) + call mpas_deallocate_scratch_field(real3DField, .true.) + call mpas_deallocate_scratch_field(real2DField, .true.) + call mpas_deallocate_scratch_field(real1DField, .true.) + call mpas_deallocate_scratch_field(int3DField, .true.) + call mpas_deallocate_scratch_field(int2DField, .true.) + call mpas_deallocate_scratch_field(int1DField, .true.) + + ! 
Allocate scratch vertex fields + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal5D', real5DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal4D', real4DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal3D', real3DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal2D', real2DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchReal1D', real1DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchInt3D', int3DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchInt2D', int2DField) + call mpas_pool_get_field(haloExchTestPool, 'vertexScratchInt1D', int1DField) + + call mpas_deallocate_scratch_field(real5DField, .true.) + call mpas_deallocate_scratch_field(real4DField, .true.) + call mpas_deallocate_scratch_field(real3DField, .true.) + call mpas_deallocate_scratch_field(real2DField, .true.) + call mpas_deallocate_scratch_field(real1DField, .true.) + call mpas_deallocate_scratch_field(int3DField, .true.) + call mpas_deallocate_scratch_field(int2DField, .true.) + call mpas_deallocate_scratch_field(int1DField, .true.) 
+ + block => block % next + end do + + err = sum(threadErrs) + + end subroutine test_core_halo_exch_validate_fields!}}} + + +end module test_core_halo_exch diff --git a/src/core_test/mpas_test_core_interface.F b/src/core_test/mpas_test_core_interface.F index e8f90f32a6..435f8bd46c 100644 --- a/src/core_test/mpas_test_core_interface.F +++ b/src/core_test/mpas_test_core_interface.F @@ -12,6 +12,7 @@ module test_core_interface use mpas_dmpar use mpas_constants use mpas_io_units + use mpas_attlist use test_core public @@ -42,6 +43,7 @@ subroutine test_setup_core(core)!{{{ core % get_mesh_stream => test_get_mesh_stream core % setup_immutable_streams => test_setup_immutable_streams core % setup_derived_dimensions => test_setup_derived_dimensions + core % setup_decomposed_dimensions => test_setup_decomposed_dimensions core % setup_block => test_setup_block core % setup_namelist => test_setup_namelists @@ -86,7 +88,7 @@ end subroutine test_setup_domain!}}} !> *not* allocated until after this routine is called. ! !----------------------------------------------------------------------- - function test_setup_packages(configPool, packagePool) result(ierr)!{{{ + function test_setup_packages(configPool, packagePool, iocontext) result(ierr)!{{{ use mpas_derived_types @@ -94,6 +96,7 @@ function test_setup_packages(configPool, packagePool) result(ierr)!{{{ type (mpas_pool_type), intent(inout) :: configPool type (mpas_pool_type), intent(inout) :: packagePool + type (mpas_io_context_type), intent(inout) :: iocontext integer :: ierr ierr = 0 diff --git a/src/core_test/mpas_test_core_sorting.F b/src/core_test/mpas_test_core_sorting.F new file mode 100644 index 0000000000..35fcc5c379 --- /dev/null +++ b/src/core_test/mpas_test_core_sorting.F @@ -0,0 +1,140 @@ +! Copyright (c) 2016, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! 
Additional copyright and license information can be found in the +! LICENSE file distributed with this code, or at +! http://mpas-dev.github.com/license.html . +! +module test_core_sorting + + use mpas_derived_types + use mpas_io_units + + private + + public :: test_core_test_sorting + + contains + + !*********************************************************************** + ! + ! routine test_core_test_sorting + ! + !> \brief Tests performance of sorting routines in framework + !> \author Michael Duda + !> \date 1 September 2016 + !> \details + !> This routine tests the performance of the sorting routines in + !> the MPAS framework on various input patterns. Timing information for + !> each pattern is written to stderrUnit. + ! + !----------------------------------------------------------------------- + subroutine test_core_test_sorting(domain, err) + + use mpas_sort, only : mpas_quicksort + use mpas_timer, only : mpas_timer_start, mpas_timer_stop + + implicit none + + type (domain_type), intent(inout) :: domain + integer, intent(out) :: err + + integer :: i, l, r, pivot + real (kind=RKIND) :: temp + integer, parameter :: n = 122880 + real (kind=RKIND), dimension(n) :: vals + integer :: count_start, count_stop, count_rate + + + write(stderrUnit,*) 'Sorting tests:' + + ! + ! Random values + ! + do i=1,n + call random_number(vals(i)) + end do + call system_clock(count=count_start) + call mpas_timer_start('sorting: random') + call mpas_quicksort(n, vals) + call mpas_timer_stop('sorting: random') + call system_clock(count=count_stop) + call system_clock(count_rate=count_rate) + write(stderrUnit,*) ' random input timing (quicksort): ', real(count_stop - count_start) / real(count_rate) + + ! + ! Values in sorted, ascending order + ! 
+ do i=1,n + vals(i) = real(i) + end do + call system_clock(count=count_start) + call mpas_timer_start('sorting: sorted') + call mpas_quicksort(n, vals) + call mpas_timer_stop('sorting: sorted') + call system_clock(count=count_stop) + call system_clock(count_rate=count_rate) + write(stderrUnit,*) ' sorted input timing (quicksort): ', real(count_stop - count_start) / real(count_rate) + + ! + ! Values in sorted, descending order + ! + do i=1,n + vals(i) = real(n-i+1) + end do + call system_clock(count=count_start) + call mpas_timer_start('sorting: reverse sorted') + call mpas_quicksort(n, vals) + call mpas_timer_stop('sorting: reverse sorted') + call system_clock(count=count_stop) + call system_clock(count_rate=count_rate) + write(stderrUnit,*) ' reverse sorted input timing (quicksort): ', real(count_stop - count_start) / real(count_rate) + + ! + ! Constant values + ! + vals(:) = 42.0 + call system_clock(count=count_start) + call mpas_timer_start('sorting: constant') + call mpas_quicksort(n, vals) + call mpas_timer_stop('sorting: constant') + call system_clock(count=count_stop) + call system_clock(count_rate=count_rate) + write(stderrUnit,*) ' constant input timing (quicksort): ', real(count_stop - count_start) / real(count_rate) + + ! + ! Construct theoretically worst-case input for quicksort based on current + ! method of chosing pivot element + ! + do i=1,n + vals(i) = real(i) + end do + r = n + do l=n-1,1,-1 + ! Swap l and r + temp = vals(l) + vals(l) = vals(r) + vals(r) = temp + + pivot = (l+r)/2 + + ! 
Swap pivot and r + temp = vals(pivot) + vals(pivot) = vals(r) + vals(r) = temp + end do + + call system_clock(count=count_start) + call mpas_timer_start('sorting: worst-case') + call mpas_quicksort(n, vals) + call mpas_timer_stop('sorting: worst-case') + call system_clock(count=count_stop) + call system_clock(count_rate=count_rate) + write(stderrUnit,*) ' ''worst-case'' input timing (quicksort): ', real(count_stop - count_start) / real(count_rate) + + err = 0 + + end subroutine test_core_test_sorting + +end module test_core_sorting diff --git a/src/core_test/mpas_test_core_streams.F b/src/core_test/mpas_test_core_streams.F new file mode 100644 index 0000000000..b056fa0801 --- /dev/null +++ b/src/core_test/mpas_test_core_streams.F @@ -0,0 +1,396 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +module test_core_streams + + use mpas_derived_types + use mpas_pool_routines + use mpas_field_routines + use mpas_dmpar + use mpas_io_units + + + contains + + + !*********************************************************************** + ! + ! routine test_core_streams_test + ! + !> \brief tests reading/writing single- and double-precision streams + !> \author Michael Duda + !> \date 2 February 2016 + !> \details + !> This routine verifies that both single- and double-precision streams + !> can be written via the mpas_stream_manager module. Only real-valued + !> fields are read/written, under the assumption that any precision + !> changes would not affect logical, character, or integer data. 
+ !> + !> It is assumed that there is a var_struct containing fields + !> cellPersistReal{0,1,2,3,4,5}D that can be added to the streams + !> that are created by this routine. + ! + !----------------------------------------------------------------------- + subroutine test_core_streams_test(domain, threadErrs, ierr) + + use mpas_stream_manager + + implicit none + + type (domain_type), intent(inout) :: domain + integer, dimension(:), intent(out) :: threadErrs + integer, intent(out) :: ierr + + integer :: i, j, k, l, m + integer :: iDim1, iDim2, iDim3, iDim4, iDim5 + integer :: local_ierr + integer, pointer :: nCellsSolve + integer, dimension(:), pointer :: indexToCellID + type (block_type), pointer :: block + type (mpas_pool_type), pointer :: meshPool + type (mpas_pool_type), pointer :: haloExchTestPool + + real (kind=RKIND), dimension(:, :, :, :, :), pointer :: real5D + real (kind=RKIND), dimension(:, :, :, :), pointer :: real4D + real (kind=RKIND), dimension(:, :, :), pointer :: real3D + real (kind=RKIND), dimension(:, :), pointer :: real2D + real (kind=RKIND), dimension(:), pointer :: real1D + real (kind=RKIND), pointer :: real0D + real (kind=RKIND) :: realValue + real (kind=RKIND), dimension(5) :: d + + + ierr = 0 + + block => domain % blocklist + call mpas_pool_get_subpool(block % structs, 'mesh', meshPool) + call mpas_pool_get_subpool(block % structs, 'haloExchTest', haloExchTestPool) + call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_array(meshPool, 'indexToCellID', indexToCellID) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal5D', real5D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal4D', real4D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal3D', real3D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal2D', real2D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal1D', real1D) + call mpas_pool_get_array(haloExchTestPool, 'cellPersistReal0D', real0D) + + + 
! + ! Initialize fields before writing them out + ! + iDim1 = nCellsSolve + iDim2 = size(real5D, dim=4) + iDim3 = size(real5D, dim=3) + iDim4 = size(real5D, dim=2) + iDim5 = size(real5D, dim=1) + + do i = 1, iDim1 + realValue = real(indexToCellID(i), kind=RKIND)/10.0_RKIND + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = realValue + end do + real4D(l, k, j, i) = realValue + end do + real3D(k, j, i) = realValue + end do + real2D(j, i) = realValue + end do + real1D(i) = realValue + end do + real0D = 2.0_RKIND * asin(1.0_RKIND) + + + ! + ! Create output streams and write real-valued fields in both r4 and r8 precision + ! + call MPAS_stream_mgr_create_stream(domain % streamManager, 'R4_stream', MPAS_STREAM_OUTPUT, 'r4_data.nc', & + realPrecision=MPAS_IO_SINGLE_PRECISION, & + clobberMode=MPAS_STREAM_CLOBBER_TRUNCATE, & + ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_MGR_NOERR) then + ierr = 1 + write(stderrUnit,*) 'Error creating ''R4_stream''.' + return + end if + + call MPAS_stream_mgr_create_stream(domain % streamManager, 'R8_stream', MPAS_STREAM_OUTPUT, 'r8_data.nc', & + realPrecision=MPAS_IO_DOUBLE_PRECISION, & + clobberMode=MPAS_STREAM_CLOBBER_TRUNCATE, & + ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_MGR_NOERR) then + ierr = 1 + write(stderrUnit,*) 'Error creating ''R8_stream''.' + return + end if + + call MPAS_stream_mgr_create_stream(domain % streamManager, 'R8_time_stream', MPAS_STREAM_OUTPUT, 'r8_data.$Y-$M-$D-$d_$h.$m.$s.nc', & + filenameInterval="0001-00-00_00:00:00", & + realPrecision=MPAS_IO_DOUBLE_PRECISION, & + clobberMode=MPAS_STREAM_CLOBBER_TRUNCATE, & + ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_MGR_NOERR) then + ierr = 1 + write(stderrUnit,*) 'Error creating ''R8_time_stream''.' 
+ return + end if + + + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'xtime', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal5D', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal4D', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal3D', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal2D', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal1D', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal0D', ierr=local_ierr) + + call MPAS_stream_mgr_add_stream_fields(domain % streamManager, 'R8_stream', 'R4_stream', ierr=local_ierr) + + call MPAS_stream_mgr_add_stream_fields(domain % streamManager, 'R8_time_stream', 'R4_stream', ierr=local_ierr) + + call MPAS_stream_mgr_write(domain % streamManager, 'R4_stream', forceWriteNow=.true., ierr=local_ierr) + call MPAS_stream_mgr_write(domain % streamManager, 'R8_stream', forceWriteNow=.true., ierr=local_ierr) + call MPAS_stream_mgr_write(domain % streamManager, 'R8_time_stream', forceWriteNow=.true., ierr=local_ierr) + call MPAS_stream_mgr_write(domain % streamManager, 'R8_time_stream', forceWriteNow=.true., writeTime="9999-01-01_00:00:00", ierr=local_ierr) + + + ! + ! Destroy streams + ! + call MPAS_stream_mgr_destroy_stream(domain % streamManager, 'R4_stream', local_ierr) + if (local_ierr /= MPAS_STREAM_MGR_NOERR) then + ierr = 1 + write(stderrUnit,*) 'Error destroying ''R4_stream''.' + return + end if + + call MPAS_stream_mgr_destroy_stream(domain % streamManager, 'R8_stream', local_ierr) + if (local_ierr /= MPAS_STREAM_MGR_NOERR) then + ierr = 1 + write(stderrUnit,*) 'Error destroying ''R8_stream''.' + return + end if + + + + + ! + ! 
Create input streams and read real-valued fields in both r4 and r8 precision + ! + call MPAS_stream_mgr_create_stream(domain % streamManager, 'R4_stream', MPAS_STREAM_INPUT, 'r4_data.nc', & + realPrecision=MPAS_IO_SINGLE_PRECISION, & + clobberMode=MPAS_STREAM_CLOBBER_TRUNCATE, & + ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_MGR_NOERR) then + ierr = 1 + write(stderrUnit,*) 'Error creating ''R4_stream''.' + return + end if + + call MPAS_stream_mgr_create_stream(domain % streamManager, 'R8_stream', MPAS_STREAM_INPUT, 'r8_data.nc', & + realPrecision=MPAS_IO_DOUBLE_PRECISION, & + clobberMode=MPAS_STREAM_CLOBBER_TRUNCATE, & + ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_MGR_NOERR) then + ierr = 1 + write(stderrUnit,*) 'Error creating ''R8_stream''.' + return + end if + + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'xtime', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal5D', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal4D', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal3D', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal2D', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal1D', ierr=local_ierr) + call MPAS_stream_mgr_add_field(domain % streamManager, 'R4_stream', 'cellPersistReal0D', ierr=local_ierr) + + call MPAS_stream_mgr_add_stream_fields(domain % streamManager, 'R8_stream', 'R4_stream', ierr=local_ierr) + + ! + ! Reset contents of real arrays before reading back from streams + ! 
+ do i = 1, iDim1 + realValue = -1.0 + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = realValue + end do + real4D(l, k, j, i) = realValue + end do + real3D(k, j, i) = realValue + end do + real2D(j, i) = realValue + end do + real1D(i) = realValue + end do + real0D = -1.0 + + call MPAS_stream_mgr_read(domain % streamManager, 'R4_stream', rightNow=.true., ierr=local_ierr) + + ! + ! Verify contents of real arrays after reading back from streams + ! + d(:) = 0.0 + do i = 1, iDim1 + realValue = real(real(indexToCellID(i), kind=RKIND)/10.0_RKIND,kind=R4KIND) + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + d(5) = d(5) + (real5D(m, l, k, j, i) - realValue) + end do + d(4) = d(4) + (real4D(l, k, j, i) - realValue) + end do + d(3) = d(3) + (real3D(k, j, i) - realValue) + end do + d(2) = d(2) + (real2D(j, i) - realValue) + end do + d(1) = d(1) + (real1D(i) - realValue) + end do + if (real0D /= real(2.0_RKIND * asin(1.0_RKIND),kind=R4KIND)) then + write(stderrUnit,'(a)') ' Difference detected when reading back 0-d field from single-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 0-d field from single-precision stream - SUCCESS' + end if + if (d(1) /= 0.0) then + write(stderrUnit,'(a)') ' Difference detected when reading back 1-d field from single-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 1-d field from single-precision stream - SUCCESS' + end if + if (d(2) /= 0.0) then + write(stderrUnit,'(a)') ' Difference detected when reading back 2-d field from single-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 2-d field from single-precision stream - SUCCESS' + end if + if (d(3) /= 0.0) then + write(stderrUnit,'(a)') ' Difference detected when reading back 3-d field from single-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 3-d field from 
single-precision stream - SUCCESS' + end if + if (d(4) /= 0.0) then + write(stderrUnit,'(a)') ' Difference detected when reading back 4-d field from single-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 4-d field from single-precision stream - SUCCESS' + end if + if (d(5) /= 0.0) then + write(stderrUnit,'(a)') ' Difference detected when reading back 5-d field from single-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 5-d field from single-precision stream - SUCCESS' + end if + + ! + ! Reset contents of real arrays before reading back from streams + ! + do i = 1, iDim1 + realValue = -1.0 + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + real5D(m, l, k, j, i) = realValue + end do + real4D(l, k, j, i) = realValue + end do + real3D(k, j, i) = realValue + end do + real2D(j, i) = realValue + end do + real1D(i) = realValue + end do + real0D = -1.0 + + call MPAS_stream_mgr_read(domain % streamManager, 'R8_stream', rightNow=.true., ierr=local_ierr) + + ! + ! Verify contents of real arrays after reading back from streams + ! 
+ d(:) = 0.0 + do i = 1, iDim1 + realValue = real(indexToCellID(i), kind=RKIND)/10.0_RKIND + do j = 1, iDim2 + do k = 1, iDim3 + do l = 1, iDim4 + do m = 1, iDim5 + d(5) = d(5) + (real5D(m, l, k, j, i) - realValue) + end do + d(4) = d(4) + (real4D(l, k, j, i) - realValue) + end do + d(3) = d(3) + (real3D(k, j, i) - realValue) + end do + d(2) = d(2) + (real2D(j, i) - realValue) + end do + d(1) = d(1) + (real1D(i) - realValue) + end do + if (real0D /= (2.0_RKIND * asin(1.0_RKIND))) then + write(stderrUnit,'(a)') ' Difference detected when reading back 0-d field from double-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 0-d field from double-precision stream - SUCCESS' + end if + if (d(1) /= 0.0) then + write(stderrUnit,'(a)') ' Difference detected when reading back 1-d field from double-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 1-d field from double-precision stream - SUCCESS' + end if + if (d(2) /= 0.0) then + write(stderrUnit,'(a)') ' Difference detected when reading back 2-d field from double-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 2-d field from double-precision stream - SUCCESS' + end if + if (d(3) /= 0.0) then + write(stderrUnit,'(a)') ' Difference detected when reading back 3-d field from double-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 3-d field from double-precision stream - SUCCESS' + end if + if (d(4) /= 0.0) then + write(stderrUnit,'(a)') ' Difference detected when reading back 4-d field from double-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 4-d field from double-precision stream - SUCCESS' + end if + if (d(5) /= 0.0) then + write(stderrUnit,'(a)') ' Difference detected when reading back 5-d field from double-precision stream - FAILURE' + ierr = ierr + 1 + else + write(stderrUnit,'(a)') ' Reading 5-d field from double-precision 
stream - SUCCESS' + end if + + + ! + ! Destroy streams + ! + call MPAS_stream_mgr_destroy_stream(domain % streamManager, 'R4_stream', local_ierr) + if (local_ierr /= MPAS_STREAM_MGR_NOERR) then + ierr = 1 + write(stderrUnit,*) 'Error destroying ''R4_stream''.' + return + end if + + call MPAS_stream_mgr_destroy_stream(domain % streamManager, 'R8_stream', local_ierr) + if (local_ierr /= MPAS_STREAM_MGR_NOERR) then + ierr = 1 + write(stderrUnit,*) 'Error destroying ''R8_stream''.' + return + end if + + end subroutine test_core_streams_test + +end module test_core_streams diff --git a/src/core_test/mpas_test_core_timekeeping_tests.F b/src/core_test/mpas_test_core_timekeeping_tests.F new file mode 100644 index 0000000000..d5769b2c39 --- /dev/null +++ b/src/core_test/mpas_test_core_timekeeping_tests.F @@ -0,0 +1,172 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +module test_core_timekeeping_tests + + use mpas_derived_types + use mpas_pool_routines + use mpas_field_routines + use mpas_dmpar + use mpas_threading + use mpas_io_units + use mpas_timer + use mpas_timekeeping + + implicit none + private + + public :: test_core_test_intervals + + contains + + !*********************************************************************** + ! + ! routine test_core_test_intervals + ! + !> \brief MPAS Test Core timekeeping interval tests + !> \author Doug Jacobsen + !> \date 08/23/2016 + !> \details + !> This routine performs tests related to timekeeping intervals + ! 
+ !----------------------------------------------------------------------- + subroutine test_core_test_intervals(domain, threadErrs, err)!{{{ + + type (domain_type), intent(inout) :: domain + integer, dimension(:), intent(out) :: threadErrs + integer, intent(out) :: err + + character (len=StrKIND) :: int1_str, int2_str + character (len=StrKIND) :: ref_str + + integer :: threadNum + integer :: iErr, err_tmp + integer :: divs + + call mpas_timer_start('timekeeping tests') + + threadNum = mpas_threading_get_thread_num() + err = 0 + + if ( threadNum == 0 ) then + write(stderrUnit, *) ' Performing time interval tests' + + write(stderrUnit, *) ' Test 1:' + call test_core_interval_test('0001-01-01_00:00:00', '0000-01-00_10:00:00', '0001_00:00:00', 31, '0000_10:00:00', err_tmp) + if ( err_tmp == 0 ) then + write(stderrUnit, *) ' Result: PASSED' + else + write(stderrUnit, *) ' * Result: FAILED' + end if + + write(stderrUnit, *) ' Test 2:' + call test_core_interval_test('0001-01-01_00:00:00', '0000-01-00_00:00:00', '0001_00:00:00', 31, '0000_00:00:00', err_tmp) + if ( err_tmp == 0 ) then + write(stderrUnit, *) ' Result: PASSED' + else + write(stderrUnit, *) ' * Result: FAILED' + end if + + write(stderrUnit, *) ' Test 3:' + call test_core_interval_test('0001-02-01_00:00:00', '0000-01-00_10:00:00', '0001_00:00:00', 28, '0000_10:00:00', err_tmp) + if ( err_tmp == 0 ) then + write(stderrUnit, *) ' Result: PASSED' + else + write(stderrUnit, *) ' * Result: FAILED' + end if + + write(stderrUnit, *) ' Test 4:' + call test_core_interval_test('0001-02-01_00:00:00', '0000-01-00_00:00:00', '0001_00:00:00', 28, '0000_00:00:00', err_tmp) + if ( err_tmp == 0 ) then + write(stderrUnit, *) ' Result: PASSED' + else + write(stderrUnit, *) ' * Result: FAILED' + end if + + write(stderrUnit, *) ' Test 5:' + call test_core_interval_test('0001-01-01_00:00:00', '0000-00-00_01:00:00', '0000_00:30:00', 2, '0000_00:00:00', err_tmp) + if ( err_tmp == 0 ) then + write(stderrUnit, *) ' Result: PASSED' + 
else + write(stderrUnit, *) ' * Result: FAILED' + end if + + write(stderrUnit, *) ' Test 6:' + call test_core_interval_test('0001-01-01_00:00:00', '0001-01-00_00:00:00', '0001-00-00_00:00:00', 1, '0000-00-31_00:00:00', err_tmp) + if ( err_tmp == 0 ) then + write(stderrUnit, *) ' Result: PASSED' + else + write(stderrUnit, *) ' * Result: FAILED' + end if + + + + write(stderrUnit, *) ' Completed time interval tests' + end if + + call mpas_timer_stop('timekeeping tests') + + end subroutine test_core_test_intervals!}}} + + subroutine test_core_interval_test(ref_str, int1_str, int2_str, expected_divs, expected_remainder_str, ierr)!{{{ + character (len=*), intent(in) :: ref_str, int1_str, int2_str + integer, intent(in) :: expected_divs + character (len=*), intent(in) :: expected_remainder_str + integer, intent(out) :: ierr + + integer :: divs + + character (len=StrKIND) :: remainder_str + + type (mpas_time_type) :: ref_time + type (mpas_timeinterval_type) :: int1, int2, remainder + type (mpas_timeinterval_type) :: expected_remainder + + integer :: err_tmp + + ierr = 0 + + write(stderrUnit, *) '' + write(stderrUnit, *) ' Testing time intervals:' + write(stderrUnit, *) ' Reference time: ', trim(ref_str) + write(stderrUnit, *) ' Interval 1: ', trim(int1_str) + write(stderrUnit, *) ' Interval 2: ', trim(int2_str) + + call mpas_set_time(ref_time, dateTimeString=ref_str, ierr=err_tmp) + call mpas_set_timeinterval(int1, timeString=int1_str, ierr=err_tmp) + call mpas_set_timeinterval(int2, timeString=int2_str, ierr=err_tmp) + call mpas_set_timeinterval(expected_remainder, timeString=expected_remainder_str, ierr=err_tmp) + + write(stderrUnit, *) ' -- Calling interval division' + + call mpas_interval_division(ref_time, int1, int2, divs, remainder) + + call mpas_get_timeinterval(remainder, startTimeIn=ref_time, timeString=remainder_str) + + write(stderrUnit, *) ' Interval Division summary' + write(stderrUnit, *) ' Divisions: ', divs + write(stderrUnit, *) ' Remainder: ', 
trim(remainder_str) + write(stderrUnit, *) '' + + if ( divs == expected_divs ) then + write(stderrUnit, *) ' Div Test: PASSED' + else + write(stderrUnit, *) ' ** Div Test: FAILED' + ierr = 1 + end if + + if ( remainder == expected_remainder ) then + write(stderrUnit, *) ' Remainder Test: PASSED' + else + write(stderrUnit, *) ' ** Remainder Test: FAILED' + ierr = 1 + end if + + + end subroutine test_core_interval_test!}}} + +end module test_core_timekeeping_tests diff --git a/src/driver/Makefile b/src/driver/Makefile index 7a803c72c1..8e041c8f70 100644 --- a/src/driver/Makefile +++ b/src/driver/Makefile @@ -23,7 +23,7 @@ clean: $(RM) $@ $*.mod ifeq "$(GEN_F90)" "true" $(CPP) $(CPPFLAGS) $(CPPINCLUDES) $< > $*.f90 - $(FC) $(FFLAGS) -c $*.f90 $(FCINCLUDES) -I../framework -I../core_$(CORE) -I../external/esmf_time_f90 + $(FC) $(FFLAGS) -c $*.f90 $(FCINCLUDES) -I../framework -I../operators -I../core_$(CORE) -I../external/esmf_time_f90 else - $(FC) $(CPPFLAGS) $(FFLAGS) -c $*.F $(CPPINCLUDES) $(FCINCLUDES) -I../framework -I../core_$(CORE) -I../external/esmf_time_f90 + $(FC) $(CPPFLAGS) $(FFLAGS) -c $*.F $(CPPINCLUDES) $(FCINCLUDES) -I../framework -I../operators -I../core_$(CORE) -I../external/esmf_time_f90 endif diff --git a/src/driver/mpas_subdriver.F b/src/driver/mpas_subdriver.F index 795acf48fc..1a3528f9f6 100644 --- a/src/driver/mpas_subdriver.F +++ b/src/driver/mpas_subdriver.F @@ -9,6 +9,7 @@ module mpas_subdriver use mpas_framework use mpas_kind_types + use mpas_abort, only : mpas_dmpar_global_abort use mpas_derived_types, only: dm_info, domain_type @@ -56,6 +57,7 @@ subroutine mpas_init() character(len=StrKIND) :: argument, namelistFile, streamsFile character(len=StrKIND) :: timeStamp integer :: ierr + integer :: blockID character(kind=c_char), dimension(StrKIND+1) :: c_filename ! 
StrKIND+1 for C null-termination character integer(kind=c_int) :: c_comm @@ -202,7 +204,7 @@ end subroutine xml_stream_get_attributes call mpas_dmpar_global_abort('ERROR: Package definition failed for core ' // trim(domain_ptr % core % coreName)) end if - ierr = domain_ptr % core % setup_packages(domain_ptr % configs, domain_ptr % packages) + ierr = domain_ptr % core % setup_packages(domain_ptr % configs, domain_ptr % packages, domain_ptr % iocontext) if ( ierr /= 0 ) then call mpas_dmpar_global_abort('ERROR: Package setup failed for core ' // trim(domain_ptr % core % coreName)) end if @@ -267,12 +269,13 @@ end subroutine xml_stream_get_attributes call mpas_get_time(start_time, dateTimeString=ref_time_temp, ierr=ierr) end if + blockID = -1 if ( trim(filename_interval_temp) == 'none' ) then - call mpas_expand_string(ref_time_temp, mesh_filename_temp, mesh_filename) + call mpas_expand_string(ref_time_temp, blockID, mesh_filename_temp, mesh_filename) else call mpas_set_time(ref_time, dateTimeString=ref_time_temp, ierr=ierr) call mpas_set_timeInterval(filename_interval, timeString=filename_interval_temp, ierr=ierr) - call mpas_build_stream_filename(ref_time, start_time, filename_interval, mesh_filename_temp, mesh_filename, ierr) + call mpas_build_stream_filename(ref_time, start_time, filename_interval, mesh_filename_temp, blockID, mesh_filename, ierr) end if write(stderrUnit, *) ' ** Attempting to bootstrap MPAS framework using stream: ', trim(mesh_stream) call mpas_bootstrap_framework_phase1(domain_ptr, mesh_filename, mesh_iotype) @@ -280,7 +283,8 @@ end subroutine xml_stream_get_attributes ! ! Set up run-time streams ! 
- call MPAS_stream_mgr_init(domain_ptr % streamManager, domain_ptr % clock, domain_ptr % blocklist % allFields, domain_ptr % packages, domain_ptr % blocklist % allStructs) + call MPAS_stream_mgr_init(domain_ptr % streamManager, domain_ptr % ioContext, domain_ptr % clock, & + domain_ptr % blocklist % allFields, domain_ptr % packages, domain_ptr % blocklist % allStructs) call add_stream_attributes(domain_ptr) @@ -345,7 +349,9 @@ subroutine mpas_finalize() end if call mpas_timer_stop('total time') + call mpas_timer_write_header() call mpas_timer_write() + call mpas_timer_finalize(domain_ptr) ! @@ -400,6 +406,9 @@ subroutine add_stream_attributes(domain) call MPAS_stream_mgr_add_att(domain % streamManager, 'on_a_sphere', domain % on_a_sphere) call MPAS_stream_mgr_add_att(domain % streamManager, 'sphere_radius', domain % sphere_radius) + call MPAS_stream_mgr_add_att(domain % streamManager, 'is_periodic', domain % is_periodic) + call MPAS_stream_mgr_add_att(domain % streamManager, 'x_period', domain % x_period) + call MPAS_stream_mgr_add_att(domain % streamManager, 'y_period', domain % y_period) ! DWJ 10/01/2014: Eventually add the real history attribute, for now (due to length restrictions) ! add a shortened version. ! call MPAS_stream_mgr_add_att(domain % streamManager, 'history', domain % history) diff --git a/src/external/ezxml/Makefile b/src/external/ezxml/Makefile index f07d9effd7..b7d1633363 100644 --- a/src/external/ezxml/Makefile +++ b/src/external/ezxml/Makefile @@ -3,11 +3,12 @@ OBJS = ezxml.o all: clean - $(MAKE) library + $(MAKE) -j 1 library library: $(OBJS) clean: $(RM) *.o *.i -.c.o: $(CC) $(CFLAGS) $(CPPFLAGS) -c $< +.c.o: + $(CC) $(CFLAGS) $(CPPFLAGS) -c $< diff --git a/src/framework/Makefile b/src/framework/Makefile index d1138a4bff..8b9947db78 100644 --- a/src/framework/Makefile +++ b/src/framework/Makefile @@ -1,12 +1,12 @@ .SUFFIXES: .F .c .o DEPS := $(shell find ../core_$(CORE)/ -type f -name "*.xml" ! 
-name "*processed.xml") +TYPE_DEPS := $(shell find . -type f -name "*_types.inc") OBJS = mpas_kind_types.o \ mpas_framework.o \ mpas_timer.o \ mpas_timekeeping.o \ - mpas_configure.o \ mpas_constants.o \ mpas_attlist.o \ mpas_hash.o \ @@ -14,7 +14,9 @@ OBJS = mpas_kind_types.o \ mpas_block_decomp.o \ mpas_block_creator.o \ mpas_dmpar.o \ + mpas_abort.o \ mpas_decomp.o \ + mpas_threading.o \ mpas_io.o \ mpas_io_streams.o \ mpas_bootstrapping.o \ @@ -31,24 +33,24 @@ OBJS = mpas_kind_types.o \ mpas_field_routines.o \ mpas_pool_routines.o \ xml_stream_parser.o \ + regex_matching.o \ + mpas_field_accessor.o all: framework $(DEPS) framework: $(OBJS) ar -ru libframework.a $(OBJS) ../external/ezxml/ezxml.o -mpas_configure.o: mpas_dmpar.o mpas_io_units.o mpas_pool_routines.o $(DEPS) - mpas_framework.o: mpas_dmpar.o \ mpas_io.o \ mpas_derived_types.o \ mpas_domain_routines.o \ mpas_field_routines.o \ mpas_pool_routines.o \ - mpas_configure.o \ mpas_timer.o \ mpas_sort.o \ mpas_io_units.o \ + mpas_block_decomp.o \ mpas_stream_manager.o \ mpas_c_interfacing.o @@ -56,47 +58,51 @@ mpas_constants.o: mpas_kind_types.o mpas_attlist.o: mpas_kind_types.o mpas_io_units.o mpas_derived_types.o -mpas_derived_types.o: mpas_kind_types.o mpas_constants.o +mpas_derived_types.o: mpas_kind_types.o mpas_constants.o $(TYPE_DEPS) -mpas_domain_routines.o: mpas_derived_types.o mpas_pool_routines.o +mpas_domain_routines.o: mpas_derived_types.o mpas_pool_routines.o mpas_dmpar.o -mpas_field_routines.o: mpas_derived_types.o +mpas_field_routines.o: mpas_derived_types.o duplicate_field_array.inc duplicate_field_scalar.inc mpas_threading.o mpas_attlist.o -mpas_pool_routines.o: mpas_derived_types.o mpas_field_routines.o +mpas_pool_routines.o: mpas_derived_types.o mpas_field_routines.o mpas_threading.o mpas_abort.o mpas_decomp.o: mpas_derived_types.o mpas_stream_manager.o mpas_hash.o : mpas_derived_types.o -mpas_dmpar.o: mpas_sort.o streams.o mpas_kind_types.o mpas_derived_types.o mpas_hash.o 
mpas_io_units.o +mpas_dmpar.o: mpas_sort.o streams.o mpas_kind_types.o mpas_derived_types.o mpas_hash.o mpas_io_units.o mpas_threading.o mpas_pool_routines.o mpas_sort.o: mpas_kind_types.o mpas_io_units.o -mpas_timekeeping.o: mpas_kind_types.o mpas_io_units.o mpas_derived_types.o +mpas_timekeeping.o: mpas_kind_types.o mpas_io_units.o mpas_derived_types.o mpas_dmpar.o mpas_threading.o mpas_abort.o -mpas_timer.o: mpas_kind_types.o mpas_io_units.o mpas_dmpar.o +mpas_timer.o: mpas_kind_types.o mpas_io_units.o mpas_dmpar.o mpas_threading.o mpas_abort.o -mpas_block_decomp.o: mpas_derived_types.o mpas_hash.o mpas_configure.o mpas_io_units.o +mpas_block_decomp.o: mpas_derived_types.o mpas_hash.o mpas_io_units.o mpas_dmpar.o -mpas_block_creator.o: mpas_dmpar.o mpas_hash.o mpas_sort.o mpas_configure.o mpas_io_units.o mpas_block_decomp.o mpas_stream_manager.o mpas_decomp.o $(DEPS) +mpas_block_creator.o: mpas_dmpar.o mpas_hash.o mpas_sort.o mpas_io_units.o mpas_block_decomp.o mpas_stream_manager.o mpas_decomp.o mpas_abort.o $(DEPS) -mpas_io.o: mpas_dmpar.o mpas_io_units.o mpas_attlist.o +mpas_io.o: mpas_dmpar.o mpas_io_units.o mpas_attlist.o mpas_abort.o -mpas_io_streams.o: mpas_attlist.o mpas_derived_types.o mpas_timekeeping.o mpas_io.o mpas_io_units.o mpas_pool_routines.o $(DEPS) +mpas_io_streams.o: mpas_attlist.o mpas_derived_types.o mpas_timekeeping.o mpas_io.o mpas_io_units.o mpas_pool_routines.o add_field_indices.inc $(DEPS) -mpas_bootstrapping.o: mpas_derived_types.o mpas_dmpar.o mpas_block_decomp.o mpas_block_creator.o mpas_sort.o mpas_configure.o mpas_timekeeping.o mpas_io_streams.o mpas_io_units.o mpas_stream_manager.o random_id.o $(DEPS) +mpas_bootstrapping.o: mpas_derived_types.o mpas_dmpar.o mpas_block_decomp.o mpas_block_creator.o mpas_sort.o mpas_timekeeping.o mpas_io_streams.o mpas_io_units.o mpas_stream_manager.o random_id.o mpas_abort.o $(DEPS) mpas_io_units.o: mpas_kind_types.o -mpas_stream_list.o: mpas_derived_types.o mpas_kind_types.o mpas_io_units.o 
mpas_io_streams.o mpas_timekeeping.o +mpas_threading.o: mpas_kind_types.o mpas_io_units.o + +mpas_stream_list.o: mpas_derived_types.o mpas_kind_types.o mpas_io_units.o mpas_io_streams.o mpas_timekeeping.o regex_matching.o -mpas_stream_manager.o: mpas_io_streams.o mpas_timekeeping.o mpas_derived_types.o mpas_io_units.o mpas_kind_types.o mpas_c_interfacing.o mpas_stream_list.o mpas_dmpar.o mpas_io.o +mpas_stream_manager.o: mpas_io_streams.o mpas_timekeeping.o mpas_derived_types.o mpas_io_units.o mpas_kind_types.o mpas_c_interfacing.o mpas_stream_list.o mpas_dmpar.o mpas_io.o mpas_threading.o mpas_abort.o -mpas_forcing.o: mpas_derived_types.o mpas_timekeeping.o mpas_io_streams.o mpas_stream_manager.o +mpas_forcing.o: mpas_derived_types.o mpas_timekeeping.o mpas_io_streams.o mpas_stream_manager.o mpas_abort.o xml_stream_parser.o: xml_stream_parser.c $(CC) $(CFLAGS) $(CPPFLAGS) $(CPPINCLUDES) -I../external/ezxml -c xml_stream_parser.c +mpas_field_accessor.o: mpas_derived_types.o mpas_kind_types.o mpas_pool_routines.o mpas_io_units.o + clean: $(RM) *.o *.mod *.f90 libframework.a @# Certain systems with intel compilers generate *.i files diff --git a/src/framework/add_field_indices.inc b/src/framework/add_field_indices.inc index 56c7c798a4..87e1031941 100644 --- a/src/framework/add_field_indices.inc +++ b/src/framework/add_field_indices.inc @@ -2,45 +2,49 @@ totalDimSize = 0 field_ptr => field if ( field % isDecomposed ) then + meshFieldDim = .false. if (trim(field % dimNames(idim)) == 'nCells') then -!write(0,*) '... 
outer dimension is nCells' - allocate(indices(0)) - do while (associated(field_ptr)) - call mpas_pool_get_array(field_ptr % block % allFields, 'indexToCellID', indexArray) - call mpas_pool_get_dimension(field_ptr % block % dimensions, 'nCellsSolve', indexDimension) - - call mergeArrays(indices, indexArray(1:indexDimension)) - totalDimSize = totalDimSize + indexDimension - - field_ptr => field_ptr % next - end do - call mpas_dmpar_sum_int(field % block % domain % dminfo, totalDimSize, globalDimSize) + elementName = 'Cell' + elementNamePlural = 'Cells' + meshFieldDim = .true. else if (trim(field % dimNames(idim)) == 'nEdges') then -!write(0,*) '... outer dimension is nEdges' + elementName = 'Edge' + elementNamePlural = 'Edges' + meshFieldDim = .true. + else if (trim(field % dimNames(idim)) == 'nVertices') then + elementName = 'Vertex' + elementNamePlural = 'Vertices' + meshFieldDim = .true. + end if + + if ( meshFieldDim ) then allocate(indices(0)) - do while (associated(field_ptr)) - call mpas_pool_get_array(field_ptr % block % allFields, 'indexToEdgeID', indexArray) - call mpas_pool_get_dimension(field_ptr % block % dimensions, 'nEdgesSolve', indexDimension) + if ( .not. stream % blockWrite ) then + do while (associated(field_ptr)) + call mpas_pool_get_array(field_ptr % block % allFields, 'indexTo' // trim(elementName) // 'ID', indexArray) + call mpas_pool_get_dimension(field_ptr % block % dimensions, 'n' // trim(elementNamePlural) // 'Solve', & + indexDimension) - call mergeArrays(indices, indexArray(1:indexDimension)) - totalDimSize = totalDimSize + indexDimension + call mergeArrays(indices, indexArray(1:indexDimension)) + totalDimSize = totalDimSize + indexDimension - field_ptr => field_ptr % next - end do - call mpas_dmpar_sum_int(field % block % domain % dminfo, totalDimSize, globalDimSize) - else if (trim(field % dimNames(idim)) == 'nVertices') then -!write(0,*) '... 
outer dimension is nVertices' - allocate(indices(0)) - do while (associated(field_ptr)) - call mpas_pool_get_array(field_ptr % block % allFields, 'indexToVertexID', indexArray) - call mpas_pool_get_dimension(field_ptr % block % dimensions, 'nVerticesSolve', indexDimension) + field_ptr => field_ptr % next + end do + call mpas_dmpar_sum_int(stream % fileHandle % ioContext % dminfo, totalDimSize, globalDimSize) + else + call mpas_pool_get_dimension(field_ptr % block % dimensions, 'n' // trim(elementNamePlural), & + indexDimension) + allocate(indexArray(indexDimension)) + do i = 1, indexDimension + indexArray(i) = i + end do call mergeArrays(indices, indexArray(1:indexDimension)) totalDimSize = totalDimSize + indexDimension + globalDimSize = totalDimSize - field_ptr => field_ptr % next - end do - call mpas_dmpar_sum_int(field % block % domain % dminfo, totalDimSize, globalDimSize) + deallocate(indexArray) + end if else ! Use defined decomposition allocate(indices(0)) do while (associated(field_ptr)) @@ -51,16 +55,15 @@ call mergeArrays(indices, indexArray(1:indexDimension)) totalDimSize = totalDimSize + indexDimension - field_ptr => field_ptr % next end do - call mpas_dmpar_sum_int(field % block % domain % dminfo, totalDimSize, globalDimSize) + call mpas_dmpar_sum_int(stream % fileHandle % ioContext % dminfo, totalDimSize, globalDimSize) end if else globalDimSize = field % dimSizes(idim) totalDimSize = globalDimSize - if (field % block % domain % dminfo % my_proc_id == IO_NODE) then + if (stream % fileHandle % ioContext % dminfo % my_proc_id == IO_NODE) then allocate(indices(field % dimSizes(ndims))) do i=1,field % dimSizes(ndims) indices(i) = i diff --git a/src/framework/duplicate_field_array.inc b/src/framework/duplicate_field_array.inc index 86cebd8e90..17a14cb1f4 100644 --- a/src/framework/duplicate_field_array.inc +++ b/src/framework/duplicate_field_array.inc @@ -1,60 +1,80 @@ - if (present(copy_array_only)) then - local_copy_only = copy_array_only - else - 
local_copy_only = .false. - end if - - - src_cursor => src - if (.not. local_copy_only) then - nullify(dst_cursor) - else - dst_cursor => dst - end if + threadNum = mpas_threading_get_thread_num() -! do while (associated(src_cursor)) + if ( threadNum == 0 ) then + if (present(copy_array_only)) then + local_copy_only = copy_array_only + else + local_copy_only = .false. + end if + + src_cursor => src if (.not. local_copy_only) then - if (associated(dst_cursor)) then - allocate(dst_cursor % next) - dst_cursor % next % prev => dst_cursor - dst_cursor => dst_cursor % next - else - allocate(dst) - nullify(dst % prev) - dst_cursor => dst - end if - nullify(dst_cursor % next) + nullify(dst_cursor) + else + dst_cursor => dst end if +! do while (associated(src_cursor)) - ! - ! Fill in members of dst_cursor from src_cursor - ! - if (.not. local_copy_only) then - dst_cursor % block => src_cursor % block - dst_cursor % fieldName = src_cursor % fieldName - dst_cursor % isVarArray = src_cursor % isVarArray - if ( associated( src_cursor % constituentNames ) ) then - allocate(dst_cursor % constituentNames(size(src_cursor % constituentNames, dim=1))) - dst_cursor % constituentNames(:) = src_cursor % constituentNames(:) + if (.not. 
local_copy_only) then + if (associated(dst_cursor)) then + allocate(dst_cursor % next) + dst_cursor % next % prev => dst_cursor + dst_cursor => dst_cursor % next + else + allocate(dst) + nullify(dst % prev) + dst_cursor => dst + end if + nullify(dst_cursor % next) end if - dst_cursor % isPersistent = src_cursor % isPersistent - dst_cursor % isActive = src_cursor % isActive - dst_cursor % isDecomposed = src_cursor % isDecomposed - dst_cursor % hasTimeDimension = src_cursor % hasTimeDimension - dst_cursor % dimNames = src_cursor % dimNames - dst_cursor % dimSizes = src_cursor % dimSizes - dst_cursor % sendList => src_cursor % sendList - dst_cursor % recvList => src_cursor % recvList - dst_cursor % copyList => src_cursor % copyList - call mpas_allocate_mold(dst_cursor % array, src_cursor % array) ! Until we get F2008 support for ALLOCATE(A,MOLD=B) - end if - dst_cursor % array = src_cursor % array -! src_cursor => src_cursor % next -! if (.not. local_copy_only) then -! dst_cursor => dst_cursor % next -! end if -! end do + ! + ! Fill in members of dst_cursor from src_cursor + ! + if (.not. 
local_copy_only) then + dst_cursor % block => src_cursor % block + dst_cursor % fieldName = src_cursor % fieldName + dst_cursor % isVarArray = src_cursor % isVarArray + if ( associated( src_cursor % constituentNames ) ) then + allocate(dst_cursor % constituentNames(size(src_cursor % constituentNames, dim=1))) + dst_cursor % constituentNames(:) = src_cursor % constituentNames(:) + end if + dst_cursor % isPersistent = src_cursor % isPersistent + dst_cursor % isActive = src_cursor % isActive + dst_cursor % isDecomposed = src_cursor % isDecomposed + dst_cursor % hasTimeDimension = src_cursor % hasTimeDimension + dst_cursor % dimNames = src_cursor % dimNames + dst_cursor % dimSizes = src_cursor % dimSizes + dst_cursor % sendList => src_cursor % sendList + dst_cursor % recvList => src_cursor % recvList + dst_cursor % copyList => src_cursor % copyList + if ( dst_cursor % isActive ) then + call mpas_allocate_mold(dst_cursor % array, src_cursor % array) ! Until we get F2008 support for ALLOCATE(A,MOLD=B) + else + nullify(dst_cursor % array) + end if + + if ( associated(dst_cursor % attLists) ) then + deallocate(dst_cursor % attLists) + end if + + allocate(dst_cursor % attLists( size(src_cursor % attLists) ) ) + + do iConstituent = 1, size(src_cursor % attLists) + call mpas_duplicate_attlist(src_cursor % attLists(iConstituent) % attList, dst_cursor % attLists(iConstituent) % attList) + end do + end if + if ( dst_cursor % isActive .and. src_cursor % isActive ) then + dst_cursor % array = src_cursor % array + end if + +! src_cursor => src_cursor % next +! if (.not. local_copy_only) then +! dst_cursor => dst_cursor % next +! end if + +! 
end do + end if diff --git a/src/framework/duplicate_field_scalar.inc b/src/framework/duplicate_field_scalar.inc index 1c2bf9dc2c..33cb898256 100644 --- a/src/framework/duplicate_field_scalar.inc +++ b/src/framework/duplicate_field_scalar.inc @@ -1,52 +1,68 @@ - if (present(copy_array_only)) then - local_copy_only = copy_array_only - else - local_copy_only = .false. - end if - - - src_cursor => src - if (.not. local_copy_only) then - nullify(dst_cursor) - else - dst_cursor => dst - end if - -! do while (associated(src_cursor)) + threadNum = mpas_threading_get_thread_num() + if ( threadNum == 0 ) then + if (present(copy_array_only)) then + local_copy_only = copy_array_only + else + local_copy_only = .false. + end if + + + src_cursor => src if (.not. local_copy_only) then - if (associated(dst_cursor)) then - allocate(dst_cursor % next) - dst_cursor % next % prev => dst_cursor - dst_cursor => dst_cursor % next - else - allocate(dst) - nullify(dst % prev) - dst_cursor => dst - end if - nullify(dst_cursor % next) + nullify(dst_cursor) + else + dst_cursor => dst end if + +! do while (associated(src_cursor)) + + if (.not. local_copy_only) then + if (associated(dst_cursor)) then + allocate(dst_cursor % next) + dst_cursor % next % prev => dst_cursor + dst_cursor => dst_cursor % next + else + allocate(dst) + nullify(dst % prev) + dst_cursor => dst + end if + nullify(dst_cursor % next) + end if + + + ! + ! Fill in members of dst_cursor from src_cursor + ! + if (.not. 
local_copy_only) then + dst_cursor % block => src_cursor % block + dst_cursor % fieldName = src_cursor % fieldName + dst_cursor % isVarArray = src_cursor % isVarArray + dst_cursor % isActive = src_cursor % isActive + dst_cursor % isDecomposed = src_cursor % isDecomposed + dst_cursor % hasTimeDimension = src_cursor % hasTimeDimension + dst_cursor % sendList => src_cursor % sendList + dst_cursor % recvList => src_cursor % recvList + dst_cursor % copyList => src_cursor % copyList + if ( associated(dst_cursor % attLists) ) then + deallocate(dst_cursor % attLists) + end if - ! - ! Fill in members of dst_cursor from src_cursor - ! - if (.not. local_copy_only) then - dst_cursor % block => src_cursor % block - dst_cursor % fieldName = src_cursor % fieldName - dst_cursor % isVarArray = src_cursor % isVarArray - dst_cursor % isActive = src_cursor % isActive - dst_cursor % isDecomposed = src_cursor % isDecomposed - dst_cursor % hasTimeDimension = src_cursor % hasTimeDimension - dst_cursor % sendList => src_cursor % sendList - dst_cursor % recvList => src_cursor % recvList - dst_cursor % copyList => src_cursor % copyList - end if - dst_cursor % scalar = src_cursor % scalar + allocate(dst_cursor % attLists( size(src_cursor % attLists) ) ) -! src_cursor => src_cursor % next -! if (.not. local_copy_only) then -! dst_cursor => dst_cursor % next -! end if + do iConstituent = 1, size(src_cursor % attLists) + call mpas_duplicate_attlist(src_cursor % attLists(iConstituent) % attList, dst_cursor % attLists(iConstituent) % attList) + end do + end if + if ( dst_cursor % isActive .and. src_cursor % isActive ) then + dst_cursor % scalar = src_cursor % scalar + end if -! end do +! src_cursor => src_cursor % next +! if (.not. local_copy_only) then +! dst_cursor => dst_cursor % next +! end if + +! 
end do + end if diff --git a/src/framework/mpas_abort.F b/src/framework/mpas_abort.F new file mode 100644 index 0000000000..4043c39641 --- /dev/null +++ b/src/framework/mpas_abort.F @@ -0,0 +1,115 @@ +! Copyright (c) 2016, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +module mpas_abort + + contains + + !----------------------------------------------------------------------- + ! routine mpas_dmpar_global_abort + ! + !> \brief Forces the exit of all processes in MPI_COMM_WORLD + !> \author Michael Duda + !> \date 26 March 2013 + !> \details + !> This routine writes the specified message to standard error and to + !> a per-process log file named log.????.abort. However, if the optional + !> argument deferredAbort is set to .true., messages will be written to + !> standard error and to the log.????.abort files, but MPI tasks will not + !> be termintated. This allows code to write several lines of messages + !> before exiting. + ! 
+ !----------------------------------------------------------------------- + subroutine mpas_dmpar_global_abort(mesg, deferredAbort)!{{{ + + use mpas_kind_types, only : StrKIND + use mpas_io_units, only : stderrUnit, stdoutUnit, mpas_new_unit + use mpas_threading, only : mpas_threading_get_thread_num + +#ifdef _MPI +#ifndef NOMPIMOD + use mpi +#endif +#endif + + implicit none + +#ifdef _MPI +#ifdef NOMPIMOD + include 'mpif.h' +#endif +#endif + + character(len=*), intent(in) :: mesg !< Input: Abort message + logical, intent(in), optional :: deferredAbort !< Input: Defer call to abort until later + + integer :: threadNum + +#ifdef _MPI + integer :: mpi_ierr, mpi_errcode, my_proc_id, nprocs +#endif + + character(len=StrKIND) :: errorFile + integer :: errorUnit + logical :: local_deferredAbort + + if (present(deferredAbort)) then + local_deferredAbort = deferredAbort + else + local_deferredAbort = .false. + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + write(stderrUnit,*) trim(mesg) + end if + + + if (.not. 
local_deferredAbort) then + close(stderrUnit) + close(stdoutUnit) + end if + +#ifdef _MPI + call MPI_Comm_rank(MPI_COMM_WORLD, my_proc_id, mpi_ierr) + call MPI_Comm_size(MPI_COMM_WORLD, nprocs, mpi_ierr) + if (nprocs < 1E4) then + write(errorFile,fmt='(a,i4.4,a)') 'log.', my_proc_id, '.abort' + else if (nprocs < 1E5) then + write(errorFile,fmt='(a,i5.5,a)') 'log.', my_proc_id, '.abort' + else if (nprocs < 1E6) then + write(errorFile,fmt='(a,i6.6,a)') 'log.', my_proc_id, '.abort' + else if (nprocs < 1E7) then + write(errorFile,fmt='(a,i7.7,a)') 'log.', my_proc_id, '.abort' + else if (nprocs < 1E8) then + write(errorFile,fmt='(a,i8.8,a)') 'log.', my_proc_id, '.abort' + else + write(errorFile,fmt='(a,i9.9,a)') 'log.', my_proc_id, '.abort' + end if +#else + errorFile = 'log.abort' +#endif + + if ( threadNum == 0 ) then + call mpas_new_unit(errorUnit) + open(unit=errorUnit, file=trim(errorFile), form='formatted', position='append') + write(errorUnit,*) trim(mesg) + close(errorUnit) + end if + + if (.not. local_deferredAbort) then +#ifdef _MPI + call MPI_Abort(MPI_COMM_WORLD, mpi_errcode, mpi_ierr) +#else + stop +#endif + end if + + end subroutine mpas_dmpar_global_abort!}}} + +end module mpas_abort diff --git a/src/framework/mpas_attlist.F b/src/framework/mpas_attlist.F index c39c2f330b..bbebda1470 100644 --- a/src/framework/mpas_attlist.F +++ b/src/framework/mpas_attlist.F @@ -460,6 +460,62 @@ subroutine mpas_get_att_text(attList, attName, attValue, ierr)!{{{ end subroutine mpas_get_att_text!}}} +!*********************************************************************** +! +! routine mpas_duplicate_attlist +! +!> \brief MPAS duplicate attribute list routine +!> \author Doug Jacobsen +!> \date 04/26/2016 +!> \details +!> This routine creates a copy of an attribute list, and returns it as destAttList. +!> This routine assumes that destAttList is empty, however it will remove every +!> attribute from destAttList before it attempts to copy into it. +! 
+!----------------------------------------------------------------------- + subroutine mpas_duplicate_attlist(srcAttList, destAttList, ierr)!{{{ + + implicit none + + type ( att_list_type ), pointer :: srcAttList !< Input: Source attribute list + type ( att_list_type ), pointer :: destAttList !< Output: Destination attribute list + integer, intent(out), optional :: ierr !< Optional Output: Error code + + type ( att_list_type ), pointer :: srcCursor + + if ( present(ierr) ) ierr = 0 + + if ( associated(destAttList) ) then + call mpas_deallocate_attlist(destAttList) + end if + + allocate(destAttList) + + destAttList % attType = -1 + destAttList % attName = '' + nullify(destAttList % next) + nullify(destAttList % attValueIntA) + nullify(destAttList % attValueRealA) + + srcCursor => srcAttList + do while ( associated(srcCursor) ) + if ( srcCursor % attType == MPAS_ATT_INT ) then + call mpas_add_att(destAttList, srcCursor % attName, srcCursor % attValueInt) + else if ( srcCursor % attType == MPAS_ATT_INTA ) then + call mpas_add_att(destAttList, srcCursor % attName, srcCursor % attValueIntA) + else if ( srcCursor % attType == MPAS_ATT_REAL ) then + call mpas_add_att(destAttList, srcCursor % attName, srcCursor % attValueReal) + else if ( srcCursor % attType == MPAS_ATT_REALA ) then + call mpas_add_att(destAttList, srcCursor % attName, srcCursor % attValueRealA) + else if ( srcCursor % attType == MPAS_ATT_TEXT ) then + call mpas_add_att(destAttList, srcCursor % attName, srcCursor % attValueText) + end if + + srcCursor => srcCursor % next + end do + + end subroutine mpas_duplicate_attlist!}}} + !*********************************************************************** ! ! 
routine mpas_remove_att diff --git a/src/framework/mpas_attlist_types.inc b/src/framework/mpas_attlist_types.inc index 2bf3c3057f..0d2652e063 100644 --- a/src/framework/mpas_attlist_types.inc +++ b/src/framework/mpas_attlist_types.inc @@ -4,6 +4,10 @@ integer, parameter :: MPAS_ATT_REALA = 4 integer, parameter :: MPAS_ATT_TEXT = 5 + type att_lists_type + type (att_list_type), pointer :: attList => null() + end type att_lists_type + ! Derived type for holding field attributes type att_list_type character (len=StrKIND) :: attName diff --git a/src/framework/mpas_block_creator.F b/src/framework/mpas_block_creator.F index eb5a328d79..da116084bf 100644 --- a/src/framework/mpas_block_creator.F +++ b/src/framework/mpas_block_creator.F @@ -30,7 +30,7 @@ module mpas_block_creator use mpas_domain_routines use mpas_field_routines use mpas_pool_routines - use mpas_configure + use mpas_abort, only : mpas_dmpar_global_abort contains @@ -1053,7 +1053,15 @@ subroutine mpas_block_creator_finalize_block_phase1(nHalos, blocklist, nCellsSol nullify(indexToCellIDPoolField % sendList) nullify(indexToCellIDPoolField % recvList) nullify(indexToCellIDPoolField % copyList) + allocate(indexToCellIDPoolField % attLists(1)) + allocate(indexToCellIDPoolField % attLists(1) % attList) + indexToCellIDPoolField % attLists(1) % attList % attName = '' + indexToCellIDPoolField % attLists(1) % attList % attType = -1 + nullify(indexToCellIDPoolField % attLists(1) % attList % next) + nullify(indexToCellIDPoolField % attLists(1) % attList % attValueIntA) + nullify(indexToCellIDPoolField % attLists(1) % attList % attValueRealA) allocate(indexToCellIDPoolField % array(nCells+1)) + indexToCellIDPoolField % array(nCells+1) = 0 call mpas_pool_add_field(block_ptr % allFields, 'indexToCellID_blk', indexToCellIDPoolField) @@ -1071,7 +1079,15 @@ subroutine mpas_block_creator_finalize_block_phase1(nHalos, blocklist, nCellsSol nullify(indexToEdgeIDPoolField % sendList) nullify(indexToEdgeIDPoolField % recvList) 
nullify(indexToEdgeIDPoolField % copyList) + allocate(indexToEdgeIDPoolField % attLists(1)) + allocate(indexToEdgeIDPoolField % attLists(1) % attList) + indexToEdgeIDPoolField % attLists(1) % attList % attName = '' + indexToEdgeIDPoolField % attLists(1) % attList % attType = -1 + nullify(indexToEdgeIDPoolField % attLists(1) % attList % next) + nullify(indexToEdgeIDPoolField % attLists(1) % attList % attValueIntA) + nullify(indexToEdgeIDPoolField % attLists(1) % attList % attValueRealA) allocate(indexToEdgeIDPoolField % array(nEdges+1)) + indexToEdgeIDPoolField % array(nEdges+1) = 0 call mpas_pool_add_field(block_ptr % allFields, 'indexToEdgeID_blk', indexToEdgeIDPoolField) @@ -1090,7 +1106,15 @@ subroutine mpas_block_creator_finalize_block_phase1(nHalos, blocklist, nCellsSol nullify(indexToVertexIDPoolField % sendList) nullify(indexToVertexIDPoolField % recvList) nullify(indexToVertexIDPoolField % copyList) + allocate(indexToVertexIDPoolField % attLists(1)) + allocate(indexToVertexIDPoolField % attLists(1) % attList) + indexToVertexIDPoolField % attLists(1) % attList % attName = '' + indexToVertexIDPoolField % attLists(1) % attList % attType = -1 + nullify(indexToVertexIDPoolField % attLists(1) % attList % next) + nullify(indexToVertexIDPoolField % attLists(1) % attList % attValueIntA) + nullify(indexToVertexIDPoolField % attLists(1) % attList % attValueRealA) allocate(indexToVertexIDPoolField % array(nVertices+1)) + indexToVertexIDPoolField % array(nVertices+1) = 0 call mpas_pool_add_field(block_ptr % allFields, 'indexToVertexID_blk', indexToVertexIDPoolField) @@ -1232,6 +1256,11 @@ subroutine mpas_block_creator_finalize_block_phase2(stream_manager, blocklist, r end do call mpas_pool_set_error_level(err_level) + iErr = block_ptr % domain % core % setup_decomposed_dimensions(block_ptr, stream_manager, readableDimensions, block_ptr % dimensions, domain % dminfo % total_blocks) + if ( iErr /= 0 ) then + call mpas_dmpar_global_abort('ERROR: Decomposed dimension setup 
failed for core ' // trim(block_ptr % domain % core % coreName)) + end if + iErr = block_ptr % domain % core % setup_derived_dimensions(readableDimensions, block_ptr % dimensions, block_ptr % configs) if ( iErr /= 0 ) then call mpas_dmpar_global_abort('ERROR: Derived dimension setup failed for core ' // trim(block_ptr % domain % core % coreName)) @@ -1254,6 +1283,13 @@ subroutine mpas_block_creator_finalize_block_phase2(stream_manager, blocklist, r ownedIndices % isActive = .true. ownedIndices % isVarArray = .false. ownedIndices % isPersistent = .true. + allocate(ownedIndices % attLists(1)) + allocate(ownedIndices % attLists(1) % attList) + ownedIndices % attLists(1) % attList % attName = '' + ownedIndices % attLists(1) % attList % attType = -1 + nullify(ownedIndices % attLists(1) % attList % next) + nullify(ownedIndices % attLists(1) % attList % attValueIntA) + nullify(ownedIndices % attLists(1) % attList % attValueRealA) call mpas_pool_add_field(block_ptr % allFields, 'nCellsOwnedIndices', ownedIndices) indexField % array(:) = indexFieldBlk % array(:) @@ -1274,6 +1310,13 @@ subroutine mpas_block_creator_finalize_block_phase2(stream_manager, blocklist, r ownedIndices % isActive = .true. ownedIndices % isVarArray = .false. ownedIndices % isPersistent = .true. + allocate(ownedIndices % attLists(1)) + allocate(ownedIndices % attLists(1) % attList) + ownedIndices % attLists(1) % attList % attName = '' + ownedIndices % attLists(1) % attList % attType = -1 + nullify(ownedIndices % attLists(1) % attList % next) + nullify(ownedIndices % attLists(1) % attList % attValueIntA) + nullify(ownedIndices % attLists(1) % attList % attValueRealA) call mpas_pool_add_field(block_ptr % allFields, 'nEdgesOwnedIndices', ownedIndices) indexField % array(:) = indexFieldBlk % array(:) @@ -1294,6 +1337,13 @@ subroutine mpas_block_creator_finalize_block_phase2(stream_manager, blocklist, r ownedIndices % isActive = .true. ownedIndices % isVarArray = .false. 
ownedIndices % isPersistent = .true. + allocate(ownedIndices % attLists(1)) + allocate(ownedIndices % attLists(1) % attList) + ownedIndices % attLists(1) % attList % attName = '' + ownedIndices % attLists(1) % attList % attType = -1 + nullify(ownedIndices % attLists(1) % attList % next) + nullify(ownedIndices % attLists(1) % attList % attValueIntA) + nullify(ownedIndices % attLists(1) % attList % attValueRealA) call mpas_pool_add_field(block_ptr % allFields, 'nVerticesOwnedIndices', ownedIndices) indexField % array(:) = indexFieldBlk % array(:) @@ -1530,94 +1580,104 @@ recursive subroutine mpas_block_creator_allocate_pool_fields(currentPool, dimens do timeLev = 1, poolItr % nTimeLevels call mpas_pool_get_field(currentPool, poolItr % memberName, real1DField, timeLev) - do iDim = 1, poolItr % nDims - call mpas_pool_get_dimension(currentPool, real1DField % dimNames(iDim), tempDim) + if ( real1DField % isActive ) then + do iDim = 1, poolItr % nDims + call mpas_pool_get_dimension(currentPool, real1DField % dimNames(iDim), tempDim) - if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then - call missing_dim_abort(real1DField % dimNames(iDim), poolItr % memberName) - end if - real1DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & - real1DField % dimNames(iDim), iErr = localErr) - end do + if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then + call missing_dim_abort(real1DField % dimNames(iDim), poolItr % memberName) + end if + real1DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & + real1DField % dimNames(iDim), iErr = localErr) + end do - if ( real1DField % isActive .and. 
real1DField % isPersistent ) then - allocate(real1DField % array(real1DField % dimSizes(1))) - real1DField % array(:) = real1DField % defaultValue + if ( real1DField % isPersistent ) then + allocate(real1DField % array(real1DField % dimSizes(1))) + real1DField % array(:) = real1DField % defaultValue + end if end if end do else if ( poolItr % nDims == 2 ) then do timeLev = 1, poolItr % nTimeLevels call mpas_pool_get_field(currentPool, poolItr % memberName, real2DField, timeLev) - do iDim = 1, poolItr % nDims - call mpas_pool_get_dimension(currentPool, real2DField % dimNames(iDim), tempDim) - if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then - call missing_dim_abort(real2DField % dimNames(iDim), poolItr % memberName) + if ( real2DField % isActive ) then + do iDim = 1, poolItr % nDims + call mpas_pool_get_dimension(currentPool, real2DField % dimNames(iDim), tempDim) + if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then + call missing_dim_abort(real2DField % dimNames(iDim), poolItr % memberName) + end if + real2DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & + real2DField % dimNames(iDim), iErr = localErr) + end do + + if ( real2DField % isPersistent ) then + allocate(real2DField % array(real2DField % dimSizes(1), real2DField % dimSizes(2))) + real2DField % array(:,:) = real2DField % defaultValue end if - real2DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & - real2DField % dimNames(iDim), iErr = localErr) - end do - - if ( real2DField % isActive .and. 
real2DField % isPersistent ) then - allocate(real2DField % array(real2DField % dimSizes(1), real2DField % dimSizes(2))) - real2DField % array(:,:) = real2DField % defaultValue end if end do else if ( poolItr % nDims == 3 ) then do timeLev = 1, poolItr % nTimeLevels call mpas_pool_get_field(currentPool, poolItr % memberName, real3DField, timeLev) - do iDim = 1, poolItr % nDims - call mpas_pool_get_dimension(currentPool, real3DField % dimNames(iDim), tempDim) - if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then - call missing_dim_abort(real3DField % dimNames(iDim), poolItr % memberName) + if ( real3DField % isActive ) then + do iDim = 1, poolItr % nDims + call mpas_pool_get_dimension(currentPool, real3DField % dimNames(iDim), tempDim) + if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then + call missing_dim_abort(real3DField % dimNames(iDim), poolItr % memberName) + end if + real3DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & + real3DField % dimNames(iDim), iErr = localErr) + end do + + if ( real3DField % isPersistent ) then + allocate(real3DField % array(real3DField % dimSizes(1), real3DField % dimSizes(2), real3DField % dimSizes(3))) + real3DField % array(:,:,:) = real3DField % defaultValue end if - real3DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & - real3DField % dimNames(iDim), iErr = localErr) - end do - - if ( real3DField % isActive .and. real3DField % isPersistent ) then - allocate(real3DField % array(real3DField % dimSizes(1), real3DField % dimSizes(2), real3DField % dimSizes(3))) - real3DField % array(:,:,:) = real3DField % defaultValue end if end do else if ( poolItr % nDims == 4 ) then do timeLev = 1, poolItr % nTimeLevels call mpas_pool_get_field(currentPool, poolItr % memberName, real4DField, timeLev) - do iDim = 1, poolItr % nDims - call mpas_pool_get_dimension(currentPool, real4DField % dimNames(iDim), tempDim) - if ( .not. associated(tempDim) .or. 
tempDim == MPAS_MISSING_DIM ) then - call missing_dim_abort(real4DField % dimNames(iDim), poolItr % memberName) + if ( real4DField % isActive ) then + do iDim = 1, poolItr % nDims + call mpas_pool_get_dimension(currentPool, real4DField % dimNames(iDim), tempDim) + if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then + call missing_dim_abort(real4DField % dimNames(iDim), poolItr % memberName) + end if + real4DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & + real4DField % dimNames(iDim), iErr = localErr) + end do + + if ( real4DField % isPersistent ) then + allocate(real4DField % array(real4DField % dimSizes(1), real4DField % dimSizes(2), & + real4DField % dimSizes(3), real4DField % dimSizes(4))) + real4DField % array(:,:,:,:) = real4DField % defaultValue end if - real4DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & - real4DField % dimNames(iDim), iErr = localErr) - end do - - if ( real4DField % isActive .and. real4DField % isPersistent ) then - allocate(real4DField % array(real4DField % dimSizes(1), real4DField % dimSizes(2), & - real4DField % dimSizes(3), real4DField % dimSizes(4))) - real4DField % array(:,:,:,:) = real4DField % defaultValue end if end do else if ( poolItr % nDims == 5 ) then do timeLev = 1, poolItr % nTimeLevels call mpas_pool_get_field(currentPool, poolItr % memberName, real5DField, timeLev) - do iDim = 1, poolItr % nDims - call mpas_pool_get_dimension(currentPool, real5DField % dimNames(iDim), tempDim) - if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then - call missing_dim_abort(real5DField % dimNames(iDim), poolItr % memberName) + if ( real5DField % isActive ) then + do iDim = 1, poolItr % nDims + call mpas_pool_get_dimension(currentPool, real5DField % dimNames(iDim), tempDim) + if ( .not. associated(tempDim) .or. 
tempDim == MPAS_MISSING_DIM ) then + call missing_dim_abort(real5DField % dimNames(iDim), poolItr % memberName) + end if + real5DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & + real5DField % dimNames(iDim), iErr = localErr) + end do + + if ( real5DField % isPersistent ) then + allocate(real5DField % array(real5DField % dimSizes(1), real5DField % dimSizes(2), & + real5DField % dimSizes(3), real5DField % dimSizes(4), & + real5DField % dimSizes(5))) + real5DField % array(:,:,:,:,:) = real5DField % defaultValue end if - real5DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & - real5DField % dimNames(iDim), iErr = localErr) - end do - - if ( real5DField % isActive .and. real5DField % isPersistent ) then - allocate(real5DField % array(real5DField % dimSizes(1), real5DField % dimSizes(2), & - real5DField % dimSizes(3), real5DField % dimSizes(4), & - real5DField % dimSizes(5))) - real5DField % array(:,:,:,:,:) = real5DField % defaultValue end if end do end if @@ -1626,54 +1686,60 @@ recursive subroutine mpas_block_creator_allocate_pool_fields(currentPool, dimens do timeLev = 1, poolItr % nTimeLevels call mpas_pool_get_field(currentPool, poolItr % memberName, int1DField, timeLev) - do iDim = 1, poolItr % nDims - call mpas_pool_get_dimension(currentPool, int1DField % dimNames(iDim), tempDim) - if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then - call missing_dim_abort(int1DField % dimNames(iDim), poolItr % memberName) + if ( int1DField % isActive ) then + do iDim = 1, poolItr % nDims + call mpas_pool_get_dimension(currentPool, int1DField % dimNames(iDim), tempDim) + if ( .not. associated(tempDim) .or. 
tempDim == MPAS_MISSING_DIM ) then + call missing_dim_abort(int1DField % dimNames(iDim), poolItr % memberName) + end if + int1DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & + int1DField % dimNames(iDim), iErr = localErr) + end do + + if ( int1DField % isPersistent ) then + allocate(int1DField % array(int1DField % dimSizes(1))) + int1DField % array(:) = int1DField % defaultValue end if - int1DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & - int1DField % dimNames(iDim), iErr = localErr) - end do - - if ( int1DField % isActive .and. int1DField % isPersistent ) then - allocate(int1DField % array(int1DField % dimSizes(1))) - int1DField % array(:) = int1DField % defaultValue end if end do else if ( poolItr % nDims == 2 ) then do timeLev = 1, poolItr % nTimeLevels call mpas_pool_get_field(currentPool, poolItr % memberName, int2DField, timeLev) - do iDim = 1, poolItr % nDims - call mpas_pool_get_dimension(currentPool, int2DField % dimNames(iDim), tempDim) - if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then - call missing_dim_abort(int2DField % dimNames(iDim), poolItr % memberName) + if ( int2DField % isActive ) then + do iDim = 1, poolItr % nDims + call mpas_pool_get_dimension(currentPool, int2DField % dimNames(iDim), tempDim) + if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then + call missing_dim_abort(int2DField % dimNames(iDim), poolItr % memberName) + end if + int2DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & + int2DField % dimNames(iDim), iErr = localErr) + end do + + if ( int2DField % isPersistent ) then + allocate(int2DField % array(int2DField % dimSizes(1), int2DField % dimSizes(2))) + int2DField % array(:,:) = int2DField % defaultValue end if - int2DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & - int2DField % dimNames(iDim), iErr = localErr) - end do - - if ( int2DField % isActive .and. 
int2DField % isPersistent ) then - allocate(int2DField % array(int2DField % dimSizes(1), int2DField % dimSizes(2))) - int2DField % array(:,:) = int2DField % defaultValue end if end do else if ( poolItr % nDims == 3 ) then do timeLev = 1, poolItr % nTimeLevels call mpas_pool_get_field(currentPool, poolItr % memberName, int3DField, timeLev) - do iDim = 1, poolItr % nDims - call mpas_pool_get_dimension(currentPool, int3DField % dimNames(iDim), tempDim) - if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then - call missing_dim_abort(int3DField % dimNames(iDim), poolItr % memberName) + if ( int3DField % isActive ) then + do iDim = 1, poolItr % nDims + call mpas_pool_get_dimension(currentPool, int3DField % dimNames(iDim), tempDim) + if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then + call missing_dim_abort(int3DField % dimNames(iDim), poolItr % memberName) + end if + int3DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & + int3DField % dimNames(iDim), iErr = localErr) + end do + + if ( int3DField % isPersistent ) then + allocate(int3DField % array(int3DField % dimSizes(1), int3DField % dimSizes(2), int3DField % dimSizes(3))) + int3DField % array(:,:,:) = int3DField % defaultValue end if - int3DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & - int3DField % dimNames(iDim), iErr = localErr) - end do - - if ( int3DField % isActive .and. 
int3DField % isPersistent ) then - allocate(int3DField % array(int3DField % dimSizes(1), int3DField % dimSizes(2), int3DField % dimSizes(3))) - int3DField % array(:,:,:) = int3DField % defaultValue end if end do end if @@ -1682,18 +1748,20 @@ recursive subroutine mpas_block_creator_allocate_pool_fields(currentPool, dimens do timeLev = 1, poolItr % nTimeLevels call mpas_pool_get_field(currentPool, poolItr % memberName, char1DField, timeLev) - do iDim = 1, poolItr % nDims - call mpas_pool_get_dimension(currentPool, char1DField % dimNames(iDim), tempDim) - if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then - call missing_dim_abort(char1DField % dimNames(iDim), poolItr % memberName) + if ( char1DField % isActive ) then + do iDim = 1, poolItr % nDims + call mpas_pool_get_dimension(currentPool, char1DField % dimNames(iDim), tempDim) + if ( .not. associated(tempDim) .or. tempDim == MPAS_MISSING_DIM ) then + call missing_dim_abort(char1DField % dimNames(iDim), poolItr % memberName) + end if + char1DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & + char1DField % dimNames(iDim), iErr = localErr) + end do + + if ( char1DField % isPersistent ) then + allocate(char1DField % array(char1DField % dimSizes(1))) + char1DField % array(:) = char1DField % defaultValue end if - char1DField % dimSizes(iDim) = tempDim + mpas_dimension_num_garbage_elements( & - char1DField % dimNames(iDim), iErr = localErr) - end do - - if ( char1DField % isActive .and. 
char1DField % isPersistent ) then - allocate(char1DField % array(char1DField % dimSizes(1))) - char1DField % array(:) = char1DField % defaultValue end if end do end if diff --git a/src/framework/mpas_block_decomp.F b/src/framework/mpas_block_decomp.F index 48d8a52efb..042b1718a9 100644 --- a/src/framework/mpas_block_decomp.F +++ b/src/framework/mpas_block_decomp.F @@ -23,7 +23,6 @@ module mpas_block_decomp use mpas_hash use mpas_sort use mpas_derived_types - use mpas_configure use mpas_io_units type graph @@ -35,11 +34,6 @@ module mpas_block_decomp integer, dimension(:,:), pointer :: adjacencyList end type graph - integer :: total_blocks - logical :: explicitDecomp - integer, dimension(:), allocatable :: block_proc_list - integer, dimension(:), allocatable :: block_local_id_list - contains !*********************************************************************** @@ -55,11 +49,9 @@ module mpas_block_decomp !----------------------------------------------------------------------- subroutine mpas_block_decomp_cells_for_proc(dminfo, partial_global_graph_info, local_cell_list, block_id, block_start, block_count, numBlocks, explicitProcDecomp, blockFilePrefix, procFilePrefix)!{{{ - use mpas_configure - implicit none - type (dm_info), intent(in) :: dminfo !< Input: domain information + type (dm_info), intent(inout) :: dminfo !< Input: domain information type (graph), intent(in) :: partial_global_graph_info !< Input: Global graph information integer, dimension(:), pointer :: local_cell_list !< Output: list of cells this processor owns, ordered by block integer, dimension(:), pointer :: block_id !< Output: list of global block id's this processor owns @@ -79,7 +71,7 @@ subroutine mpas_block_decomp_cells_for_proc(dminfo, partial_global_graph_info, l integer, dimension(:,:), allocatable :: sorted_local_cell_list integer :: i, global_block_id, local_block_id, owning_proc, iunit, istatus - integer :: blocks_per_proc + integer :: blocks_per_proc, err integer, dimension(:), pointer 
:: local_nvertices character (len=StrKIND) :: filename @@ -88,17 +80,17 @@ subroutine mpas_block_decomp_cells_for_proc(dminfo, partial_global_graph_info, l no_blocks = .false. if (numBlocks == 0) then - total_blocks = dminfo % nProcs + dminfo % total_blocks = dminfo % nProcs else - total_blocks = numBlocks + dminfo % total_blocks = numBlocks end if - explicitDecomp = explicitProcDecomp + dminfo % explicitDecomp = explicitProcDecomp call mpas_build_block_proc_list(dminfo, procFilePrefix) call mpas_get_blocks_per_proc(dminfo, dminfo % my_proc_id, blocks_per_proc) - if(total_blocks > 1) then + if(dminfo % total_blocks > 1) then allocate(local_nvertices(dminfo % nprocs)) allocate(global_start(dminfo % nprocs)) allocate(global_cell_list(partial_global_graph_info % nVerticesTotal)) @@ -107,38 +99,52 @@ subroutine mpas_block_decomp_cells_for_proc(dminfo, partial_global_graph_info, l if (dminfo % my_proc_id == IO_NODE) then iunit = 50 + dminfo % my_proc_id - if (total_blocks < 10) then - write(filename,'(a,i1)') trim(blockFilePrefix), total_blocks - else if (total_blocks < 100) then - write(filename,'(a,i2)') trim(blockFilePrefix), total_blocks - else if (total_blocks < 1000) then - write(filename,'(a,i3)') trim(blockFilePrefix), total_blocks - else if (total_blocks < 10000) then - write(filename,'(a,i4)') trim(blockFilePrefix), total_blocks - else if (total_blocks < 100000) then - write(filename,'(a,i5)') trim(blockFilePrefix), total_blocks - else if (total_blocks < 1000000) then - write(filename,'(a,i6)') trim(blockFilePrefix), total_blocks - else if (total_blocks < 10000000) then - write(filename,'(a,i7)') trim(blockFilePrefix), total_blocks + if (dminfo % total_blocks < 10) then + write(filename,'(a,i1)') trim(blockFilePrefix), dminfo % total_blocks + else if (dminfo % total_blocks < 100) then + write(filename,'(a,i2)') trim(blockFilePrefix), dminfo % total_blocks + else if (dminfo % total_blocks < 1000) then + write(filename,'(a,i3)') trim(blockFilePrefix), dminfo % 
total_blocks + else if (dminfo % total_blocks < 10000) then + write(filename,'(a,i4)') trim(blockFilePrefix), dminfo % total_blocks + else if (dminfo % total_blocks < 100000) then + write(filename,'(a,i5)') trim(blockFilePrefix), dminfo % total_blocks + else if (dminfo % total_blocks < 1000000) then + write(filename,'(a,i6)') trim(blockFilePrefix), dminfo % total_blocks + else if (dminfo % total_blocks < 10000000) then + write(filename,'(a,i7)') trim(blockFilePrefix), dminfo % total_blocks + end if open(unit=iunit, file=trim(filename), form='formatted', status='old', iostat=istatus) if (istatus /= 0) then - write(stderrUnit,*) 'Could not open block decomposition file for ',total_blocks,' blocks.' + write(stderrUnit,*) 'ERROR: Could not open block decomposition file for ',dminfo % total_blocks,' blocks.' write(stderrUnit,*) 'Filename: ',trim(filename) call mpas_dmpar_abort(dminfo) end if local_nvertices(:) = 0 do i=1,partial_global_graph_info % nVerticesTotal - read(unit=iunit, fmt=*) global_block_id + read(unit=iunit, fmt=*, iostat=err) global_block_id + + if ( err .ne. 0 ) then + write(stderrUnit, *) 'ERROR: Decomposition file: ', trim(filename), ' contains less than ', & + partial_global_graph_info % nVerticesTotal, ' cells.' + call mpas_dmpar_abort(dminfo) + end if call mpas_get_owning_proc(dminfo, global_block_id, owning_proc) local_nvertices(owning_proc+1) = local_nvertices(owning_proc+1) + 1 end do + + read(unit=iunit, fmt=*, iostat=err) + + if ( err == 0 ) then + write(stderrUnit, *) 'ERROR: Decomposition file: ', trim(filename), ' contains more than ', & + partial_global_graph_info % nVerticesTotal, ' cells.' 
+ call mpas_dmpar_abort(dminfo) + end if - allocate(global_cell_list(partial_global_graph_info % nVerticesTotal)) + allocate(global_cell_list(partial_global_graph_info % nVerticesTotal)) global_start(1) = 1 do i=2,dminfo % nprocs @@ -148,14 +154,14 @@ subroutine mpas_block_decomp_cells_for_proc(dminfo, partial_global_graph_info, l rewind(unit=iunit) do i=1,partial_global_graph_info % nVerticesTotal - read(unit=iunit, fmt=*) global_block_id + read(unit=iunit, fmt=*, iostat=err) global_block_id call mpas_get_owning_proc(dminfo, global_block_id, owning_proc) global_cell_list(global_start(owning_proc+1)) = i global_block_list(global_start(owning_proc+1)) = global_block_id global_start(owning_proc+1) = global_start(owning_proc+1) + 1 end do - + global_start(1) = 0 do i=2,dminfo % nprocs global_start(i) = global_start(i-1) + local_nvertices(i-1) @@ -503,11 +509,11 @@ subroutine mpas_get_blocks_per_proc(dminfo, proc_number, blocks_per_proc)!{{{ integer :: blocks_per_proc_min, even_blocks, remaining_blocks integer :: i, owning_proc, local_block_id - if(.not. explicitDecomp) then - if(total_blocks > dminfo % nProcs) then - blocks_per_proc_min = total_blocks / dminfo % nProcs - remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs) - even_blocks = total_blocks - remaining_blocks + if(.not. 
dminfo % explicitDecomp) then + if(dminfo % total_blocks > dminfo % nProcs) then + blocks_per_proc_min = dminfo % total_blocks / dminfo % nProcs + remaining_blocks = dminfo % total_blocks - (blocks_per_proc_min * dminfo % nProcs) + even_blocks = dminfo % total_blocks - remaining_blocks blocks_per_proc = blocks_per_proc_min @@ -515,7 +521,7 @@ subroutine mpas_get_blocks_per_proc(dminfo, proc_number, blocks_per_proc)!{{{ blocks_per_proc = blocks_per_proc + 1 end if else - if(dminfo % my_proc_id < total_blocks) then + if(dminfo % my_proc_id < dminfo % total_blocks) then blocks_per_proc = 1 else blocks_per_proc = 0 @@ -523,7 +529,7 @@ subroutine mpas_get_blocks_per_proc(dminfo, proc_number, blocks_per_proc)!{{{ end if else blocks_per_proc = 0 - do i = 0, total_blocks-1 + do i = 0, dminfo % total_blocks-1 call mpas_get_owning_proc(dminfo, i, owning_proc) if(owning_proc == proc_number) then call mpas_get_local_block_id(dminfo, i, local_block_id) @@ -552,11 +558,11 @@ subroutine mpas_get_local_block_id(dminfo, global_block_number, local_block_numb integer :: blocks_per_proc_min, even_blocks, remaining_blocks - if(.not.explicitDecomp) then - if(total_blocks > dminfo % nProcs) then - blocks_per_proc_min = total_blocks / dminfo % nProcs - remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs) - even_blocks = total_blocks - remaining_blocks + if(.not.dminfo % explicitDecomp) then + if(dminfo % total_blocks > dminfo % nProcs) then + blocks_per_proc_min = dminfo % total_blocks / dminfo % nProcs + remaining_blocks = dminfo % total_blocks - (blocks_per_proc_min * dminfo % nProcs) + even_blocks = dminfo % total_blocks - remaining_blocks if(global_block_number > even_blocks) then local_block_number = blocks_per_proc_min @@ -567,7 +573,7 @@ subroutine mpas_get_local_block_id(dminfo, global_block_number, local_block_numb local_block_number = 0 end if else - local_block_number = block_local_id_list(global_block_number+1) + local_block_number = dminfo % 
block_local_id_list(global_block_number+1) end if end subroutine mpas_get_local_block_id!}}} @@ -589,11 +595,11 @@ subroutine mpas_get_owning_proc(dminfo, global_block_number, owning_proc)!{{{ integer :: blocks_per_proc_min, even_blocks, remaining_blocks - if(.not.explicitDecomp) then - if(total_blocks >= dminfo % nProcs) then - blocks_per_proc_min = total_blocks / dminfo % nProcs - remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs) - even_blocks = total_blocks - remaining_blocks + if(.not.dminfo % explicitDecomp) then + if(dminfo % total_blocks >= dminfo % nProcs) then + blocks_per_proc_min = dminfo % total_blocks / dminfo % nProcs + remaining_blocks = dminfo % total_blocks - (blocks_per_proc_min * dminfo % nProcs) + even_blocks = dminfo % total_blocks - remaining_blocks if((global_block_number+1) > even_blocks) then owning_proc = global_block_number - even_blocks @@ -604,7 +610,7 @@ subroutine mpas_get_owning_proc(dminfo, global_block_number, owning_proc)!{{{ owning_proc = global_block_number end if else - owning_proc = block_proc_list(global_block_number+1) + owning_proc = dminfo % block_proc_list(global_block_number+1) end if end subroutine mpas_get_owning_proc!}}} @@ -621,11 +627,9 @@ end subroutine mpas_get_owning_proc!}}} !----------------------------------------------------------------------- subroutine mpas_build_block_proc_list(dminfo, procFilePrefix)!{{{ - use mpas_configure - implicit none - type(dm_info), intent(in) :: dminfo !< Input: Domain information + type(dm_info), intent(inout) :: dminfo !< Input: Domain information character (len=*), intent(in) :: procFilePrefix integer :: iounit, istatus, i, owning_proc @@ -633,10 +637,10 @@ subroutine mpas_build_block_proc_list(dminfo, procFilePrefix)!{{{ integer, dimension(:), allocatable :: block_counter - if(.not.explicitDecomp) return + if(.not.dminfo % explicitDecomp) return - allocate(block_proc_list(total_blocks)) - allocate(block_local_id_list(total_blocks)) + allocate(dminfo % 
block_proc_list(dminfo % total_blocks)) + allocate(dminfo % block_local_id_list(dminfo % total_blocks)) if (dminfo % my_proc_id == IO_NODE) then allocate(block_counter(dminfo % nProcs)) @@ -661,22 +665,22 @@ subroutine mpas_build_block_proc_list(dminfo, procFilePrefix)!{{{ open(unit=iounit, file=trim(filename), form='formatted', status='old', iostat=istatus) - do i=1,total_blocks + do i=1,dminfo % total_blocks read(unit=iounit, fmt=*) owning_proc - block_proc_list(i) = owning_proc - block_local_id_list(i) = block_counter(owning_proc+1) + dminfo % block_proc_list(i) = owning_proc + dminfo % block_local_id_list(i) = block_counter(owning_proc+1) block_counter(owning_proc+1) = block_counter(owning_proc+1) + 1 end do close(unit=iounit) deallocate(block_counter) - call mpas_dmpar_bcast_ints(dminfo, total_blocks, block_proc_list) - call mpas_dmpar_bcast_ints(dminfo, total_blocks, block_local_id_list) + call mpas_dmpar_bcast_ints(dminfo, dminfo % total_blocks, dminfo % block_proc_list) + call mpas_dmpar_bcast_ints(dminfo, dminfo % total_blocks, dminfo % block_local_id_list) else - call mpas_dmpar_bcast_ints(dminfo, total_blocks, block_proc_list) - call mpas_dmpar_bcast_ints(dminfo, total_blocks, block_local_id_list) + call mpas_dmpar_bcast_ints(dminfo, dminfo % total_blocks, dminfo % block_proc_list) + call mpas_dmpar_bcast_ints(dminfo, dminfo % total_blocks, dminfo % block_local_id_list) endif end subroutine mpas_build_block_proc_list!}}} @@ -692,10 +696,12 @@ end subroutine mpas_build_block_proc_list!}}} !> This routine destroys the mapping of blocks to processors. ! 
!----------------------------------------------------------------------- - subroutine mpas_finish_block_proc_list()!{{{ - if(.not.explicitDecomp) return - deallocate(block_proc_list) - deallocate(block_local_id_list) + subroutine mpas_finish_block_proc_list(dminfo)!{{{ + type (dm_info), intent(inout) :: dminfo + + if(.not.dminfo % explicitDecomp) return + deallocate(dminfo % block_proc_list) + deallocate(dminfo % block_local_id_list) end subroutine mpas_finish_block_proc_list!}}} end module mpas_block_decomp diff --git a/src/framework/mpas_block_types.inc b/src/framework/mpas_block_types.inc index 0753d73eaf..f6d86fb461 100644 --- a/src/framework/mpas_block_types.inc +++ b/src/framework/mpas_block_types.inc @@ -21,13 +21,32 @@ integer :: blockID ! Unique global ID number for this block integer :: localBlockID ! Unique local ID number for this block + ! Pointer to domain that owns the block type (domain_type), pointer :: domain + ! Data structures for exchange lists for a block (exchanging standard fields) type (parallel_info), pointer :: parinfo + ! Linked list pointers type (block_type), pointer :: prev => null() type (block_type), pointer :: next => null() + ! Data structures for core specific data type (mpas_pool_type), pointer :: structs, dimensions, configs, packages + + ! Data structures for IO infrastructure type (mpas_pool_type), pointer :: allFields, allStructs + + ! Data types for particle lists {{{ + + ! Define the particle list on a block + type (mpas_particle_list_type), pointer :: particlelist => null() + + ! Define the list of neighboring blocks on a block, for exchanging particles + integer, dimension(:), pointer :: blockNeighs => null() + ! 
Define the list of neighboring processors on a block, for exchanging particles + integer, dimension(:), pointer :: procNeighs => null() + + !}}} + end type block_type diff --git a/src/framework/mpas_bootstrapping.F b/src/framework/mpas_bootstrapping.F index a17bca51a1..b7a5eaa057 100644 --- a/src/framework/mpas_bootstrapping.F +++ b/src/framework/mpas_bootstrapping.F @@ -14,10 +14,10 @@ module mpas_bootstrapping use mpas_block_decomp use mpas_block_creator use mpas_sort - use mpas_configure use mpas_timekeeping use mpas_io_streams use mpas_io_units + use mpas_abort, only : mpas_dmpar_global_abort integer :: readCellStart, readCellEnd, nReadCells @@ -67,6 +67,11 @@ module mpas_bootstrapping !> Attributes required to be present in the grid file: !> on_a_sphere !> sphere_radius + !> parent_id + !> mesh_spec + !> is_periodic + !> x_period + !> y_period !> ***** these are needed by mpas_block_creator_finalize_block_init() !> so they can be set in the mesh pool and queried by, e.g., !> mpas_initialize_vectors() @@ -87,8 +92,8 @@ subroutine mpas_bootstrap_framework_phase1(domain, mesh_filename, mesh_iotype) ! integer :: ierr type (MPAS_IO_Handle_type) :: inputHandle - character (len=StrKIND) :: c_on_a_sphere - real (kind=RKIND) :: r_sphere_radius + character (len=StrKIND) :: c_on_a_sphere, c_is_periodic, c_parent_id, c_file_id, c_mesh_spec + real (kind=RKIND) :: r_sphere_radius, r_x_period, r_y_period type (field1dInteger), pointer :: indexToCellIDField type (field1dInteger), pointer :: indexToEdgeIDField @@ -143,7 +148,7 @@ subroutine mpas_bootstrap_framework_phase1(domain, mesh_filename, mesh_iotype) ! 
nHalos = config_num_halos - inputHandle = MPAS_io_open(trim(mesh_filename), MPAS_IO_READ, mesh_iotype, ierr=ierr) + inputHandle = MPAS_io_open(trim(mesh_filename), MPAS_IO_READ, mesh_iotype, domain % ioContext, ierr=ierr) if (ierr /= MPAS_IO_NOERR) then write(stderrUnit,*) ' ' write(stderrUnit,*) '************************************************************************************' @@ -280,6 +285,78 @@ subroutine mpas_bootstrap_framework_phase1(domain, mesh_filename, mesh_iotype) ! domain % sphere_radius = r_sphere_radius end if + ! + ! We need to read in the file_id, parent_id, and mesh spec attributes as well, to + ! set them appropriately in the output files. + ! + call MPAS_io_get_att(inputHandle, 'file_id', c_file_id, ierr=ierr) + if ( ierr /= MPAS_IO_NOERR) then + write(stderrUnit, *) 'Warning: Attribute file_id not found in '//trim(mesh_filename) + write(stderrUnit, *) ' Using '''' for the previous file_id.' + c_file_id = '' + end if + + call MPAS_io_get_att(inputHandle, 'parent_id', c_parent_id, ierr=ierr) + if ( ierr /= MPAS_IO_NOERR) then + write(stderrUnit, *) 'Warning: Attribute parent_id not found in '//trim(mesh_filename) + write(stderrUnit, *) ' Setting parent_id to ''''' + c_parent_id = '' + end if + + if ( trim(c_file_id) == '' ) then + domain % parent_id = c_parent_id + else + domain % parent_id = trim(c_file_id) // NEW_LINE('A') // trim(c_parent_id) + end if + + call MPAS_io_get_att(inputHandle, 'mesh_spec', c_mesh_spec, ierr=ierr) + if ( ierr /= MPAS_IO_NOERR) then + write(stderrUnit, *) 'Warning: Attribute mesh_spec not found in '//trim(mesh_filename) + write(stderrUnit, *) ' Setting mesh_spec to ''0.0''' + domain % mesh_spec = '0.0' + else + domain % mesh_spec = c_mesh_spec + end if + + ! + ! Get attributes based on planar mesh + ! + if ( .not. 
domain % on_a_sphere ) then + call MPAS_io_get_att(inputHandle, 'is_periodic', c_is_periodic, ierr=ierr) + if ( ierr /= MPAS_IO_NOERR ) then + write(stderrUnit, *) 'Warning: is_periodic attribute not found in '//trim(mesh_filename) + write(stderrUnit, *) ' Setting is_periodic to .false.' + domain % is_periodic = .false. + else + if (index(c_is_periodic, 'YES') /= 0) then + domain % is_periodic = .true. + else + domain % is_periodic = .false. + end if + end if + else + domain % is_periodic = .false. + end if + + if ( domain % is_periodic ) then + call MPAS_io_get_att(inputHandle, 'x_period', r_x_period, ierr=ierr) + if ( ierr /= MPAS_IO_NOERR ) then + call mpas_dmpar_global_abort('ERROR: Required attribute ''x_period'' missing from periodic input mesh. Exiting...') + else + domain % x_period = r_x_period + end if + + call MPAS_io_get_att(inputHandle, 'y_period', r_y_period, ierr=ierr) + if ( ierr /= MPAS_IO_NOERR ) then + call mpas_dmpar_global_abort('ERROR: Required attribute ''y_period'' missing from periodic input mesh. Exiting...') + else + domain % y_period = r_y_period + end if + else + domain % x_period = 0 + domain % y_period = 0 + end if + ! Allocate blocks, and copy indexTo arrays into blocks call mpas_block_creator_finalize_block_phase1(nHalos, domain % blocklist, & @@ -422,7 +499,7 @@ subroutine mpas_bootstrap_framework_phase2(domain) !{{{ MPAS_STREAM_PROPERTY_IOTYPE, ioType, ierr = err_local) ! Try to open file - inputHandle = MPAS_io_open(trim(streamFilename), MPAS_IO_READ, ioType, ierr = err_local) + inputHandle = MPAS_io_open(trim(streamFilename), MPAS_IO_READ, ioType, domain % ioContext, ierr = err_local) ! If to determine if file was opened or not. if ( err_local == MPAS_IO_NOERR ) then diff --git a/src/framework/mpas_configure.F b/src/framework/mpas_configure.F deleted file mode 100644 index 3be6d3bf5d..0000000000 --- a/src/framework/mpas_configure.F +++ /dev/null @@ -1,38 +0,0 @@ -! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) -! 
and the University Corporation for Atmospheric Research (UCAR). -! -! Unless noted otherwise source code is licensed under the BSD license. -! Additional copyright and license information can be found in the LICENSE file -! distributed with this code, or at http://mpas-dev.github.com/license.html -! -!----------------------------------------------------------------------- -! mpas_configure -! -!> \brief MPAS Configuration routines. -!> \author Michael Duda, Doug Jacobsen -!> \date 03/26/13 -!> \details -!> This module will contain all namelist parameter definitions, as well as the routine which reads them from the namelist file. -! -!----------------------------------------------------------------------- -module mpas_configure - - use mpas_dmpar - use mpas_io_units - use mpas_derived_types - use mpas_pool_routines - - contains - -!----------------------------------------------------------------------- -! routine mpas_read_namelist -! -!> \brief MPAS read namelist routine -!> \author Michael Duda, Doug Jacobsen -!> \date 03/26/13 -!> \details -!> This routine reads and broadcasts the namelist file. -! -!----------------------------------------------------------------------- - -end module mpas_configure diff --git a/src/framework/mpas_constants.F b/src/framework/mpas_constants.F index 921671e2cb..1eec5d3565 100644 --- a/src/framework/mpas_constants.F +++ b/src/framework/mpas_constants.F @@ -29,7 +29,8 @@ module mpas_constants real (kind=RKIND), parameter :: rgas = 287.0 !< Constant: Gas constant for dry air [J kg-1 K-1] real (kind=RKIND), parameter :: rv = 461.6 !< Constant: Gas constant for water vapor [J kg-1 K-1] real (kind=RKIND), parameter :: rvord = rv/rgas ! - real (kind=RKIND), parameter :: cp = 1003.0 !< Constant: Specific heat of dry air at constant pressure [J kg-1 K-1] +! real (kind=RKIND), parameter :: cp = 1003.0 !< Constant: Specific heat of dry air at constant pressure [J kg-1 K-1] + real (kind=RKIND), parameter :: cp = 7.*rgas/2. 
!< Constant: Specific heat of dry air at constant pressure [J kg-1 K-1] real (kind=RKIND), parameter :: cv = cp - rgas !< Constant: Specific heat of dry air at constant volume [J kg-1 K-1] real (kind=RKIND), parameter :: cvpm = -cv/cp ! real (kind=RKIND), parameter :: prandtl = 1.0 !< Constant: Prandtl number diff --git a/src/framework/mpas_core_types.inc b/src/framework/mpas_core_types.inc index c0695b3d6a..65089c0754 100644 --- a/src/framework/mpas_core_types.inc +++ b/src/framework/mpas_core_types.inc @@ -21,11 +21,13 @@ end interface abstract interface - function mpas_setup_packages_function(configs, packages) result(iErr) + function mpas_setup_packages_function(configs, packages, iocontext) result(iErr) import mpas_pool_type + import mpas_io_context_type type (mpas_pool_type), intent(inout) :: configs type (mpas_pool_type), intent(inout) :: packages + type (mpas_io_context_type), intent(inout) :: iocontext integer :: iErr end function mpas_setup_packages_function end interface @@ -80,16 +82,30 @@ end interface abstract interface - function mpas_setup_setup_derived_dimensions_function(readDimensions, dimensionPool, configPool) result(iErr) + function mpas_setup_derived_dimensions_function(readDimensions, dimensionPool, configPool) result(iErr) import mpas_pool_type type (mpas_pool_type), intent(inout) :: readDimensions type (mpas_pool_type), intent(inout) :: dimensionPool type (mpas_pool_type), intent(inout) :: configPool integer :: iErr - end function mpas_setup_setup_derived_dimensions_function + end function mpas_setup_derived_dimensions_function end interface + abstract interface + function mpas_setup_decomposed_dimensions_function(block, streamManager, readDimensions, dimensionPool, totalBlocks) result(iErr) + import block_type + import mpas_streamManager_type + import mpas_pool_type + + type (block_type), intent(inout) :: block + type (mpas_streamManager_type), intent(inout) :: streamManager + type (mpas_pool_type), intent(inout) :: readDimensions + type 
(mpas_pool_type), intent(inout) :: dimensionPool + integer, intent(in) :: totalBlocks + integer :: iErr + end function mpas_setup_decomposed_dimensions_function + end interface abstract interface function mpas_core_init_function(domain, timeStamp) result(iErr) @@ -142,7 +158,8 @@ procedure (mpas_setup_clock_function), pointer, nopass :: setup_clock => null() procedure (mpas_setup_block_function), pointer, nopass :: setup_block => null() procedure (mpas_setup_immutable_streams_function), pointer, nopass :: setup_immutable_streams => null() - procedure (mpas_setup_setup_derived_dimensions_function), pointer, nopass :: setup_derived_dimensions => null() + procedure (mpas_setup_derived_dimensions_function), pointer, nopass :: setup_derived_dimensions => null() + procedure (mpas_setup_decomposed_dimensions_function), pointer, nopass :: setup_decomposed_dimensions => null() ! core_type is a linked list type (core_type), pointer :: next => null() diff --git a/src/framework/mpas_decomp.F b/src/framework/mpas_decomp.F index c5b72534ae..ac42874282 100644 --- a/src/framework/mpas_decomp.F +++ b/src/framework/mpas_decomp.F @@ -46,7 +46,6 @@ module mpas_decomp !----------------------------------------------------------------------- subroutine mpas_decomp_create_decomp_list(decompList)!{{{ type (mpas_decomp_list), pointer :: decompList - procedure (mpas_decomp_function), pointer :: decompFunc integer :: errLocal diff --git a/src/framework/mpas_derived_types.F b/src/framework/mpas_derived_types.F index 041ac289eb..f6d2b8fdc3 100644 --- a/src/framework/mpas_derived_types.F +++ b/src/framework/mpas_derived_types.F @@ -42,6 +42,8 @@ module mpas_derived_types #include "mpas_pool_types.inc" +#include "mpas_particle_list_types.inc" + #include "mpas_io_types.inc" #include "mpas_io_streams_types.inc" diff --git a/src/framework/mpas_dmpar.F b/src/framework/mpas_dmpar.F index ec823a7b02..211e4e89c5 100644 --- a/src/framework/mpas_dmpar.F +++ b/src/framework/mpas_dmpar.F @@ -17,13 +17,31 @@ 
!----------------------------------------------------------------------- module mpas_dmpar +#define COMMA , +#define DMPAR_DEBUG_WRITE(M) ! write(stderrUnit,*) M +#define DMPAR_WARNING_WRITE(M) write(stderrUnit,*) 'WARNING: '//M +#define DMPAR_ERROR_WRITE(M) write(stderrUnit,*) 'ERROR: '//M + use mpas_derived_types use mpas_sort use mpas_hash use mpas_io_units + use mpas_threading + use mpas_pool_routines + +#ifdef _MPI +#ifndef NOMPIMOD + use mpi +#endif +#endif + + implicit none + private #ifdef _MPI +#ifdef NOMPIMOD include 'mpif.h' +#endif integer, parameter :: MPI_INTEGERKIND = MPI_INTEGER integer, parameter :: MPI_2INTEGERKIND = MPI_2INTEGER @@ -36,8 +54,59 @@ module mpas_dmpar #endif #endif - integer, parameter :: IO_NODE = 0 - integer, parameter :: BUFSIZE = 6000 + integer, parameter, public :: IO_NODE = 0 + integer, parameter, public :: BUFSIZE = 6000 + + public :: mpas_dmpar_init + public :: mpas_dmpar_finalize + public :: mpas_dmpar_abort + public :: mpas_dmpar_bcast_int + public :: mpas_dmpar_bcast_ints + public :: mpas_dmpar_bcast_real + public :: mpas_dmpar_bcast_reals + public :: mpas_dmpar_bcast_double + public :: mpas_dmpar_bcast_doubles + public :: mpas_dmpar_bcast_logical + public :: mpas_dmpar_bcast_char + public :: mpas_dmpar_bcast_chars + public :: mpas_dmpar_sum_int + public :: mpas_dmpar_sum_real + public :: mpas_dmpar_min_int + public :: mpas_dmpar_min_real + public :: mpas_dmpar_max_int + public :: mpas_dmpar_max_real + public :: mpas_dmpar_minloc_int + public :: mpas_dmpar_minloc_real + public :: mpas_dmpar_maxloc_int + public :: mpas_dmpar_maxloc_real + public :: mpas_dmpar_minattributes_real + public :: mpas_dmpar_maxattributes_real + public :: mpas_dmpar_sum_int_array + public :: mpas_dmpar_min_int_array + public :: mpas_dmpar_max_int_array + public :: mpas_dmpar_sum_real_array + public :: mpas_dmpar_min_real_array + public :: mpas_dmpar_max_real_array + public :: mpas_dmpar_scatter_ints + public :: mpas_dmpar_get_index_range + public 
:: mpas_dmpar_compute_index_range + public :: mpas_dmpar_get_exch_list + public :: mpas_dmpar_build_comm_lists + public :: mpas_dmpar_init_multihalo_exchange_list + public :: mpas_dmpar_destroy_mulithalo_exchange_list + public :: mpas_dmpar_destroy_communication_list + public :: mpas_dmpar_destroy_exchange_list + public :: mpas_dmpar_get_time + public :: mpas_dmpar_exch_group_create + public :: mpas_dmpar_exch_group_destroy + public :: mpas_dmpar_exch_group_add_field + public :: mpas_dmpar_exch_group_remove_field + public :: mpas_dmpar_exch_group_begin_halo_exch + public :: mpas_dmpar_exch_group_local_halo_exch + public :: mpas_dmpar_exch_group_end_halo_exch + public :: mpas_dmpar_exch_group_full_halo_exch + public :: mpas_dmpar_field_halo_exch + interface mpas_dmpar_alltoall_field module procedure mpas_dmpar_alltoall_field1d_integer @@ -49,6 +118,8 @@ module mpas_dmpar module procedure mpas_dmpar_alltoall_field5d_real end interface + public :: mpas_dmpar_alltoall_field + private :: mpas_dmpar_alltoall_field1d_integer private :: mpas_dmpar_alltoall_field2d_integer private :: mpas_dmpar_alltoall_field1d_real @@ -69,6 +140,8 @@ module mpas_dmpar module procedure mpas_dmpar_exch_halo_field5d_real end interface + public :: mpas_dmpar_exch_halo_field + private :: mpas_dmpar_exch_halo_field1d_integer private :: mpas_dmpar_exch_halo_field2d_integer private :: mpas_dmpar_exch_halo_field3d_integer @@ -89,6 +162,8 @@ module mpas_dmpar module procedure mpas_dmpar_copy_field5d_real end interface + public :: mpas_dmpar_copy_field + private :: mpas_dmpar_copy_field1d_integer private :: mpas_dmpar_copy_field2d_integer private :: mpas_dmpar_copy_field3d_integer @@ -98,6 +173,41 @@ module mpas_dmpar private :: mpas_dmpar_copy_field4d_real private :: mpas_dmpar_copy_field5d_real + interface mpas_dmpar_exch_group_pack_buffer_field + module procedure mpas_dmpar_exch_group_pack_buffer_field1d_integer + module procedure mpas_dmpar_exch_group_pack_buffer_field2d_integer + module 
procedure mpas_dmpar_exch_group_pack_buffer_field3d_integer + module procedure mpas_dmpar_exch_group_pack_buffer_field1d_real + module procedure mpas_dmpar_exch_group_pack_buffer_field2d_real + module procedure mpas_dmpar_exch_group_pack_buffer_field3d_real + module procedure mpas_dmpar_exch_group_pack_buffer_field4d_real + module procedure mpas_dmpar_exch_group_pack_buffer_field5d_real + end interface + + interface mpas_dmpar_exch_group_local_exch_field + module procedure mpas_dmpar_exch_group_local_exch_field1d_integer + module procedure mpas_dmpar_exch_group_local_exch_field2d_integer + module procedure mpas_dmpar_exch_group_local_exch_field3d_integer + module procedure mpas_dmpar_exch_group_local_exch_field1d_real + module procedure mpas_dmpar_exch_group_local_exch_field2d_real + module procedure mpas_dmpar_exch_group_local_exch_field3d_real + module procedure mpas_dmpar_exch_group_local_exch_field4d_real + module procedure mpas_dmpar_exch_group_local_exch_field5d_real + end interface mpas_dmpar_exch_group_local_exch_field + + interface mpas_dmpar_exch_group_unpack_buffer_field + module procedure mpas_dmpar_exch_group_unpack_buffer_field1d_integer + module procedure mpas_dmpar_exch_group_unpack_buffer_field2d_integer + module procedure mpas_dmpar_exch_group_unpack_buffer_field3d_integer + module procedure mpas_dmpar_exch_group_unpack_buffer_field1d_real + module procedure mpas_dmpar_exch_group_unpack_buffer_field2d_real + module procedure mpas_dmpar_exch_group_unpack_buffer_field3d_real + module procedure mpas_dmpar_exch_group_unpack_buffer_field4d_real + module procedure mpas_dmpar_exch_group_unpack_buffer_field5d_real + end interface + + + contains !----------------------------------------------------------------------- @@ -121,14 +231,25 @@ subroutine mpas_dmpar_init(dminfo, mpi_comm)!{{{ #ifdef _MPI integer :: mpi_rank, mpi_size integer :: mpi_ierr +#ifdef MPAS_OPENMP + integer :: desiredThreadLevel, threadLevel +#endif - if (present(mpi_comm)) then - 
dminfo % comm = mpi_comm - dminfo % using_external_comm = .true. + if ( present(mpi_comm) ) then + dminfo % initialized_mpi = .false. + call MPI_Comm_dup(mpi_comm, dminfo % comm, mpi_ierr) else + dminfo % initialized_mpi = .true. +#ifdef MPAS_OPENMP + desiredThreadLevel = MPI_THREAD_MULTIPLE + call MPI_Init_thread(desiredThreadLevel, threadLevel, mpi_ierr) + if ( threadLevel /= desiredThreadLevel ) then + write(stderrUnit, *) 'Warning: MPI implementation gave thread level of ', threadLevel, ' when ', desiredThreadLevel, ' was requested.' + end if +#else call MPI_Init(mpi_ierr) - dminfo % comm = MPI_COMM_WORLD - dminfo % using_external_comm = .false. +#endif + call MPI_Comm_dup(MPI_COMM_WORLD, dminfo % comm, mpi_ierr) end if ! Find out our rank and the total number of processors @@ -138,17 +259,19 @@ subroutine mpas_dmpar_init(dminfo, mpi_comm)!{{{ dminfo % nprocs = mpi_size dminfo % my_proc_id = mpi_rank - write(stderrUnit,'(a,i5,a,i5,a)') 'task ', mpi_rank, ' of ', mpi_size, & - ' is running' + if ( dminfo % initialized_mpi ) then + write(stderrUnit,'(a,i5,a,i5,a)') 'task ', mpi_rank, ' of ', mpi_size, & + ' is running' - call open_streams(dminfo % my_proc_id) + call open_streams(dminfo % my_proc_id) + end if dminfo % info = MPI_INFO_NULL #else dminfo % comm = 0 dminfo % my_proc_id = IO_NODE dminfo % nprocs = 1 - dminfo % using_external_comm = .false. + dminfo % initialized_mpi = .false. #endif end subroutine mpas_dmpar_init!}}} @@ -171,8 +294,11 @@ subroutine mpas_dmpar_finalize(dminfo)!{{{ #ifdef _MPI integer :: mpi_ierr +#endif - if (.not. 
dminfo % using_external_comm) then +#ifdef _MPI + call MPI_Comm_free(dminfo % comm, mpi_ierr) + if (dminfo % initialized_mpi) then call MPI_Finalize(mpi_ierr) end if #endif @@ -226,14 +352,19 @@ subroutine mpas_dmpar_bcast_int(dminfo, i, proc)!{{{ #ifdef _MPI integer :: mpi_ierr, source + integer :: threadNum - if (present(proc)) then - source = proc - else - source = IO_NODE - endif + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if (present(proc)) then + source = proc + else + source = IO_NODE + endif - call MPI_Bcast(i, 1, MPI_INTEGERKIND, source, dminfo % comm, mpi_ierr) + call MPI_Bcast(i, 1, MPI_INTEGERKIND, source, dminfo % comm, mpi_ierr) + end if #endif end subroutine mpas_dmpar_bcast_int!}}} @@ -260,14 +391,19 @@ subroutine mpas_dmpar_bcast_ints(dminfo, n, iarray, proc)!{{{ #ifdef _MPI integer :: mpi_ierr, source + integer :: threadNum - if (present(proc)) then - source = proc - else - source = IO_NODE - endif + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if (present(proc)) then + source = proc + else + source = IO_NODE + endif - call MPI_Bcast(iarray, n, MPI_INTEGERKIND, source, dminfo % comm, mpi_ierr) + call MPI_Bcast(iarray, n, MPI_INTEGERKIND, source, dminfo % comm, mpi_ierr) + end if #endif end subroutine mpas_dmpar_bcast_ints!}}} @@ -293,14 +429,19 @@ subroutine mpas_dmpar_bcast_real(dminfo, r, proc)!{{{ #ifdef _MPI integer :: mpi_ierr, source + integer :: threadNum - if (present(proc)) then - source = proc - else - source = IO_NODE - endif + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if (present(proc)) then + source = proc + else + source = IO_NODE + endif - call MPI_Bcast(r, 1, MPI_REALKIND, source, dminfo % comm, mpi_ierr) + call MPI_Bcast(r, 1, MPI_REALKIND, source, dminfo % comm, mpi_ierr) + end if #endif end subroutine mpas_dmpar_bcast_real!}}} @@ -327,14 +468,19 @@ subroutine mpas_dmpar_bcast_reals(dminfo, n, rarray, proc)!{{{ #ifdef _MPI 
integer :: mpi_ierr, source + integer :: threadNum - if (present(proc)) then - source = proc - else - source = IO_NODE - endif + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if (present(proc)) then + source = proc + else + source = IO_NODE + endif - call MPI_Bcast(rarray, n, MPI_REALKIND, source, dminfo % comm, mpi_ierr) + call MPI_Bcast(rarray, n, MPI_REALKIND, source, dminfo % comm, mpi_ierr) + end if #endif end subroutine mpas_dmpar_bcast_reals!}}} @@ -360,14 +506,19 @@ subroutine mpas_dmpar_bcast_double(dminfo, r, proc)!{{{ #ifdef _MPI integer :: mpi_ierr, source + integer :: threadNum - if (present(proc)) then - source = proc - else - source = IO_NODE - endif + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if (present(proc)) then + source = proc + else + source = IO_NODE + endif - call MPI_Bcast(r, 1, MPI_DOUBLE_PRECISION, source, dminfo % comm, mpi_ierr) + call MPI_Bcast(r, 1, MPI_DOUBLE_PRECISION, source, dminfo % comm, mpi_ierr) + end if #endif end subroutine mpas_dmpar_bcast_double!}}} @@ -394,14 +545,19 @@ subroutine mpas_dmpar_bcast_doubles(dminfo, n, rarray, proc)!{{{ #ifdef _MPI integer :: mpi_ierr, source + integer :: threadNum - if (present(proc)) then - source = proc - else - source = IO_NODE - endif + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if (present(proc)) then + source = proc + else + source = IO_NODE + endif - call MPI_Bcast(rarray, n, MPI_DOUBLE_PRECISION, source, dminfo % comm, mpi_ierr) + call MPI_Bcast(rarray, n, MPI_DOUBLE_PRECISION, source, dminfo % comm, mpi_ierr) + end if #endif end subroutine mpas_dmpar_bcast_doubles!}}} @@ -428,27 +584,32 @@ subroutine mpas_dmpar_bcast_logical(dminfo, l, proc)!{{{ #ifdef _MPI integer :: mpi_ierr, source integer :: itemp + integer :: threadNum - if (present(proc)) then - source = proc - else - source = IO_NODE - endif + threadNum = mpas_threading_get_thread_num() - if (dminfo % my_proc_id == IO_NODE) 
then - if (l) then - itemp = 1 + if ( threadNum == 0 ) then + if (present(proc)) then + source = proc else - itemp = 0 + source = IO_NODE + endif + + if (dminfo % my_proc_id == IO_NODE) then + if (l) then + itemp = 1 + else + itemp = 0 + end if end if - end if - call MPI_Bcast(itemp, 1, MPI_INTEGERKIND, source, dminfo % comm, mpi_ierr) + call MPI_Bcast(itemp, 1, MPI_INTEGERKIND, source, dminfo % comm, mpi_ierr) - if (itemp == 1) then - l = .true. - else - l = .false. + if (itemp == 1) then + l = .true. + else + l = .false. + end if end if #endif @@ -475,18 +636,62 @@ subroutine mpas_dmpar_bcast_char(dminfo, c, proc)!{{{ #ifdef _MPI integer :: mpi_ierr, source + integer :: threadNum - if (present(proc)) then - source = proc - else - source = IO_NODE - endif + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if (present(proc)) then + source = proc + else + source = IO_NODE + endif - call MPI_Bcast(c, len(c), MPI_CHARACTER, source, dminfo % comm, mpi_ierr) + call MPI_Bcast(c, len(c), MPI_CHARACTER, source, dminfo % comm, mpi_ierr) + end if #endif end subroutine mpas_dmpar_bcast_char!}}} +!----------------------------------------------------------------------- +! routine mpas_dmpar_bcast_chars +! +!> \brief MPAS dmpar broadcast character array routine. +!> \author Doug Jacobsen +!> \date 01/22/2016 +!> \details +!> This routine broadcasts an array of characters to all processors in the communicator. +!> An optional argument specifies the source node; else broadcast from IO_NODE. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_bcast_chars(dminfo, n, carray, proc)!{{{ + + implicit none + + type (dm_info), intent(in) :: dminfo !< Input: Domain information + integer, intent(in) :: n !< Input: Number of character strings to broadcast + character (len=*), dimension(:), intent(inout) :: carray !< Input/Output: Character to be broadcast + integer, intent(in), optional :: proc !< optional argument indicating which processor to broadcast from + +#ifdef _MPI + integer :: mpi_ierr, source + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if (present(proc)) then + source = proc + else + source = IO_NODE + endif + + call MPI_Bcast(carray, n * len(carray(1)), MPI_CHARACTER, source, dminfo % comm, mpi_ierr) + end if +#endif + + end subroutine mpas_dmpar_bcast_chars!}}} + !----------------------------------------------------------------------- ! routine mpas_dmpar_sum_int ! @@ -506,12 +711,17 @@ subroutine mpas_dmpar_sum_int(dminfo, i, isum)!{{{ integer, intent(out) :: isum !< Output: Integer sum for output integer :: mpi_ierr + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(i, isum, 1, MPI_INTEGERKIND, MPI_SUM, dminfo % comm, mpi_ierr) + call MPI_Allreduce(i, isum, 1, MPI_INTEGERKIND, MPI_SUM, dminfo % comm, mpi_ierr) #else - isum = i + isum = i #endif + end if end subroutine mpas_dmpar_sum_int!}}} @@ -533,13 +743,17 @@ subroutine mpas_dmpar_sum_real(dminfo, r, rsum)!{{{ real(kind=RKIND), intent(in) :: r !< Input: Real values to be summed real(kind=RKIND), intent(out) :: rsum !< Output: Sum of reals for output - integer :: mpi_ierr + integer :: mpi_ierr, threadNum + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(r, rsum, 1, MPI_REALKIND, MPI_SUM, dminfo % comm, mpi_ierr) + call MPI_Allreduce(r, rsum, 1, 
MPI_REALKIND, MPI_SUM, dminfo % comm, mpi_ierr) #else - rsum = r + rsum = r #endif + end if end subroutine mpas_dmpar_sum_real!}}} @@ -561,13 +775,17 @@ subroutine mpas_dmpar_min_int(dminfo, i, imin)!{{{ integer, intent(in) :: i !< Input: Integer value integer, intent(out) :: imin !< Output: Minimum integer value - integer :: mpi_ierr + integer :: mpi_ierr, threadNum + + threadNum = mpas_threading_get_thread_num() + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(i, imin, 1, MPI_INTEGERKIND, MPI_MIN, dminfo % comm, mpi_ierr) + call MPI_Allreduce(i, imin, 1, MPI_INTEGERKIND, MPI_MIN, dminfo % comm, mpi_ierr) #else - imin = i + imin = i #endif + end if end subroutine mpas_dmpar_min_int!}}} @@ -589,13 +807,17 @@ subroutine mpas_dmpar_min_real(dminfo, r, rmin)!{{{ real(kind=RKIND), intent(in) :: r !< Input: Real value real(kind=RKIND), intent(out) :: rmin !< Output: Minimum of real value - integer :: mpi_ierr + integer :: mpi_ierr, threadNum + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(r, rmin, 1, MPI_REALKIND, MPI_MIN, dminfo % comm, mpi_ierr) + call MPI_Allreduce(r, rmin, 1, MPI_REALKIND, MPI_MIN, dminfo % comm, mpi_ierr) #else - rmin = r + rmin = r #endif + end if end subroutine mpas_dmpar_min_real!}}} @@ -616,14 +838,18 @@ subroutine mpas_dmpar_max_int(dminfo, i, imax)!{{{ type (dm_info), intent(in) :: dminfo !< Input: Domain information integer, intent(in) :: i !< Input: Integer value integer, intent(out) :: imax !< Output: Maximum of integer values - - integer :: mpi_ierr - + + integer :: mpi_ierr, threadNum + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(i, imax, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr) + call MPI_Allreduce(i, imax, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr) #else - imax = i + imax = i #endif + end if end subroutine mpas_dmpar_max_int!}}} @@ -645,13 +871,17 @@ subroutine 
mpas_dmpar_max_real(dminfo, r, rmax)!{{{ real(kind=RKIND), intent(in) :: r !< Input: Real value real(kind=RKIND), intent(out) :: rmax !< Output: Maximum of real values - integer :: mpi_ierr + integer :: mpi_ierr, threadNum + + threadNum = mpas_threading_get_thread_num() + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(r, rmax, 1, MPI_REALKIND, MPI_MAX, dminfo % comm, mpi_ierr) + call MPI_Allreduce(r, rmax, 1, MPI_REALKIND, MPI_MAX, dminfo % comm, mpi_ierr) #else - rmax = r + rmax = r #endif + end if end subroutine mpas_dmpar_max_real!}}} @@ -674,19 +904,23 @@ subroutine mpas_dmpar_minloc_int(dminfo, i, imin, procout)!{{{ integer, intent(in) :: i !< Input: Integer value integer, intent(out) :: imin !< Output: Minimum of integer values integer, intent(out) :: procout !< Output: Processor on which imin resides - integer :: mpi_ierr + integer :: mpi_ierr, threadNum integer, dimension(2,1) :: recvbuf, sendbuf + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then #ifdef _MPI - sendbuf(1,1) = i - sendbuf(2,1) = dminfo % my_proc_id ! This is the processor number associated with the value i - call MPI_Allreduce(sendbuf, recvbuf, 1, MPI_2INTEGERKIND, MPI_MINLOC, dminfo % comm, mpi_ierr) - imin = recvbuf(1,1) - procout = recvbuf(2,1) + sendbuf(1,1) = i + sendbuf(2,1) = dminfo % my_proc_id ! 
This is the processor number associated with the value i + call MPI_Allreduce(sendbuf, recvbuf, 1, MPI_2INTEGERKIND, MPI_MINLOC, dminfo % comm, mpi_ierr) + imin = recvbuf(1,1) + procout = recvbuf(2,1) #else - imin = i - procout = IO_NODE + imin = i + procout = IO_NODE #endif + end if end subroutine mpas_dmpar_minloc_int!}}} @@ -709,19 +943,23 @@ subroutine mpas_dmpar_minloc_real(dminfo, r, rmin, procout)!{{{ real(kind=RKIND), intent(in) :: r !< Input: Real value real(kind=RKIND), intent(out) :: rmin !< Output: Minimum of real values integer, intent(out) :: procout !< Output: Processor on which rin resides - integer :: mpi_ierr - real(kind=RKIND), dimension(2,1) :: recvbuf, sendbuf + integer :: mpi_ierr, threadNum + real(kind=RKIND), dimension(2,1) :: recvbuf, sendbuf + + threadNum = mpas_threading_get_thread_num() + if ( threadNum == 0 ) then #ifdef _MPI - sendbuf(1,1) = r - sendbuf(2,1) = dminfo % my_proc_id ! This is the processor number associated with the value x (coerced to a real) - call MPI_Allreduce(sendbuf, recvbuf, 1, MPI_2REALKIND, MPI_MINLOC, dminfo % comm, mpi_ierr) - rmin = recvbuf(1,1) - procout = recvbuf(2,1) ! coerced back to integer + sendbuf(1,1) = r + sendbuf(2,1) = dminfo % my_proc_id ! This is the processor number associated with the value x (coerced to a real) + call MPI_Allreduce(sendbuf, recvbuf, 1, MPI_2REALKIND, MPI_MINLOC, dminfo % comm, mpi_ierr) + rmin = recvbuf(1,1) + procout = recvbuf(2,1) !
coerced back to integer #else - rmin = r - procout = IO_NODE + rmin = r + procout = IO_NODE #endif + end if end subroutine mpas_dmpar_minloc_real!}}} @@ -744,19 +980,23 @@ subroutine mpas_dmpar_maxloc_int(dminfo, i, imax, procout)!{{{ integer, intent(in) :: i !< Input: Integer value integer, intent(out) :: imax !< Output: Maximum of integer values integer, intent(out) :: procout !< Output: Processor on which imax resides - integer :: mpi_ierr - integer, dimension(2,1) :: recvbuf, sendbuf + integer :: mpi_ierr, threadNum + integer, dimension(2,1) :: recvbuf, sendbuf + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then #ifdef _MPI - sendbuf(1,1) = i - sendbuf(2,1) = dminfo % my_proc_id ! This is the processor number associated with the value i - call MPI_Allreduce(sendbuf, recvbuf, 1, MPI_2INTEGERKIND, MPI_MAXLOC, dminfo % comm, mpi_ierr) - imax = recvbuf(1,1) - procout = recvbuf(2,1) + sendbuf(1,1) = i + sendbuf(2,1) = dminfo % my_proc_id ! This is the processor number associated with the value i + call MPI_Allreduce(sendbuf, recvbuf, 1, MPI_2INTEGERKIND, MPI_MAXLOC, dminfo % comm, mpi_ierr) + imax = recvbuf(1,1) + procout = recvbuf(2,1) #else - imax = i - procout = IO_NODE + imax = i + procout = IO_NODE #endif + end if end subroutine mpas_dmpar_maxloc_int!}}} @@ -779,22 +1019,123 @@ subroutine mpas_dmpar_maxloc_real(dminfo, r, rmax, procout)!{{{ real(kind=RKIND), intent(in) :: r !< Input: Real value real(kind=RKIND), intent(out) :: rmax !< Output: Maximum of real values integer, intent(out) :: procout !< Output: Processor on which rmax resides - integer :: mpi_ierr - real(kind=RKIND), dimension(2,1) :: recvbuf, sendbuf + integer :: mpi_ierr, threadNum + real(kind=RKIND), dimension(2,1) :: recvbuf, sendbuf + + threadNum = mpas_threading_get_thread_num() + if ( threadNum == 0 ) then #ifdef _MPI - sendbuf(1,1) = r - sendbuf(2,1) = dminfo % my_proc_id ! 
This is the processor number associated with the value x (coerced to a real) - call MPI_Allreduce(sendbuf, recvbuf, 1, MPI_2REALKIND, MPI_MAXLOC, dminfo % comm, mpi_ierr) - rmax = recvbuf(1,1) - procout = recvbuf(2,1) ! coerced back to integer + sendbuf(1,1) = r + sendbuf(2,1) = dminfo % my_proc_id ! This is the processor number associated with the value x (coerced to a real) + call MPI_Allreduce(sendbuf, recvbuf, 1, MPI_2REALKIND, MPI_MAXLOC, dminfo % comm, mpi_ierr) + rmax = recvbuf(1,1) + procout = recvbuf(2,1) ! coerced back to integer #else - rmax = r - procout = IO_NODE + rmax = r + procout = IO_NODE #endif + end if end subroutine mpas_dmpar_maxloc_real!}}} + +!----------------------------------------------------------------------- +! routine mpas_dmpar_minattributes_real +! +!> \brief Returns the array associated with the global minimum value +!> \author Michael Duda +!> \date 12 February 2016 +!> \details +!> This routine takes as input a real value, plus a real-valued array +!> associated with that value, and returns the array from the task with +!> the minimum value of the variable being reduced. +!> +!> One possible application of this routine might be to return the latitude, +!> longitude, and model level associated with the global minimum of a field +!> using a call like: +!> call mpas_dmpar_minattributes_real(dminfo, localMinValue, & +!> (/latOfLocalMin, lonOfLocalMin, levOfLocalMin/) & +!> globalAttributes) +!> latOfGlobalMin = globalAttributes(1) +!> lonOfGlobalMin = globalAttributes(2) +!> levOfGlobalMin = globalAttributes(3) +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_minattributes_real(dminfo, localValue, localAttributes, globalAttributes) + + implicit none + + type (dm_info), intent(in) :: dminfo + real(kind=RKIND), intent(in) :: localValue + real(kind=RKIND), dimension(:), intent(in) :: localAttributes + real(kind=RKIND), dimension(:), intent(out) :: globalAttributes + + integer :: mpi_ierr + real(kind=RKIND), dimension(2,size(localAttributes)) :: recvbuf, sendbuf + +#ifdef _MPI + sendbuf(1,:) = localValue + sendbuf(2,:) = localAttributes(:) + + call MPI_Allreduce(sendbuf, recvbuf, size(recvbuf,dim=2), MPI_2REALKIND, MPI_MINLOC, dminfo % comm, mpi_ierr) + + globalAttributes(:) = recvbuf(2,:) +#else + globalAttributes(:) = localAttributes(:) +#endif + + end subroutine mpas_dmpar_minattributes_real + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_maxattributes_real +! +!> \brief Returns the array associated with the global maximum value +!> \author Michael Duda +!> \date 12 February 2016 +!> \details +!> This routine takes as input a real value, plus a real-valued array +!> associated with that value, and returns the array from the task with +!> the maximum value of the variable being reduced. +!> +!> One possible application of this routine might be to return the latitude, +!> longitude, and model level associated with the global maximum of a field +!> using a call like: +!> call mpas_dmpar_maxattributes_real(dminfo, localMaxValue, & +!> (/latOfLocalMax, lonOfLocalMax, levOfLocalMax/) & +!> globalAttributes) +!> latOfGlobalMax = globalAttributes(1) +!> lonOfGlobalMax = globalAttributes(2) +!> levOfGlobalMax = globalAttributes(3) +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_maxattributes_real(dminfo, localValue, localAttributes, globalAttributes) + + implicit none + + type (dm_info), intent(in) :: dminfo + real(kind=RKIND), intent(in) :: localValue + real(kind=RKIND), dimension(:), intent(in) :: localAttributes + real(kind=RKIND), dimension(:), intent(out) :: globalAttributes + + integer :: mpi_ierr + real(kind=RKIND), dimension(2,size(localAttributes)) :: recvbuf, sendbuf + +#ifdef _MPI + sendbuf(1,:) = localValue + sendbuf(2,:) = localAttributes(:) + + call MPI_Allreduce(sendbuf, recvbuf, size(recvbuf,dim=2), MPI_2REALKIND, MPI_MAXLOC, dminfo % comm, mpi_ierr) + + globalAttributes(:) = recvbuf(2,:) +#else + globalAttributes(:) = localAttributes(:) +#endif + + end subroutine mpas_dmpar_maxattributes_real + + !----------------------------------------------------------------------- ! routine mpas_dmpar_sum_int_array ! @@ -808,19 +1149,23 @@ end subroutine mpas_dmpar_maxloc_real!}}} subroutine mpas_dmpar_sum_int_array(dminfo, nElements, inArray, outArray)!{{{ implicit none - + type (dm_info), intent(in) :: dminfo !< Input: Domain information integer, intent(in) :: nElements !< Input: Length of arrays integer, dimension(nElements), intent(in) :: inArray !< Input: Processor specific array to sum integer, dimension(nElements), intent(out) :: outArray !< Output: Sum of arrays - - integer :: mpi_ierr + integer :: mpi_ierr, threadNum + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_SUM, dminfo % comm, mpi_ierr) + call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_SUM, dminfo % comm, mpi_ierr) #else - outArray = inArray + outArray = inArray #endif + end if end subroutine mpas_dmpar_sum_int_array!}}} @@ -835,21 +1180,25 @@ end subroutine mpas_dmpar_sum_int_array!}}} ! 
!----------------------------------------------------------------------- subroutine mpas_dmpar_min_int_array(dminfo, nElements, inArray, outArray)!{{{ - + implicit none - + type (dm_info), intent(in) :: dminfo !< Input: Domain information integer, intent(in) :: nElements !< Input: Array size integer, dimension(nElements), intent(in) :: inArray !< Input: Input array of integers integer, dimension(nElements), intent(out) :: outArray !< Output: Array of minimum integers - integer :: mpi_ierr + integer :: mpi_ierr, threadNum + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_MIN, dminfo % comm, mpi_ierr) + call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_MIN, dminfo % comm, mpi_ierr) #else - outArray = inArray + outArray = inArray #endif + end if end subroutine mpas_dmpar_min_int_array!}}} @@ -872,13 +1221,17 @@ subroutine mpas_dmpar_max_int_array(dminfo, nElements, inArray, outArray)!{{{ integer, dimension(nElements), intent(in) :: inArray !< Input: Array of integers integer, dimension(nElements), intent(out) :: outArray !< Output: Array of maximum integers - integer :: mpi_ierr + integer :: mpi_ierr, threadNum + + threadNum = mpas_threading_get_thread_num() + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr) + call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr) #else - outArray = inArray + outArray = inArray #endif + end if end subroutine mpas_dmpar_max_int_array!}}} @@ -901,13 +1254,17 @@ subroutine mpas_dmpar_sum_real_array(dminfo, nElements, inArray, outArray)!{{{ real(kind=RKIND), dimension(nElements), intent(in) :: inArray !< Input: Array of reals real(kind=RKIND), dimension(nElements), intent(out) :: outArray !< Output: Array of real sums - integer :: mpi_ierr + integer :: mpi_ierr, threadNum + 
threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_SUM, dminfo % comm, mpi_ierr) + call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_SUM, dminfo % comm, mpi_ierr) #else - outArray = inArray + outArray = inArray #endif + end if end subroutine mpas_dmpar_sum_real_array!}}} @@ -930,13 +1287,17 @@ subroutine mpas_dmpar_min_real_array(dminfo, nElements, inArray, outArray)!{{{ real(kind=RKIND), dimension(nElements), intent(in) :: inArray !< Input: Array of reals real(kind=RKIND), dimension(nElements), intent(out) :: outArray !< Input: Array of minimum reals - integer :: mpi_ierr + integer :: mpi_ierr, threadNum + + threadNum = mpas_threading_get_thread_num() + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_MIN, dminfo % comm, mpi_ierr) + call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_MIN, dminfo % comm, mpi_ierr) #else - outArray = inArray + outArray = inArray #endif + end if end subroutine mpas_dmpar_min_real_array!}}} @@ -959,13 +1320,17 @@ subroutine mpas_dmpar_max_real_array(dminfo, nElements, inArray, outArray)!{{{ real(kind=RKIND), dimension(nElements), intent(in) :: inArray !< Input: Array of reals real(kind=RKIND), dimension(nElements), intent(out) :: outArray !< Output: Array of maximum reals - integer :: mpi_ierr + integer :: mpi_ierr, threadNum + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then #ifdef _MPI - call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_MAX, dminfo % comm, mpi_ierr) + call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_MAX, dminfo % comm, mpi_ierr) #else - outArray = inArray + outArray = inArray #endif + end if end subroutine mpas_dmpar_max_real_array!}}} @@ -992,9 +1357,13 @@ subroutine mpas_dmpar_scatter_ints(dminfo, nprocs, noutlist, displs, counts, inl integer, 
dimension(noutlist), intent(inout) :: outlist !< Output: List of received integers #ifdef _MPI - integer :: mpi_ierr - - call MPI_Scatterv(inlist, counts, displs, MPI_INTEGERKIND, outlist, noutlist, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr) + integer :: mpi_ierr, threadNum + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + call MPI_Scatterv(inlist, counts, displs, MPI_INTEGERKIND, outlist, noutlist, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr) + end if #endif end subroutine mpas_dmpar_scatter_ints!}}} @@ -1024,7 +1393,7 @@ subroutine mpas_dmpar_get_index_range(dminfo, &!{{{ local_start = nint(real(dminfo % my_proc_id,R8KIND) * real(global_end - global_start + 1,R8KIND) & / real(dminfo % nprocs,R8KIND)) + 1 local_end = nint(real(dminfo % my_proc_id + 1,R8KIND) * real(global_end - global_start + 1,R8KIND) & - / real(dminfo % nprocs,R8KIND)) + / real(dminfo % nprocs,R8KIND)) end subroutine mpas_dmpar_get_index_range!}}} @@ -1039,29 +1408,40 @@ subroutine mpas_dmpar_compute_index_range(dminfo, &!{{{ integer, intent(inout) :: global_start, global_end integer :: n - integer :: mpi_ierr + integer :: mpi_ierr, threadNum + + threadNum = mpas_threading_get_thread_num() n = local_end - local_start + 1 if (dminfo % my_proc_id == 0) then global_start = 1 global_end = global_start + n - 1 - + #ifdef _MPI else if (dminfo % my_proc_id == dminfo % nprocs - 1) then - call MPI_Recv(global_start, 1, MPI_INTEGERKIND, dminfo % my_proc_id - 1, 0, dminfo % comm, MPI_STATUS_IGNORE, mpi_ierr) + if ( threadNum == 0 ) then + call MPI_Recv(global_start, 1, MPI_INTEGERKIND, dminfo % my_proc_id - 1, 0, dminfo % comm, MPI_STATUS_IGNORE, mpi_ierr) + end if + call mpas_threading_barrier() global_end = global_start + n - 1 else - call MPI_Recv(global_start, 1, MPI_INTEGERKIND, dminfo % my_proc_id - 1, 0, dminfo % comm, MPI_STATUS_IGNORE, mpi_ierr) + if ( threadNum == 0 ) then + call MPI_Recv(global_start, 1, MPI_INTEGERKIND, dminfo % my_proc_id - 1, 0, 
dminfo % comm, MPI_STATUS_IGNORE, mpi_ierr) + end if + call mpas_threading_barrier() global_end = global_start + n - call MPI_Send(global_end, 1, MPI_INTEGERKIND, dminfo % my_proc_id + 1, 0, dminfo % comm, mpi_ierr) + if ( threadNum == 0 ) then + call MPI_Send(global_end, 1, MPI_INTEGERKIND, dminfo % my_proc_id + 1, 0, dminfo % comm, mpi_ierr) + end if + call mpas_threading_barrier() global_end = global_end - 1 #endif end if - - + + end subroutine mpas_dmpar_compute_index_range!}}} !----------------------------------------------------------------------- @@ -1104,7 +1484,7 @@ subroutine mpas_dmpar_get_exch_list(haloLayer, ownedListField, neededListField, integer :: mpi_ierr, mpi_rreq, mpi_sreq type (hashtable) :: neededHash - integer :: nUniqueNeededList + integer :: nUniqueNeededList, threadNum integer, dimension(:,:), pointer :: uniqueSortedNeededList @@ -1117,520 +1497,522 @@ subroutine mpas_dmpar_get_exch_list(haloLayer, ownedListField, neededListField, ! For the ownedListField: ! - ownedList contains a list of the global indices owned by all blocks ! - ownedListIndex contains a list of the block-local indices of the global indices owned by all blocks - ! - ownedBlock contains the local block ID associated with each index + ! - ownedBlock contains the local block ID associated with each index ! ! Example: ! ownedList := ( 21 13 15 01 05 06 33 42 44 45 ) ! Global indices from all blocks on this task ! ownedListIndex := ( 1 2 3 4 1 2 3 4 5 6 ) ! Local indices of global indices on each block ! ownedBlock := ( 1 1 1 1 2 2 2 2 2 2 ) ! Local indices of global indices on each block ! - + ! For the neededListField: ! similar to the ownedListField... dminfo => ownedListField % block % domain % dminfo - - ! - ! Determine total number of owned blocks on this task - ! 
- nOwnedBlocks = 0 - fieldCursor => ownedListField - do while (associated(fieldCursor)) - nOwnedBlocks = nOwnedBlocks + 1 - if(associated(fieldCursor % sendList % halos(haloLayer) % exchList)) then - call mpas_dmpar_destroy_exchange_list(fieldCursor % sendList % halos(haloLayer) % exchList) - end if - - if(associated(fieldCursor % copyList % halos(haloLayer) % exchList)) then - call mpas_dmpar_destroy_exchange_list(fieldCursor % copyList % halos(haloLayer) % exchList) - end if - fieldCursor => fieldCursor % next - end do - - ! - ! Determine total number of needed indices on this task - ! - nNeededList = 0 - nNeededBlocks = 0 - fieldCursor => neededListField - do while (associated(fieldCursor)) - nNeededBlocks = nNeededBlocks + 1 - nNeededList = nNeededList + fieldCursor % dimSizes(1) - if(associated(fieldCursor % recvList % halos(haloLayer) % exchList)) then - call mpas_dmpar_destroy_exchange_list(fieldCursor % recvList % halos(haloLayer) % exchList) - end if - - fieldCursor => fieldCursor % next - end do + threadNum = mpas_threading_get_thread_num() ! - ! Determine unique list of needed elements. + ! Determine total number of owned blocks on this task ! - nUniqueNeededList = 0 - call mpas_hash_init(neededHash) - fieldCursor => neededListField - do while (associated(fieldCursor)) - do i = 1, fieldCursor % dimSizes(1) - if(.not. 
mpas_hash_search(neededHash, fieldCursor % array(i))) then - nUniqueNeededList = nUniqueNeededList + 1 - call mpas_hash_insert(neededHash, fieldCursor % array(i)) + if ( threadNum == 0 ) then + nOwnedBlocks = 0 + fieldCursor => ownedListField + do while (associated(fieldCursor)) + nOwnedBlocks = nOwnedBlocks + 1 + if(associated(fieldCursor % sendList % halos(haloLayer) % exchList)) then + call mpas_dmpar_destroy_exchange_list(fieldCursor % sendList % halos(haloLayer) % exchList) end if - end do - fieldCursor => fieldCursor % next - end do - kk = mpas_hash_size(neededHash) - - nUniqueNeededList = mpas_hash_size(neededHash) - allocate(uniqueSortedNeededList(2,nUniqueNeededList)) - allocate(packingOrder(nUniqueNeededList)) - call mpas_hash_destroy(neededHash) - call mpas_hash_init(neededHash) - - j = 0 - fieldCursor => neededListField - do while (associated(fieldCursor)) - do i = 1, fieldCursor % dimSizes(1) - if(.not. mpas_hash_search(neededHash, fieldCursor % array(i))) then - j = j +1 - uniqueSortedNeededList(1, j) = fieldCursor % array(i) - uniqueSortedNeededList(2, j) = fieldCursor % block % localBlockID - call mpas_hash_insert(neededHash, fieldCursor % array(i)) + if(associated(fieldCursor % copyList % halos(haloLayer) % exchList)) then + call mpas_dmpar_destroy_exchange_list(fieldCursor % copyList % halos(haloLayer) % exchList) end if + fieldCursor => fieldCursor % next end do - fieldCursor => fieldCursor % next - end do - kk = mpas_hash_size(neededHash) - - call mpas_hash_destroy(neededHash) - call mpas_quicksort(nUniqueNeededList, uniqueSortedNeededList) + ! + ! Determine total number of needed indices on this task + ! 
+ nNeededList = 0 + nNeededBlocks = 0 + fieldCursor => neededListField + do while (associated(fieldCursor)) + nNeededBlocks = nNeededBlocks + 1 + nNeededList = nNeededList + fieldCursor % dimSizes(1) + if(associated(fieldCursor % recvList % halos(haloLayer) % exchList)) then + call mpas_dmpar_destroy_exchange_list(fieldCursor % recvList % halos(haloLayer) % exchList) + end if - ! - ! Get list of index offsets for all blocks - ! - allocate(offsetList(nNeededBlocks)) - if (present(offsetListField)) then - offsetCursor => offsetListField - do while (associated(offsetCursor)) - offsetList(offsetCursor % block % localBlockID+1) = offsetCursor % scalar - offsetCursor => offsetCursor % next + fieldCursor => fieldCursor % next end do - else - offsetList(:) = 0 - end if - ! - ! Get list of bounds limit for owned elements - ! - allocate(ownedLimitList(nOwnedBlocks)) - if(present(ownedLimitField)) then - ownedLimitCursor => ownedLimitField - do while(associated(ownedLimitCursor)) - ownedLimitList(ownedLimitCursor % block % localBlockID+1) = ownedLimitCursor % scalar - ownedLimitCursor => ownedLimitCursor % next - end do - else - fieldCursor => ownedListField - do while(associated(fieldCursor)) - ownedLimitList(fieldCursor % block % localBlockID+1) = fieldCursor % dimSizes(1) + ! + ! Determine unique list of needed elements. + ! + nUniqueNeededList = 0 + call mpas_hash_init(neededHash) + fieldCursor => neededListField + do while (associated(fieldCursor)) + do i = 1, fieldCursor % dimSizes(1) + if(.not. mpas_hash_search(neededHash, fieldCursor % array(i))) then + nUniqueNeededList = nUniqueNeededList + 1 + call mpas_hash_insert(neededHash, fieldCursor % array(i)) + end if + end do fieldCursor => fieldCursor % next end do - end if - ! - ! Determine total number of owned indices on this task, and - ! initialize output send and recv lists for ownedListField - ! 
- nOwnedList = 0 - fieldCursor => ownedListField - do while (associated(fieldCursor)) - iBlock = fieldcursor % block % localBlockID + 1 - nOwnedList = nOwnedList + ownedLimitList(iBlock) - fieldCursor => fieldCursor % next - end do + kk = mpas_hash_size(neededHash) -#ifdef _MPI - ! - ! Gather list of all owned indices and their associated blocks on this task - ! - allocate(ownedList(nOwnedList)) - allocate(ownedBlock(nOwnedList)) - ownedBlock = -1 - ownedList = -1 - fieldCursor => ownedListField - i = 1 - do while (associated(fieldCursor)) - iBlock = fieldCursor % block % localBlockID + 1 - ownedList(i:i+ownedLimitList(iBlock)-1) = fieldCursor % array(1:ownedLimitList(iBlock)) - ownedBlock(i:i+ownedLimitList(iBlock)-1) = fieldCursor % block % localBlockID - i = i + ownedLimitList(iBlock) - fieldCursor => fieldCursor % next - end do + nUniqueNeededList = mpas_hash_size(neededHash) + allocate(uniqueSortedNeededList(2,nUniqueNeededList)) + allocate(packingOrder(nUniqueNeededList)) + call mpas_hash_destroy(neededHash) + call mpas_hash_init(neededHash) - ! - ! Gather list of all needed indices and their associated blocks on this task - ! - allocate(neededList(nNeededList)) - allocate(neededBlock(nNeededList)) - fieldCursor => neededListField - i = 1 - do while (associated(fieldCursor)) - neededList(i:i+fieldCursor % dimSizes(1)-1) = fieldCursor % array(:) - neededBlock(i:i+fieldCursor % dimSizes(1)-1) = fieldCursor % block % localBlockID - i = i + fieldCursor % dimSizes(1) - fieldCursor => fieldCursor % next - end do + j = 0 + fieldCursor => neededListField + do while (associated(fieldCursor)) + do i = 1, fieldCursor % dimSizes(1) + if(.not. mpas_hash_search(neededHash, fieldCursor % array(i))) then + j = j +1 + uniqueSortedNeededList(1, j) = fieldCursor % array(i) + uniqueSortedNeededList(2, j) = fieldCursor % block % localBlockID + call mpas_hash_insert(neededHash, fieldCursor % array(i)) + end if + end do + fieldCursor => fieldCursor % next + end do - ! - ! 
Obtain sorted list of global indices owned by this task and the associated local indices and block IDs - ! - allocate(ownedListIndex(nOwnedList)) - allocate(ownedListSorted(2,nOwnedList)) - allocate(recipientList(2,nOwnedList)) - j = 1 - k = 1 - do i=1,nOwnedList - ownedListSorted(1,i) = ownedList(i) - if (i > 1) then - if(ownedBlock(i) /= ownedBlock(i-1)) k = 1 - end if - ownedListIndex(i) = k - ownedListSorted(2,i) = j - j = j + 1 - k = k + 1 - end do - call mpas_quicksort(nOwnedList, ownedListSorted) + kk = mpas_hash_size(neededHash) - allocate(ownedBlockSorted(2,nOwnedList)) - do i=1,nOwnedList - ownedBlockSorted(1,i) = ownedList(i) - ownedBlockSorted(2,i) = ownedBlock(i) - end do - call mpas_quicksort(nOwnedList, ownedBlockSorted) + call mpas_hash_destroy(neededHash) + call mpas_quicksort(nUniqueNeededList, uniqueSortedNeededList) + ! + ! Get list of index offsets for all blocks + ! + allocate(offsetList(nNeededBlocks)) + if (present(offsetListField)) then + offsetCursor => offsetListField + do while (associated(offsetCursor)) + offsetList(offsetCursor % block % localBlockID+1) = offsetCursor % scalar + offsetCursor => offsetCursor % next + end do + else + offsetList(:) = 0 + end if - allocate(neededListIndex(nNeededList)) - j = 1 - do i=1,nNeededList - if (i > 1) then - if(neededBlock(i) /= neededBlock(i-1)) j = 1 + ! + ! Get list of bounds limit for owned elements + ! + allocate(ownedLimitList(nOwnedBlocks)) + if(present(ownedLimitField)) then + ownedLimitCursor => ownedLimitField + do while(associated(ownedLimitCursor)) + ownedLimitList(ownedLimitCursor % block % localBlockID+1) = ownedLimitCursor % scalar + ownedLimitCursor => ownedLimitCursor % next + end do + else + fieldCursor => ownedListField + do while(associated(fieldCursor)) + ownedLimitList(fieldCursor % block % localBlockID+1) = fieldCursor % dimSizes(1) + fieldCursor => fieldCursor % next + end do end if - neededListIndex(i) = j - j = j + 1 - end do - ! - ! 
Set totalSize to the maximum number of items in any task's needed list - ! - call MPI_Allreduce(nUniqueNeededList, totalSize, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr) - - allocate(ownerListIn(totalSize)) - allocate(ownerListOut(totalSize)) - - nMesgSend = nUniqueNeededList - nMesgRecv = nUniqueNeededList - ownerListOut(1:nUniqueNeededList) = uniqueSortedNeededList(1,1:nUniqueNeededList) - - recvNeighbor = mod(dminfo % my_proc_id + dminfo % nprocs - 1, dminfo % nprocs) - sendNeighbor = mod(dminfo % my_proc_id + 1, dminfo % nprocs) - - allocate(numToSend(nOwnedBlocks)) - allocate(numToRecv(nNeededBlocks)) - - ! Initial send of data to neighbors. - if(dminfo % nProcs == 1) then - ownerListIn = ownerListOut - else - call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr) - call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr) - call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr) - call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr) - call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr) - call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr) - call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr) - call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr) - end if - - ! - ! For each processor (not including ourself), mark the indices that we will provide to - ! that processor in ownerListOut, and build a send list for that processor if we - ! do need to send any indices - ! 
- do i=2, dminfo % nprocs - recipientList = -1 - numToSend(:) = 0 - totalSent = 0 - - currentProc = mod(dminfo % my_proc_id + dminfo % nprocs - i + 1, dminfo % nprocs) - do j=1,nMesgRecv - if (ownerListIn(j) > 0) then - k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedList, ownerListIn(j)) - if (k <= nOwnedList) then - iBlock = ownedBlock(ownedListSorted(2,k)) + 1 - numToSend(iBlock) = numToSend(iBlock) + 1 - totalSent = totalSent + 1 - - ! recipientList(1,:) represents the index in the srcList to place this data - recipientList(1,ownedListSorted(2,k)) = numToSend(iBlock) - ! recipientList(2,:) represnets the index in the buffer to place this data - recipientList(2,ownedListSorted(2,k)) = totalSent - - ownerListOut(j) = -1 * dminfo % my_proc_id - else - ownerListOut(j) = ownerListIn(j) - end if - else - ownerListOut(j) = ownerListIn(j) - end if + ! + ! Determine total number of owned indices on this task, and + ! initialize output send and recv lists for ownedListField + ! + nOwnedList = 0 + fieldCursor => ownedListField + do while (associated(fieldCursor)) + iBlock = fieldcursor % block % localBlockID + 1 + nOwnedList = nOwnedList + ownedLimitList(iBlock) + fieldCursor => fieldCursor % next end do +#ifdef _MPI + ! + ! Gather list of all owned indices and their associated blocks on this task + ! + allocate(ownedList(nOwnedList)) + allocate(ownedBlock(nOwnedList)) + ownedBlock = -1 + ownedList = -1 fieldCursor => ownedListField + i = 1 do while (associated(fieldCursor)) iBlock = fieldCursor % block % localBlockID + 1 + ownedList(i:i+ownedLimitList(iBlock)-1) = fieldCursor % array(1:ownedLimitList(iBlock)) + ownedBlock(i:i+ownedLimitList(iBlock)-1) = fieldCursor % block % localBlockID + i = i + ownedLimitList(iBlock) + fieldCursor => fieldCursor % next + end do - if (numToSend(iBlock) > 0) then - ! 
Find end of send list - if(.not.associated(fieldCursor % sendList % halos(haloLayer) % exchList)) then - allocate(fieldCursor % sendList % halos(haloLayer) % exchList) - exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList - nullify(exchListPtr % next) - else - exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList - exchListPtr2 => fieldCursor % sendList % halos(haloLayer) % exchList % next - do while(associated(exchListPtr2)) - exchListPtr => exchListPtr % next - exchListPtr2 => exchListPtr % next - end do - - allocate(exchListPtr % next) - exchListPtr => exchListPtr % next - nullify(exchListPtr % next) - end if - - exchListPtr % endPointID = currentProc - exchListPtr % nlist = numToSend(iBlock) - allocate(exchListPtr % srcList(numToSend(iBlock))) - allocate(exchListPtr % destList(numToSend(iBlock))) - exchListPtr % srcList = -1 - exchListPtr % destList = -1 - - kk = 1 - do j=1,nOwnedList - if (recipientList(1,j) /= -1) then - if(ownedBlock(j) == fieldCursor % block % localBlockID) then - exchListPtr % srcList(recipientList(1,j)) = ownedListIndex(j) - exchListPtr % destList(recipientList(1,j)) = recipientList(2,j) - kk = kk + 1 - end if - end if - end do - end if - + ! + ! Gather list of all needed indices and their associated blocks on this task + ! 
+ allocate(neededList(nNeededList)) + allocate(neededBlock(nNeededList)) + fieldCursor => neededListField + i = 1 + do while (associated(fieldCursor)) + neededList(i:i+fieldCursor % dimSizes(1)-1) = fieldCursor % array(:) + neededBlock(i:i+fieldCursor % dimSizes(1)-1) = fieldCursor % block % localBlockID + i = i + fieldCursor % dimSizes(1) fieldCursor => fieldCursor % next end do - nMesgSend = nMesgRecv - call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr) - call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr) - call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr) - call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr) - call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr) - call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr) - call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr) - call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr) - end do + ! + ! Obtain sorted list of global indices owned by this task and the associated local indices and block IDs + ! + allocate(ownedListIndex(nOwnedList)) + allocate(ownedListSorted(2,nOwnedList)) + allocate(recipientList(2,nOwnedList)) + j = 1 + k = 1 + do i=1,nOwnedList + ownedListSorted(1,i) = ownedList(i) + if (i > 1) then + if(ownedBlock(i) /= ownedBlock(i-1)) k = 1 + end if + ownedListIndex(i) = k + ownedListSorted(2,i) = j + j = j + 1 + k = k + 1 + end do + call mpas_quicksort(nOwnedList, ownedListSorted) - ! - ! With our needed list returned to us, build receive lists based on which indices were - ! marked by other tasks - ! 
- do i=0, dminfo % nprocs - 1 - if(i == dminfo % my_proc_id) cycle + allocate(ownedBlockSorted(2,nOwnedList)) + do i=1,nOwnedList + ownedBlockSorted(1,i) = ownedList(i) + ownedBlockSorted(2,i) = ownedBlock(i) + end do + call mpas_quicksort(nOwnedList, ownedBlockSorted) - numToRecv(:) = 0 - packingOrder = 0 - k = 0 - do j=1,nUniqueNeededList - if (ownerListIn(j) == -i) then - k = k + 1 - packingOrder(j) = k + allocate(neededListIndex(nNeededList)) + j = 1 + do i=1,nNeededList + if (i > 1) then + if(neededBlock(i) /= neededBlock(i-1)) j = 1 end if + neededListIndex(i) = j + j = j + 1 end do - fieldCursor => neededListField - do while (associated(fieldCursor)) - do j = 1, fieldCursor % dimSizes(1) - k = mpas_binary_search(uniqueSortedNeededList, 2, 1, nUniqueNeededList, fieldCursor % array(j)) - if(k <= nUniqueNeededList) then - if(ownerListIn(k) == -i) then - iBlock = fieldCursor % block % localBlockID + 1 - numToRecv(iBlock) = numToRecv(iBlock) + 1 + ! + ! Set totalSize to the maximum number of items in any task's needed list + ! + call MPI_Allreduce(nUniqueNeededList, totalSize, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr) + + allocate(ownerListIn(totalSize)) + allocate(ownerListOut(totalSize)) + + nMesgSend = nUniqueNeededList + nMesgRecv = nUniqueNeededList + ownerListOut(1:nUniqueNeededList) = uniqueSortedNeededList(1,1:nUniqueNeededList) + + recvNeighbor = mod(dminfo % my_proc_id + dminfo % nprocs - 1, dminfo % nprocs) + sendNeighbor = mod(dminfo % my_proc_id + 1, dminfo % nprocs) + + allocate(numToSend(nOwnedBlocks)) + allocate(numToRecv(nNeededBlocks)) + + ! Initial send of data to neighbors. 
+ if(dminfo % nProcs == 1) then + ownerListIn = ownerListOut + else + call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr) + call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr) + call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr) + call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr) + call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr) + call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr) + call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr) + call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr) + end if + + ! + ! For each processor (not including ourself), mark the indices that we will provide to + ! that processor in ownerListOut, and build a send list for that processor if we + ! do need to send any indices + ! + do i=2, dminfo % nprocs + recipientList = -1 + numToSend(:) = 0 + totalSent = 0 + + currentProc = mod(dminfo % my_proc_id + dminfo % nprocs - i + 1, dminfo % nprocs) + do j=1,nMesgRecv + if (ownerListIn(j) > 0) then + k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedList, ownerListIn(j)) + if (k <= nOwnedList) then + iBlock = ownedBlock(ownedListSorted(2,k)) + 1 + numToSend(iBlock) = numToSend(iBlock) + 1 + totalSent = totalSent + 1 + + ! recipientList(1,:) represents the index in the srcList to place this data + recipientList(1,ownedListSorted(2,k)) = numToSend(iBlock) + ! 
recipientList(2,:) represnets the index in the buffer to place this data + recipientList(2,ownedListSorted(2,k)) = totalSent + + ownerListOut(j) = -1 * dminfo % my_proc_id + else + ownerListOut(j) = ownerListIn(j) end if + else + ownerListOut(j) = ownerListIn(j) end if end do - fieldCursor => fieldCursor % next - end do - fieldCursor => neededListField - do while (associated(fieldCursor)) - iBlock = fieldCursor % block % localBlockID + 1 + fieldCursor => ownedListField + do while (associated(fieldCursor)) + iBlock = fieldCursor % block % localBlockID + 1 + + if (numToSend(iBlock) > 0) then + ! Find end of send list + if(.not.associated(fieldCursor % sendList % halos(haloLayer) % exchList)) then + allocate(fieldCursor % sendList % halos(haloLayer) % exchList) + exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList + nullify(exchListPtr % next) + else + exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList + exchListPtr2 => fieldCursor % sendList % halos(haloLayer) % exchList % next + do while(associated(exchListPtr2)) + exchListPtr => exchListPtr % next + exchListPtr2 => exchListPtr % next + end do - if (numToRecv(iBlock) > 0) then - if(.not.associated(fieldCursor % recvList % halos(haloLayer) % exchList)) then - allocate(fieldCursor % recvList % halos(haloLayer) % exchList) - exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList - nullify(exchListPtr % next) - else - ! 
Find end of recv list - exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList - exchListPtr2 => fieldCursor % recvList % halos(haloLayer) % exchList % next - do while(associated(exchListPtr2)) + allocate(exchListPtr % next) exchListPtr => exchListPtr % next - exchListPtr2 => exchListPtr % next - end do + nullify(exchListPtr % next) + end if - allocate(exchListPtr % next) - exchListPtr => exchListPtr % next - nullify(exchListPtr % next) + exchListPtr % endPointID = currentProc + exchListPtr % nlist = numToSend(iBlock) + allocate(exchListPtr % srcList(numToSend(iBlock))) + allocate(exchListPtr % destList(numToSend(iBlock))) + exchListPtr % srcList = -1 + exchListPtr % destList = -1 + + kk = 1 + do j=1,nOwnedList + if (recipientList(1,j) /= -1) then + if(ownedBlock(j) == fieldCursor % block % localBlockID) then + exchListPtr % srcList(recipientList(1,j)) = ownedListIndex(j) + exchListPtr % destList(recipientList(1,j)) = recipientList(2,j) + kk = kk + 1 + end if + end if + end do end if - exchListPtr % endPointID = i - exchListPtr % nlist = numToRecv(iBlock) - allocate(exchListPtr % srcList(exchListPtr % nList)) - allocate(exchListPtr % destList(exchListPtr % nList)) - exchListPtr % srcList = -1 - exchListPtr % destList = -1 + fieldCursor => fieldCursor % next + end do + + nMesgSend = nMesgRecv + call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr) + call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr) + call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr) + call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr) + call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr) + call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr) + call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr) + call MPI_Wait(mpi_sreq, 
MPI_STATUS_IGNORE, mpi_ierr) + end do + + ! + ! With our needed list returned to us, build receive lists based on which indices were + ! marked by other tasks + ! + do i=0, dminfo % nprocs - 1 + if(i == dminfo % my_proc_id) cycle + + numToRecv(:) = 0 + packingOrder = 0 + + k = 0 + do j=1,nUniqueNeededList + if (ownerListIn(j) == -i) then + k = k + 1 + packingOrder(j) = k + end if + end do - kk = 0 - do j=1,fieldCursor % dimSizes(1) + fieldCursor => neededListField + do while (associated(fieldCursor)) + do j = 1, fieldCursor % dimSizes(1) k = mpas_binary_search(uniqueSortedNeededList, 2, 1, nUniqueNeededList, fieldCursor % array(j)) if(k <= nUniqueNeededList) then - if (ownerListIn(k) == -i) then - kk = kk + 1 - exchListPtr % srcList(kk) = packingOrder(k) - exchListPtr % destList(kk) = j + offsetList(iBlock) + if(ownerListIn(k) == -i) then + iBlock = fieldCursor % block % localBlockID + 1 + numToRecv(iBlock) = numToRecv(iBlock) + 1 end if end if end do - end if + fieldCursor => fieldCursor % next + end do - fieldCursor => fieldCursor % next - end do - end do + fieldCursor => neededListField + do while (associated(fieldCursor)) + iBlock = fieldCursor % block % localBlockID + 1 + + if (numToRecv(iBlock) > 0) then + if(.not.associated(fieldCursor % recvList % halos(haloLayer) % exchList)) then + allocate(fieldCursor % recvList % halos(haloLayer) % exchList) + exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList + nullify(exchListPtr % next) + else + ! Find end of recv list + exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList + exchListPtr2 => fieldCursor % recvList % halos(haloLayer) % exchList % next + do while(associated(exchListPtr2)) + exchListPtr => exchListPtr % next + exchListPtr2 => exchListPtr % next + end do - ! - ! Free up memory - ! 
- deallocate(numToSend) - deallocate(numToRecv) - deallocate(neededList) - deallocate(neededListIndex) - deallocate(neededBlock) + allocate(exchListPtr % next) + exchListPtr => exchListPtr % next + nullify(exchListPtr % next) + end if + + exchListPtr % endPointID = i + exchListPtr % nlist = numToRecv(iBlock) + allocate(exchListPtr % srcList(exchListPtr % nList)) + allocate(exchListPtr % destList(exchListPtr % nList)) + exchListPtr % srcList = -1 + exchListPtr % destList = -1 + + kk = 0 + do j=1,fieldCursor % dimSizes(1) + k = mpas_binary_search(uniqueSortedNeededList, 2, 1, nUniqueNeededList, fieldCursor % array(j)) + if(k <= nUniqueNeededList) then + if (ownerListIn(k) == -i) then + kk = kk + 1 + exchListPtr % srcList(kk) = packingOrder(k) + exchListPtr % destList(kk) = j + offsetList(iBlock) + end if + end if + end do + end if - deallocate(ownedList) - deallocate(ownedListIndex) - deallocate(ownedBlock) - deallocate(ownedListSorted) - deallocate(ownedBlockSorted) + fieldCursor => fieldCursor % next + end do + end do + + ! + ! Free up memory + ! + deallocate(numToSend) + deallocate(numToRecv) + deallocate(neededList) + deallocate(neededListIndex) + deallocate(neededBlock) + + deallocate(ownedList) + deallocate(ownedListIndex) + deallocate(ownedBlock) + deallocate(ownedListSorted) + deallocate(ownedBlockSorted) - deallocate(recipientList) + deallocate(recipientList) - deallocate(ownerListIn) - deallocate(ownerListOut) + deallocate(ownerListIn) + deallocate(ownerListOut) - deallocate(uniqueSortedNeededList) - deallocate(packingOrder) + deallocate(uniqueSortedNeededList) + deallocate(packingOrder) #endif - ! 
Build Copy Lists - allocate(numToSend(1)) - fieldCursor => ownedListField - do while (associated(fieldCursor)) - iBlock = fieldCursor % block % localBlockID + 1 - nOwnedList = ownedLimitList(iBlock) - allocate(ownedListSorted(2, nOwnedList)) - allocate(recipientList(2, nOwnedList)) - - do i = 1, nOwnedList - ownedListSorted(1, i) = fieldCursor % array(i) - ownedListSorted(2, i) = i - end do - - call mpas_quicksort(nOwnedList, ownedListSorted) + ! Build Copy Lists + allocate(numToSend(1)) + fieldCursor => ownedListField + do while (associated(fieldCursor)) + iBlock = fieldCursor % block % localBlockID + 1 + nOwnedList = ownedLimitList(iBlock) + allocate(ownedListSorted(2, nOwnedList)) + allocate(recipientList(2, nOwnedList)) - fieldCursor2 => neededListField - do while(associated(fieldCursor2)) - if(associated(fieldCursor, fieldCursor2)) then - fieldCursor2 => fieldCursor2 % next - cycle - end if + do i = 1, nOwnedList + ownedListSorted(1, i) = fieldCursor % array(i) + ownedListSorted(2, i) = i + end do - numToSend = 0 - recipientList = -1 + call mpas_quicksort(nOwnedList, ownedListSorted) - do i = 1, fieldCursor2 % dimSizes(1) - k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedList, fieldCursor2 % array(i)) - if (k <= nOwnedList) then - numToSend(1) = numToSend(1) + 1 - ! recipientList(1,:) represents the needed block id - recipientList(1,ownedListSorted(2,k)) = fieldCursor2 % block % localBlockID - ! 
recipientList(2,:) represnets the index in the buffer to place this data - recipientList(2,ownedListSorted(2,k)) = i + fieldCursor2 => neededListField + do while(associated(fieldCursor2)) + if(associated(fieldCursor, fieldCursor2)) then + fieldCursor2 => fieldCursor2 % next + cycle end if - end do - - if(numToSend(1) > 0) then - if(.not.associated(fieldCursor % copyList % halos(haloLayer) % exchList)) then - allocate(fieldCursor % copyList % halos(haloLayer) % exchList) - exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList - nullify(exchListPtr % next) - else - ! Find end of copy list - exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList - exchListPtr2 => fieldCursor % copyList % halos(haloLayer) % exchList % next - do while(associated(exchListPtr2)) + + numToSend = 0 + recipientList = -1 + + do i = 1, fieldCursor2 % dimSizes(1) + k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedList, fieldCursor2 % array(i)) + if (k <= nOwnedList) then + numToSend(1) = numToSend(1) + 1 + ! recipientList(1,:) represents the needed block id + recipientList(1,ownedListSorted(2,k)) = fieldCursor2 % block % localBlockID + ! recipientList(2,:) represnets the index in the buffer to place this data + recipientList(2,ownedListSorted(2,k)) = i + end if + end do + + if(numToSend(1) > 0) then + if(.not.associated(fieldCursor % copyList % halos(haloLayer) % exchList)) then + allocate(fieldCursor % copyList % halos(haloLayer) % exchList) + exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList + nullify(exchListPtr % next) + else + ! 
Find end of copy list + exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList + exchListPtr2 => fieldCursor % copyList % halos(haloLayer) % exchList % next + do while(associated(exchListPtr2)) + exchListPtr => exchListPtr % next + exchListPtr2 => exchListPtr % next + end do + + allocate(exchListPtr % next) exchListPtr => exchListPtr % next - exchListPtr2 => exchListPtr % next - end do + nullify(exchListPtr % next) + end if - allocate(exchListPtr % next) - exchListPtr => exchListPtr % next - nullify(exchListPtr % next) + exchListPtr % endPointID = fieldCursor2 % block % localBlockID + exchListPtr % nlist = numToSend(1) + allocate(exchListPtr % srcList(numToSend(1))) + allocate(exchListPtr % destList(numToSend(1))) + exchListPtr % srcList = -1 + exchListPtr % destList = -1 + + kk = 1 + do j=1,nOwnedList + if(recipientList(1,j) == fieldCursor2 % block % localBlockID) then + exchListPtr % srcList(kk) = j + exchListPtr % destList(kk) = recipientList(2,j) + offSetList(fieldCursor2 % block % localBlockID+1) + kk = kk + 1 + end if + end do end if - - exchListPtr % endPointID = fieldCursor2 % block % localBlockID - exchListPtr % nlist = numToSend(1) - allocate(exchListPtr % srcList(numToSend(1))) - allocate(exchListPtr % destList(numToSend(1))) - exchListPtr % srcList = -1 - exchListPtr % destList = -1 - - kk = 1 - do j=1,nOwnedList - if(recipientList(1,j) == fieldCursor2 % block % localBlockID) then - exchListPtr % srcList(kk) = j - exchListPtr % destList(kk) = recipientList(2,j) + offSetList(fieldCursor2 % block % localBlockID+1) - kk = kk + 1 - end if - end do - end if - fieldCursor2 => fieldCursor2 % next - end do + fieldCursor2 => fieldCursor2 % next + end do - deallocate(recipientList) - deallocate(ownedListSorted) - fieldCursor => fieldCursor % next - end do - deallocate(numToSend) - deallocate(offSetList) - deallocate(ownedLimitList) + deallocate(recipientList) + deallocate(ownedListSorted) + fieldCursor => fieldCursor % next + end do + 
deallocate(numToSend) + deallocate(offSetList) + deallocate(ownedLimitList) + end if end subroutine mpas_dmpar_get_exch_list!}}} - !*********************************************************************** ! ! routine mpas_dmpar_build_comm_lists @@ -1655,7 +2037,7 @@ end subroutine mpas_dmpar_get_exch_list!}}} !> has been made public, so cores have access to it. ! !----------------------------------------------------------------------- - subroutine mpas_dmpar_build_comm_lists(sendExchList, recvExchList, haloLayers, dimsizes, sendCommList, recvCommList) + subroutine mpas_dmpar_build_comm_lists(sendExchList, recvExchList, haloLayers, dimsizes, sendCommList, recvCommList)!{{{ !----------------------------------------------------------------- ! input variables @@ -1691,149 +2073,152 @@ subroutine mpas_dmpar_build_comm_lists(sendExchList, recvExchList, haloLayers, d integer :: nHaloLayers integer :: iDimen integer :: dimSizeProduct ! the product of the size of all dimensions + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - ! We only need the product of all dimension sizes (except the last), so calculate that now - dimSizeProduct = 1 - do iDimen = 1, size(dimsizes) - 1 - dimSizeProduct = dimSizeProduct * dimsizes(iDimen) - enddo - - ! Get size of haloLayers array - nHaloLayers = size(haloLayers) - - ! Allocate communication lists, and setup dead header node. - allocate(sendCommList) - nullify(sendCommList % next) - sendCommList % procID = -1 - sendCommList % nList = 0 - - allocate(recvCommList) - nullify(recvCommList % next) - recvCommList % procID = -1 - recvCommList % nList = 0 - - - ! Determine size of buffers for communication lists - sendListCursor => sendExchList - recvListCursor => recvExchList ! We need to traverse the send and recv exchange lists together in this loop - do while(associated(sendListCursor)) - - ! Need to aggregate across halo layers - do iHalo = 1, nHaloLayers + if ( threadNum == 0 ) then + ! 
We only need the product of all dimension sizes (except the last), so calculate that now + dimSizeProduct = 1 + do iDimen = 1, size(dimsizes) - 1 + dimSizeProduct = dimSizeProduct * dimsizes(iDimen) + enddo - ! Determine size from send lists & build the send list - exchListPtr => sendListCursor % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) ! loop through items representing different endPoint Id's - comm_list_found = .false. - - commListPtr => sendCommList - do while(associated(commListPtr)) ! Loop through items representing different procs being sent to - if(commListPtr % procID == exchListPtr % endPointId) then - comm_list_found = .true. - commListPtr % nList = commListPtr % nList + exchListPtr % nList * dimSizeProduct - exit - end if - - commListPtr => commListPtr % next - end do - - if(.not. comm_list_found) then ! Add an item to the sendCommList for this endpoint - commListPtr => sendCommList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do + ! Get size of haloLayers array + nHaloLayers = size(haloLayers) - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * dimSizeProduct - end if + ! Allocate communication lists, and setup dead header node. + allocate(sendCommList) + nullify(sendCommList % next) + sendCommList % procID = -1 + sendCommList % nList = 0 - exchListPtr => exchListPtr % next - end do + allocate(recvCommList) + nullify(recvCommList % next) + recvCommList % procID = -1 + recvCommList % nList = 0 - ! Setup recv lists - exchListPtr => recvListCursor % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. 
- commListPtr => recvCommList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointId) then - comm_list_found = .true. - commListPtr % nList = commListPtr % nList + exchListPtr % nList * dimSizeProduct - exit - end if + ! Determine size of buffers for communication lists + sendListCursor => sendExchList + recvListCursor => recvExchList ! We need to traverse the send and recv exchange lists together in this loop + do while(associated(sendListCursor)) - commListPtr => commListPtr % next - end do + ! Need to aggregate across halo layers + do iHalo = 1, nHaloLayers - if(.not. comm_list_found) then - commListPtr => recvCommList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do + ! Determine size from send lists & build the send list + exchListPtr => sendListCursor % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) ! loop through items representing different endPoint Id's + comm_list_found = .false. - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * dimSizeProduct - end if + commListPtr => sendCommList + do while(associated(commListPtr)) ! Loop through items representing different procs being sent to + if(commListPtr % procID == exchListPtr % endPointId) then + comm_list_found = .true. + commListPtr % nList = commListPtr % nList + exchListPtr % nList * dimSizeProduct + exit + end if - exchListPtr => exchListPtr % next - end do - end do ! halo loop + commListPtr => commListPtr % next + end do - sendListCursor => sendListCursor % next ! Advance to next block (only happens if more than 1 block per proc) - recvListCursor => recvListCursor % next ! Advance to next block (only happens if more than 1 block per proc) - ! 
We need to traverse the send and recv exchange lists together in this loop (since we cannot traverse the field itself) - end do ! sendListCursor (block loop) + if(.not. comm_list_found) then ! Add an item to the sendCommList for this endpoint + commListPtr => sendCommList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do - ! Remove the dead head pointer on send and recv list - commListPtr => sendCommList - sendCommList => sendCommList % next - deallocate(commListPtr) + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * dimSizeProduct + end if + + exchListPtr => exchListPtr % next + end do + + ! Setup recv lists + exchListPtr => recvListCursor % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + comm_list_found = .false. + + commListPtr => recvCommList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointId) then + comm_list_found = .true. + commListPtr % nList = commListPtr % nList + exchListPtr % nList * dimSizeProduct + exit + end if - commListPtr => recvCommList - recvCommList => recvCommList % next - deallocate(commListPtr) + commListPtr => commListPtr % next + end do - ! Determine size of receive lists - commListPtr => recvCommList - do while(associated(commListPtr)) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 + if(.not. 
comm_list_found) then + commListPtr => recvCommList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do - recvListCursor => recvExchList - do while(associated(recvListCursor)) - exchListPtr => recvListCursor % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - nAdded = max(nAdded, maxval(exchListPtr % srcList) * dimSizeProduct) - end if - exchListPtr => exchListPtr % next - end do + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * dimSizeProduct + end if + + exchListPtr => exchListPtr % next + end do + end do ! halo loop + + sendListCursor => sendListCursor % next ! Advance to next block (only happens if more than 1 block per proc) + recvListCursor => recvListCursor % next ! Advance to next block (only happens if more than 1 block per proc) + ! We need to traverse the send and recv exchange lists together in this loop (since we cannot traverse the field itself) + end do ! sendListCursor (block loop) + + ! Remove the dead head pointer on send and recv list + commListPtr => sendCommList + sendCommList => sendCommList % next + deallocate(commListPtr) + + commListPtr => recvCommList + recvCommList => recvCommList % next + deallocate(commListPtr) + + ! 
Determine size of receive lists + commListPtr => recvCommList + do while(associated(commListPtr)) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + + recvListCursor => recvExchList + do while(associated(recvListCursor)) + exchListPtr => recvListCursor % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + nAdded = max(nAdded, maxval(exchListPtr % srcList) * dimSizeProduct) + end if + exchListPtr => exchListPtr % next + end do - recvListCursor => recvListCursor % next - end do - bufferOffset = bufferOffset + nAdded - end do - commListPtr % nList = bufferOffset + recvListCursor => recvListCursor % next + end do + bufferOffset = bufferOffset + nAdded + end do + commListPtr % nList = bufferOffset - commListPtr => commListPtr % next - end do ! commListPtr + commListPtr => commListPtr % next + end do ! commListPtr + end if !-------------------------------------------------------------------- - end subroutine mpas_dmpar_build_comm_lists - + end subroutine mpas_dmpar_build_comm_lists!}}} !----------------------------------------------------------------------- @@ -1865,264 +2250,267 @@ subroutine mpas_dmpar_alltoall_field1d_integer(fieldIn, fieldout, haloLayersIn)! 
integer :: nAdded, bufferOffset integer :: mpi_ierr integer :: iHalo, iBuffer, i - integer :: nHaloLayers + integer :: nHaloLayers, threadNum integer, dimension(:), pointer :: haloLayers + threadNum = mpas_threading_get_thread_num() dminfo => fieldIn % block % domain % dminfo - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(fieldIn % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(fieldIn % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - nullify(sendList) - nullify(recvList) - - ! Setup receive lists. - do iHalo = 1, nHaloLayers - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => recvList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next - end do - - ! If no comm list exists, create a new one. - if(.not. 
comm_list_found) then - if(.not.associated(recvList)) then - allocate(recvList) - nullify(recvList % next) - commListPtr => recvList - else - commListPtr => recvList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = 0 - end if - - exchListPtr => exchListPtr % next - end do - - fieldOutPtr => fieldOutPtr % next - end do - end do + nullify(sendList) + nullify(recvList) - ! Determine size of receive list buffers. - commListPtr => recvList - do while(associated(commListPtr)) - bufferOffset = 0 + ! Setup receive lists. do iHalo = 1, nHaloLayers - nAdded = 0 fieldOutPtr => fieldOut do while(associated(fieldOutPtr)) exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - nAdded = max(nAdded, maxval(exchListPtr % srcList)) + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => recvList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(recvList)) then + allocate(recvList) + nullify(recvList % next) + commListPtr => recvList + else + commListPtr => recvList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = 0 end if + exchListPtr => exchListPtr % next end do - + fieldOutPtr => fieldOutPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr % nList = nAdded - - commListPtr => commListPtr % next - end do - - ! Allocate buffers for receives, and initiate mpi_irecv calls. - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - commListPtr % ibuffer = 0 - call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do - - ! Setup send lists, and determine the size of their buffers. - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => sendList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next + + ! Determine size of receive list buffers. 
+ commListPtr => recvList + do while(associated(commListPtr)) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + nAdded = max(nAdded, maxval(exchListPtr % srcList)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next end do - - ! If no comm list exists, create a new one. - if(.not. comm_list_found) then - if(.not.associated(sendList)) then - allocate(sendList) - nullify(sendList % next) - commListPtr => sendList - else - commListPtr => sendList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList - end if - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - - fieldInPtr => fieldInPtr % next + commListPtr % nList = nAdded + + commListPtr => commListPtr % next + end do + + ! Allocate buffers for receives, and initiate mpi_irecv calls. + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + commListPtr % ibuffer = 0 + call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next end do - end do - - ! 
Allocate sendLists, copy data into buffer, and initiate mpi_isends - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - bufferOffset = 0 + + ! Setup send lists, and determine the size of their buffers. do iHalo = 1, nHaloLayers - nAdded = 0 fieldInPtr => fieldIn do while(associated(fieldInPtr)) exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - iBuffer = exchListPtr % destList(i) + bufferOffset - commListPtr % ibuffer(iBuffer) = fieldInPtr % array(exchListPtr % srcList(i)) - nAdded = nAdded + 1 - end do + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => sendList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(sendList)) then + allocate(sendList) + nullify(sendList % next) + commListPtr => sendList + else + commListPtr => sendList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList end if - + exchListPtr => exchListPtr % next end do - + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, & - commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - - commListPtr => commListPtr % next - end do + ! Allocate sendLists, copy data into buffer, and initiate mpi_isends + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + iBuffer = exchListPtr % destList(i) + bufferOffset + commListPtr % ibuffer(iBuffer) = fieldInPtr % array(exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do + end if -#endif + exchListPtr => exchListPtr % next + end do - ! Handle Local Copies. 
Only local copies if no MPI - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldOutPtr % array(exchListPtr % destList(i)) = fieldInPtr % array(exchListPtr % srcList(i)) - end do - end if - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - fieldInPtr => fieldInPtr % next + + call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, & + commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + + commListPtr => commListPtr % next end do - end do -#ifdef _MPI - ! Wait for MPI_Irecv's to finish, and unpack data. - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) +#endif - bufferOffset = 0 + ! Handle Local Copies. 
Only local copies if no MPI do iHalo = 1, nHaloLayers - nAdded = 0 - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - iBuffer = exchListPtr % srcList(i) + bufferOffset - fieldOutPtr % array(exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer) - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList)) - end if + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldOutPtr % array(exchListPtr % destList(i)) = fieldInPtr % array(exchListPtr % srcList(i)) + end do + end if + fieldOutPtr => fieldOutPtr % next + end do + exchListPtr => exchListPtr % next end do - - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr => commListPtr % next - end do - - ! Wait for MPI_Isend's to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do +#ifdef _MPI + ! Wait for MPI_Irecv's to finish, and unpack data. 
+ commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + iBuffer = exchListPtr % srcList(i) + bufferOffset + fieldOutPtr % array(exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer) + end do + nAdded = max(nAdded, maxval(exchListPtr % srcList)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next + end do + bufferOffset = bufferOffset + nAdded + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + commListPtr => commListPtr % next + end do + + ! Wait for MPI_Isend's to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do + + ! Destroy commLists. + call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_alltoall_field1d_integer!}}} @@ -2155,265 +2543,269 @@ subroutine mpas_dmpar_alltoall_field2d_integer(fieldIn, fieldout, haloLayersIn)! 
integer :: nAdded, bufferOffset integer :: mpi_ierr integer :: iHalo, iBuffer, i, j - integer :: nHaloLayers + integer :: nHaloLayers, threadNum integer, dimension(:), pointer :: haloLayers dminfo => fieldIn % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(fieldIn % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(fieldIn % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - nullify(sendList) - nullify(recvList) - - ! Setup receive lists - do iHalo = 1, nHaloLayers - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => recvList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next - end do - - ! If no comm list exists, create a new one. - if(.not. 
comm_list_found) then - if(.not.associated(recvList)) then - allocate(recvList) - nullify(recvList % next) - commListPtr => recvList - else - commListPtr => recvList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - - commListPtr % procID = exchListPtr % endPointID - end if - - exchListPtr => exchListPtr % next - end do - - fieldOutPtr => fieldOutPtr % next - end do - end do + nullify(sendList) + nullify(recvList) - ! Determine size of receive list buffers. - commListPtr => recvList - do while(associated(commListPtr)) - bufferOffset = 0 + ! Setup receive lists do iHalo = 1, nHaloLayers - nAdded = 0 fieldOutPtr => fieldOut do while(associated(fieldOutPtr)) exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1)) + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => recvList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(recvList)) then + allocate(recvList) + nullify(recvList % next) + commListPtr => recvList + else + commListPtr => recvList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + + commListPtr % procID = exchListPtr % endPointID end if + exchListPtr => exchListPtr % next end do - + fieldOutPtr => fieldOutPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr % nList = bufferOffset - - commListPtr => commListPtr % next - end do - - ! Allocate buffers for receives, and initiate mpi_irecv calls. - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do - - ! Setup send lists, and determine the size of their buffers. - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => sendList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next + + ! Determine size of receive list buffers. 
+ commListPtr => recvList + do while(associated(commListPtr)) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next end do - - ! If no comm list exists, create a new one. - if(.not. comm_list_found) then - if(.not.associated(sendList)) then - allocate(sendList) - nullify(sendList % next) - commListPtr => sendList - else - commListPtr => sendList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) - end if - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - - fieldInPtr => fieldInPtr % next + commListPtr % nList = bufferOffset + + commListPtr => commListPtr % next + end do + + ! Allocate buffers for receives, and initiate mpi_irecv calls. + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next end do - end do - - ! 
Allocate sendLists, copy data into buffer, and initiate mpi_isends - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - bufferOffset = 0 + + ! Setup send lists, and determine the size of their buffers. do iHalo = 1, nHaloLayers - nAdded = 0 fieldInPtr => fieldIn do while(associated(fieldInPtr)) exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldInPtr % dimSizes(1) - iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) + j + bufferOffset - commListPtr % ibuffer(iBuffer) = fieldInPtr % array(j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => sendList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(sendList)) then + allocate(sendList) + nullify(sendList % next) + commListPtr => sendList + else + commListPtr => sendList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next end do - end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) end if - + exchListPtr => exchListPtr % next end do - + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, & - commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + ! Allocate sendLists, copy data into buffer, and initiate mpi_isends + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldInPtr % dimSizes(1) + iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) + j + bufferOffset + commListPtr % ibuffer(iBuffer) = fieldInPtr % array(j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do + end do + end if - commListPtr => commListPtr % next - end do -#endif + exchListPtr => exchListPtr % next + end do - ! Handle Local Copies. 
Only local copies if no MPI - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldOutPtr % array(:, exchListPtr % destList(i)) = fieldInPtr % array(:, exchListPtr % srcList(i)) - end do - end if - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - fieldInPtr => fieldInPtr % next + + call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, & + commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + + commListPtr => commListPtr % next end do - end do -#ifdef _MPI - ! Wait for MPI_Irecv's to finish, and unpack data. - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) +#endif - bufferOffset = 0 + ! Handle Local Copies. 
Only local copies if no MPI do iHalo = 1, nHaloLayers - nAdded = 0 - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldOutPtr % dimSizes(1) - iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(1) + j + bufferOffset - fieldOutPtr % array(j, exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer) + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldOutPtr % array(:, exchListPtr % destList(i)) = fieldInPtr % array(:, exchListPtr % srcList(i)) end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1)) - end if + end if + fieldOutPtr => fieldOutPtr % next + end do + exchListPtr => exchListPtr % next end do - - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr => commListPtr % next - end do +#ifdef _MPI + ! Wait for MPI_Irecv's to finish, and unpack data. 
+ commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldOutPtr % dimSizes(1) + iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(1) + j + bufferOffset + fieldOutPtr % array(j, exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer) + end do + end do + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next + end do + bufferOffset = bufferOffset + nAdded + end do + + commListPtr => commListPtr % next + end do - ! Wait for MPI_Isend's to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! Wait for MPI_Isend's to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. + call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_alltoall_field2d_integer!}}} @@ -2446,272 +2838,275 @@ subroutine mpas_dmpar_alltoall_field3d_integer(fieldIn, fieldout, haloLayersIn)! 
integer :: nAdded, bufferOffset integer :: mpi_ierr integer :: iHalo, iBuffer, i, j, k - integer :: nHaloLayers + integer :: nHaloLayers, threadNum integer, dimension(:), pointer :: haloLayers dminfo => fieldIn % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(fieldIn % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(fieldIn % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - nullify(sendList) - nullify(recvList) - - ! Setup receive lists. - do iHalo = 1, nHaloLayers - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => recvList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next - end do - - ! If no comm list exists, create a new one. - if(.not. 
comm_list_found) then - if(.not.associated(recvList)) then - allocate(recvList) - nullify(recvList % next) - commListPtr => recvList - else - commListPtr => recvList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) - end if + nullify(sendList) + nullify(recvList) - exchListPtr => exchListPtr % next - end do - - fieldOutPtr => fieldOutPtr % next - end do - end do - - ! Determine size of receive list buffers - commListPtr => recvList - do while(associated(commListPtr)) - bufferOffset = 0 + ! Setup receive lists. do iHalo = 1, nHaloLayers - nAdded = 0 fieldOutPtr => fieldOut do while(associated(fieldOutPtr)) exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)) + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => recvList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(recvList)) then + allocate(recvList) + nullify(recvList % next) + commListPtr => recvList + else + commListPtr => recvList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) end if + exchListPtr => exchListPtr % next end do - + fieldOutPtr => fieldOutPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr % nList = nAdded - - commListPtr => commListPtr % next - end do - - ! Allocate buffers for receives, and initiate mpi_irecv calls. - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do - - ! Setup send lists, and determine the size of their buffers. - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => sendList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next + + ! 
Determine size of receive list buffers + commListPtr => recvList + do while(associated(commListPtr)) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next end do - - ! If no comm list exists, create a new one. - if(.not. comm_list_found) then - if(.not.associated(sendList)) then - allocate(sendList) - nullify(sendList % next) - commListPtr => sendList - else - commListPtr => sendList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) - end if - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - - fieldInPtr => fieldInPtr % next + commListPtr % nList = nAdded + + commListPtr => commListPtr % next + end do + + ! Allocate buffers for receives, and initiate mpi_irecv calls. + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next end do - end do - - ! 
Allocate sendLists, copy data into buffer, and initiate mpi_isends - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - bufferOffset = 0 + + ! Setup send lists, and determine the size of their buffers. do iHalo = 1, nHaloLayers - nAdded = 0 fieldInPtr => fieldIn do while(associated(fieldInPtr)) exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldInPtr % dimSizes(2) - do k = 1, fieldInPtr % dimSizes(1) - iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) + (j-1) * fieldInPtr % dimSizes(1) + k + bufferOffset - commListPtr % ibuffer(iBuffer) = fieldInPtr % array(k, j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 - end do + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => sendList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(sendList)) then + allocate(sendList) + nullify(sendList % next) + commListPtr => sendList + else + commListPtr => sendList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next end do - end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) end if - + exchListPtr => exchListPtr % next end do - + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, & - commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - - commListPtr => commListPtr % next - end do + ! Allocate sendLists, copy data into buffer, and initiate mpi_isends + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldInPtr % dimSizes(2) + do k = 1, fieldInPtr % dimSizes(1) + iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) + (j-1) * fieldInPtr % dimSizes(1) + k + bufferOffset + commListPtr % ibuffer(iBuffer) = fieldInPtr % array(k, j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do + end do + end do + end if -#endif + exchListPtr => exchListPtr % next + end do - ! Handle Local Copies. 
Only local copies if no MPI - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldOutPtr % array(:, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, exchListPtr % srcList(i)) - end do - end if - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - fieldInPtr => fieldInPtr % next + + call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, & + commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + + commListPtr => commListPtr % next end do - end do -#ifdef _MPI - ! Wait for MPI_Irecv's to finish, and unpack data. - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) +#endif - bufferOffset = 0 + ! Handle Local Copies. 
Only local copies if no MPI do iHalo = 1, nHaloLayers - nAdded = 0 - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldOutPtr % dimSizes(2) - do k = 1, fieldOutPtr % dimSizes(1) - iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) + (j-1) * fieldOutPtr % dimSizes(1) + k + bufferOffset - fieldOutPtr % array(k, j, exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer) - end do + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldOutPtr % array(:, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, exchListPtr % srcList(i)) end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)) - end if + end if + fieldOutPtr => fieldOutPtr % next + end do + exchListPtr => exchListPtr % next end do - - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr => commListPtr % next - end do +#ifdef _MPI + ! Wait for MPI_Irecv's to finish, and unpack data. 
+ commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldOutPtr % dimSizes(2) + do k = 1, fieldOutPtr % dimSizes(1) + iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) + (j-1) * fieldOutPtr % dimSizes(1) + k + bufferOffset + fieldOutPtr % array(k, j, exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer) + end do + end do + end do + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next + end do + bufferOffset = bufferOffset + nAdded + end do + + commListPtr => commListPtr % next + end do - ! Wait for MPI_Isend's to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! Wait for MPI_Isend's to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. 
+ call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_alltoall_field3d_integer!}}} @@ -2744,262 +3139,265 @@ subroutine mpas_dmpar_alltoall_field1d_real(fieldIn, fieldout, haloLayersIn)!{{{ integer :: nAdded, bufferOffset integer :: mpi_ierr integer :: iHalo, iBuffer, i - integer :: nHaloLayers + integer :: nHaloLayers, threadNum integer, dimension(:), pointer :: haloLayers dminfo => fieldIn % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(fieldIn % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(fieldIn % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - nullify(sendList) - nullify(recvList) - - ! Setup receive lists. - do iHalo = 1, nHaloLayers - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => recvList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next - end do - - ! If no comm list exists, create a new one. - if(.not. 
comm_list_found) then - if(.not.associated(recvList)) then - allocate(recvList) - nullify(recvList % next) - commListPtr => recvList - else - commListPtr => recvList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - - commListPtr % procID = exchListPtr % endPointID - end if - - exchListPtr => exchListPtr % next - end do - - fieldOutPtr => fieldOutPtr % next - end do - end do + nullify(sendList) + nullify(recvList) - ! Determine size of receive list buffers - commListPtr => recvList - do while(associated(commListPtr)) - bufferOffset = 0 + ! Setup receive lists. do iHalo = 1, nHaloLayers - nAdded = 0 fieldOutPtr => fieldOut do while(associated(fieldOutPtr)) exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - nAdded = max(nAdded, maxval(exchListPtr % srcList)) + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => recvList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(recvList)) then + allocate(recvList) + nullify(recvList % next) + commListPtr => recvList + else + commListPtr => recvList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + + commListPtr % procID = exchListPtr % endPointID end if + exchListPtr => exchListPtr % next end do - + fieldOutPtr => fieldOutPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr % nList = nAdded - - commListPtr => commListPtr % next - end do - - ! Allocate buffers for receives, and initiate mpi_irecv calls. - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do - - ! Setup send lists, and determine the size of their buffers. - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => sendList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next + + ! 
Determine size of receive list buffers + commListPtr => recvList + do while(associated(commListPtr)) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + nAdded = max(nAdded, maxval(exchListPtr % srcList)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next end do - - ! If no comm list exists, create a new one. - if(.not. comm_list_found) then - if(.not.associated(sendList)) then - allocate(sendList) - nullify(sendList % next) - commListPtr => sendList - else - commListPtr => sendList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList - end if - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - - fieldInPtr => fieldInPtr % next + commListPtr % nList = nAdded + + commListPtr => commListPtr % next + end do + + ! Allocate buffers for receives, and initiate mpi_irecv calls. + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next end do - end do - - ! 
Allocate sendLists, copy data into buffer, and initiate mpi_isends - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - bufferOffset = 0 + + ! Setup send lists, and determine the size of their buffers. do iHalo = 1, nHaloLayers - nAdded = 0 fieldInPtr => fieldIn do while(associated(fieldInPtr)) exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - iBuffer = exchListPtr % destList(i) + bufferOffset - commListPtr % rbuffer(iBuffer) = fieldInPtr % array(exchListPtr % srcList(i)) - nAdded = nAdded + 1 - end do + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => sendList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(sendList)) then + allocate(sendList) + nullify(sendList % next) + commListPtr => sendList + else + commListPtr => sendList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList end if - + exchListPtr => exchListPtr % next end do - + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, & - commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - - commListPtr => commListPtr % next - end do + ! Allocate sendLists, copy data into buffer, and initiate mpi_isends + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + iBuffer = exchListPtr % destList(i) + bufferOffset + commListPtr % rbuffer(iBuffer) = fieldInPtr % array(exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do + end if -#endif + exchListPtr => exchListPtr % next + end do - ! Handle Local Copies. 
Only local copies if no MPI - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldOutPtr % array(exchListPtr % destList(i)) = fieldInPtr % array(exchListPtr % srcList(i)) - end do - end if - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - fieldInPtr => fieldInPtr % next + + call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, & + commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + + commListPtr => commListPtr % next end do - end do -#ifdef _MPI - ! Wait for MPI_Irecv's to finish, and unpack data. - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) +#endif - bufferOffset = 0 + ! Handle Local Copies. 
Only local copies if no MPI do iHalo = 1, nHaloLayers - nAdded = 0 - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - iBuffer = exchListPtr % srcList(i) + bufferOffset - fieldOutPtr % array(exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer) - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList)) - end if + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldOutPtr % array(exchListPtr % destList(i)) = fieldInPtr % array(exchListPtr % srcList(i)) + end do + end if + fieldOutPtr => fieldOutPtr % next + end do + exchListPtr => exchListPtr % next end do - - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr => commListPtr % next - end do +#ifdef _MPI + ! Wait for MPI_Irecv's to finish, and unpack data. 
+ commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + iBuffer = exchListPtr % srcList(i) + bufferOffset + fieldOutPtr % array(exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer) + end do + nAdded = max(nAdded, maxval(exchListPtr % srcList)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next + end do + bufferOffset = bufferOffset + nAdded + end do + + commListPtr => commListPtr % next + end do - ! Wait for MPI_Isend's to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! Wait for MPI_Isend's to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. 
+ call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_alltoall_field1d_real!}}} @@ -3032,266 +3430,269 @@ subroutine mpas_dmpar_alltoall_field2d_real(fieldIn, fieldout, haloLayersIn)!{{{ integer :: nAdded, bufferOffset integer :: mpi_ierr integer :: iHalo, iBuffer, i, j - integer :: nHaloLayers + integer :: nHaloLayers, threadNum integer, dimension(:), pointer :: haloLayers dminfo => fieldIn % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(fieldIn % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(fieldIn % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - nullify(sendList) - nullify(recvList) - - ! Setup receive lists, and determine the size of their buffers. - do iHalo = 1, nHaloLayers - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => recvList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next - end do - - ! 
If no comm list exists, create a new one. - if(.not. comm_list_found) then - if(.not.associated(recvList)) then - allocate(recvList) - nullify(recvList % next) - commListPtr => recvList - else - commListPtr => recvList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - - commListPtr % procID = exchListPtr % endPointID - end if - - exchListPtr => exchListPtr % next - end do - - fieldOutPtr => fieldOutPtr % next - end do - end do + nullify(sendList) + nullify(recvList) - ! Determine size of receive list buffers. - commListPtr => recvList - do while(associated(commListPtr)) - bufferOffset = 0 + ! Setup receive lists, and determine the size of their buffers. do iHalo = 1, nHaloLayers - nAdded = 0 fieldOutPtr => fieldOut do while(associated(fieldOutPtr)) exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1)) + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => recvList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(recvList)) then + allocate(recvList) + nullify(recvList % next) + commListPtr => recvList + else + commListPtr => recvList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + + commListPtr % procID = exchListPtr % endPointID end if + exchListPtr => exchListPtr % next end do - + fieldOutPtr => fieldOutPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr % nList = nAdded - - commListPtr => commListPtr % next - end do - - ! Allocate buffers for receives, and initiate mpi_irecv calls. - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do - - ! Setup send lists, and determine the size of their buffers. - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => sendList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next + + ! Determine size of receive list buffers. 
+ commListPtr => recvList + do while(associated(commListPtr)) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next end do - - ! If no comm list exists, create a new one. - if(.not. comm_list_found) then - if(.not.associated(sendList)) then - allocate(sendList) - nullify(sendList % next) - commListPtr => sendList - else - commListPtr => sendList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) - end if - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - - fieldInPtr => fieldInPtr % next + commListPtr % nList = nAdded + + commListPtr => commListPtr % next end do - end do - - ! Allocate sendLists, copy data into buffer, and initiate mpi_isends - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - bufferOffset = 0 + + ! Allocate buffers for receives, and initiate mpi_irecv calls. 
+ commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next + end do + + ! Setup send lists, and determine the size of their buffers. do iHalo = 1, nHaloLayers - nAdded = 0 fieldInPtr => fieldIn do while(associated(fieldInPtr)) exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldInPtr % dimSizes(1) - iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) + j + bufferOffset - commListPtr % rbuffer(iBuffer) = fieldInPtr % array(j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => sendList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(sendList)) then + allocate(sendList) + nullify(sendList % next) + commListPtr => sendList + else + commListPtr => sendList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next end do - end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) end if - + exchListPtr => exchListPtr % next end do - + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, & - commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - - commListPtr => commListPtr % next - end do + ! Allocate sendLists, copy data into buffer, and initiate mpi_isends + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldInPtr % dimSizes(1) + iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) + j + bufferOffset + commListPtr % rbuffer(iBuffer) = fieldInPtr % array(j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do + end do + end if -#endif + exchListPtr => exchListPtr % next + end do - ! Handle Local Copies. 
Only local copies if no MPI - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldOutPtr % array(:, exchListPtr % destList(i)) = fieldInPtr % array(:, exchListPtr % srcList(i)) - end do - end if - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - fieldInPtr => fieldInPtr % next + + call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, & + commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + + commListPtr => commListPtr % next end do - end do -#ifdef _MPI - ! Wait for MPI_Irecv's to finish, and unpack data. - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) +#endif - bufferOffset = 0 + ! Handle Local Copies. 
Only local copies if no MPI do iHalo = 1, nHaloLayers - nAdded = 0 - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldOutPtr % dimSizes(1) - iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(1) + j + bufferOffset - fieldOutPtr % array(j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer) + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldOutPtr % array(:, exchListPtr % destList(i)) = fieldInPtr % array(:, exchListPtr % srcList(i)) end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1)) - end if + end if + fieldOutPtr => fieldOutPtr % next + end do + exchListPtr => exchListPtr % next end do - - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr => commListPtr % next - end do +#ifdef _MPI + ! Wait for MPI_Irecv's to finish, and unpack data. 
+ commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldOutPtr % dimSizes(1) + iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(1) + j + bufferOffset + fieldOutPtr % array(j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer) + end do + end do + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next + end do + bufferOffset = bufferOffset + nAdded + end do + + commListPtr => commListPtr % next + end do - ! Wait for MPI_Isend's to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! Wait for MPI_Isend's to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. 
+ call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_alltoall_field2d_real!}}} @@ -3324,276 +3725,279 @@ subroutine mpas_dmpar_alltoall_field3d_real(fieldIn, fieldout, haloLayersIn)!{{{ integer :: nAdded, bufferOffset integer :: mpi_ierr integer :: iHalo, iBuffer, i, j, k - integer :: nHaloLayers + integer :: nHaloLayers, threadNum integer, dimension(:), pointer :: haloLayers dminfo => fieldIn % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(fieldIn % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if - -#ifdef _MPI - nullify(sendList) - nullify(recvList) - - ! Setup receive lists. - do iHalo = 1, nHaloLayers - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => recvList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next - end do - - ! If no comm list exists, create a new one. - if(.not. 
comm_list_found) then - if(.not.associated(recvList)) then - allocate(recvList) - nullify(recvList % next) - commListPtr => recvList - else - commListPtr => recvList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) - end if - - exchListPtr => exchListPtr % next + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) end do - - fieldOutPtr => fieldOutPtr % next - end do - end do + else + nHaloLayers = size(fieldIn % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if - ! Determine size of receive list buffers. - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) +#ifdef _MPI + nullify(sendList) + nullify(recvList) - bufferOffset = 0 + ! Setup receive lists. do iHalo = 1, nHaloLayers - nAdded = 0 fieldOutPtr => fieldOut do while(associated(fieldOutPtr)) exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)) + comm_list_found = .false. + + ! Search for an already created commList to this processor. 
+ commListPtr => recvList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. comm_list_found) then + if(.not.associated(recvList)) then + allocate(recvList) + nullify(recvList % next) + commListPtr => recvList + else + commListPtr => recvList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) end if + exchListPtr => exchListPtr % next end do - + fieldOutPtr => fieldOutPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr % nList = nAdded - - commListPtr => commListPtr % next - end do - - ! Allocate buffers for receives, and initiate mpi_irecv calls. - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do - - ! Setup send lists, and determine the size of their buffers. - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. 
- commListPtr => sendList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next + + ! Determine size of receive list buffers. + commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next end do - - ! If no comm list exists, create a new one. - if(.not. comm_list_found) then - if(.not.associated(sendList)) then - allocate(sendList) - nullify(sendList % next) - commListPtr => sendList - else - commListPtr => sendList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) - end if - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - - fieldInPtr => fieldInPtr % next + commListPtr % nList = nAdded + + commListPtr => commListPtr % next end do - end do - - ! 
Allocate sendLists, copy data into buffer, and initiate mpi_isends - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - bufferOffset = 0 + + ! Allocate buffers for receives, and initiate mpi_irecv calls. + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next + end do + + ! Setup send lists, and determine the size of their buffers. do iHalo = 1, nHaloLayers - nAdded = 0 fieldInPtr => fieldIn do while(associated(fieldInPtr)) exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldInPtr % dimSizes(2) - do k = 1, fieldInPtr % dimSizes(1) - iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) + (j-1) * fieldInPtr % dimSizes(1) + k + bufferOffset - commListPtr % rbuffer(iBuffer) = fieldInPtr % array(k, j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 - end do + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => sendList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(sendList)) then + allocate(sendList) + nullify(sendList % next) + commListPtr => sendList + else + commListPtr => sendList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next end do - end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) end if - + exchListPtr => exchListPtr % next end do - + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, & - commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - - commListPtr => commListPtr % next - end do + ! Allocate sendLists, copy data into buffer, and initiate mpi_isends + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldInPtr % dimSizes(2) + do k = 1, fieldInPtr % dimSizes(1) + iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) + (j-1) * fieldInPtr % dimSizes(1) + k + bufferOffset + commListPtr % rbuffer(iBuffer) = fieldInPtr % array(k, j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do + end do + end do + end if -#endif + exchListPtr => exchListPtr % next + end do - ! Handle Local Copies. 
Only local copies if no MPI - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldOutPtr % array(:, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, exchListPtr % srcList(i)) - end do - end if - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - fieldInPtr => fieldInPtr % next + + call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, & + commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + + commListPtr => commListPtr % next end do - end do -#ifdef _MPI - ! Wait for MPI_Irecv's to finish, and unpack data. - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) +#endif - bufferOffset = 0 + ! Handle Local Copies. 
Only local copies if no MPI do iHalo = 1, nHaloLayers - nAdded = 0 - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldOutPtr % dimSizes(2) - do k = 1, fieldOutPtr % dimSizes(1) - iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) + (j-1) * fieldOutPtr % dimSizes(1) + k + bufferOffset - fieldOutPtr % array(k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer) - end do + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldOutPtr % array(:, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, exchListPtr % srcList(i)) end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)) - end if + end if + fieldOutPtr => fieldOutPtr % next + end do + exchListPtr => exchListPtr % next end do - - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr => commListPtr % next - end do +#ifdef _MPI + ! Wait for MPI_Irecv's to finish, and unpack data. 
+ commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldOutPtr % dimSizes(2) + do k = 1, fieldOutPtr % dimSizes(1) + iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) + (j-1) * fieldOutPtr % dimSizes(1) + k + bufferOffset + fieldOutPtr % array(k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer) + end do + end do + end do + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)) + end if + exchListPtr => exchListPtr % next + end do - ! Wait for MPI_Isend's to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + fieldOutPtr => fieldOutPtr % next + end do + bufferOffset = bufferOffset + nAdded + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) -#endif + commListPtr => commListPtr % next + end do - deallocate(haloLayers) + ! Wait for MPI_Isend's to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - end subroutine mpas_dmpar_alltoall_field3d_real!}}} + ! Destroy commLists. 
+ call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) +#endif + + deallocate(haloLayers) + end if + + end subroutine mpas_dmpar_alltoall_field3d_real!}}} !----------------------------------------------------------------------- ! routine mpas_dmpar_alltoall_field4d_real @@ -3624,282 +4028,285 @@ subroutine mpas_dmpar_alltoall_field4d_real(fieldIn, fieldout, haloLayersIn)!{{{ integer :: nAdded, bufferOffset integer :: mpi_ierr integer :: iHalo, iBuffer, i, j, k, l - integer :: nHaloLayers + integer :: nHaloLayers, threadNum integer, dimension(:), pointer :: haloLayers dminfo => fieldIn % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(fieldIn % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if - -#ifdef _MPI - nullify(sendList) - nullify(recvList) - - ! Setup receive lists. - do iHalo = 1, nHaloLayers - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => recvList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next - end do - - ! If no comm list exists, create a new one. - if(.not. 
comm_list_found) then - if(.not.associated(recvList)) then - allocate(recvList) - nullify(recvList % next) - commListPtr => recvList - else - commListPtr => recvList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) - end if - - exchListPtr => exchListPtr % next + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) end do - - fieldOutPtr => fieldOutPtr % next - end do - end do + else + nHaloLayers = size(fieldIn % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if - ! Determine size of receive list buffers. - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) +#ifdef _MPI + nullify(sendList) + nullify(recvList) - bufferOffset = 0 + ! Setup receive lists. do iHalo = 1, nHaloLayers - nAdded = 0 fieldOutPtr => fieldOut do while(associated(fieldOutPtr)) exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3)) + comm_list_found = .false. + + ! Search for an already created commList to this processor. 
+ commListPtr => recvList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. comm_list_found) then + if(.not.associated(recvList)) then + allocate(recvList) + nullify(recvList % next) + commListPtr => recvList + else + commListPtr => recvList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) end if + exchListPtr => exchListPtr % next end do - + fieldOutPtr => fieldOutPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr % nList = nAdded - - commListPtr => commListPtr % next - end do - - ! Allocate buffers for receives, and initiate mpi_irecv calls. - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do - - ! Setup send lists, and determine the size of their buffers. - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! 
Search for an already created commList to this processor. - commListPtr => sendList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next + + ! Determine size of receive list buffers. + commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next end do - - ! If no comm list exists, create a new one. - if(.not. 
comm_list_found) then - if(.not.associated(sendList)) then - allocate(sendList) - nullify(sendList % next) - commListPtr => sendList - else - commListPtr => sendList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) - end if - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - - fieldInPtr => fieldInPtr % next + commListPtr % nList = nAdded + + commListPtr => commListPtr % next end do - end do - - ! Allocate sendLists, copy data into buffer, and initiate mpi_isends - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - bufferOffset = 0 + + ! Allocate buffers for receives, and initiate mpi_irecv calls. + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next + end do + + ! Setup send lists, and determine the size of their buffers. 
do iHalo = 1, nHaloLayers - nAdded = 0 fieldInPtr => fieldIn do while(associated(fieldInPtr)) exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldInPtr % dimSizes(3) - do k = 1, fieldInPtr % dimSizes(2) - do l = 1, fieldInPtr % dimSizes(1) - iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) & - + (j-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) & - + (k-1) * fieldInPtr % dimSizes(1) + l + bufferOffset - commListPtr % rbuffer(iBuffer) = fieldInPtr % array(l, k, j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 - end do - end do + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => sendList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(sendList)) then + allocate(sendList) + nullify(sendList % next) + commListPtr => sendList + else + commListPtr => sendList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next end do - end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) end if - + exchListPtr => exchListPtr % next end do - + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, & - commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - - commListPtr => commListPtr % next - end do + ! Allocate sendLists, copy data into buffer, and initiate mpi_isends + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldInPtr % dimSizes(3) + do k = 1, fieldInPtr % dimSizes(2) + do l = 1, fieldInPtr % dimSizes(1) + iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) & + + (j-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) & + + (k-1) * fieldInPtr % dimSizes(1) + l + bufferOffset + commListPtr % rbuffer(iBuffer) = fieldInPtr % array(l, k, j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do + end 
do + end do + end do + end if -#endif + exchListPtr => exchListPtr % next + end do - ! Handle Local Copies. Only local copies if no MPI - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldOutPtr % array(:, :, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, :, exchListPtr % srcList(i)) - end do - end if - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - fieldInPtr => fieldInPtr % next + + call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, & + commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + + commListPtr => commListPtr % next end do - end do -#ifdef _MPI - ! Wait for MPI_Irecv's to finish, and unpack data. - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) +#endif - bufferOffset = 0 + ! Handle Local Copies. 
Only local copies if no MPI do iHalo = 1, nHaloLayers - nAdded = 0 - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldOutPtr % dimSizes(3) - do k = 1, fieldOutPtr % dimSizes(2) - do l = 1, fieldOutPtr % dimSizes(1) - iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) & - + (j-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) & - + (k-1) * fieldOutPtr % dimSizes(1) + l + bufferOffset - fieldOutPtr % array(l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer) - end do - end do + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldOutPtr % array(:, :, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, :, exchListPtr % srcList(i)) end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3)) - end if + end if + fieldOutPtr => fieldOutPtr % next + end do + exchListPtr => exchListPtr % next end do - - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr => commListPtr % next - end do +#ifdef _MPI + ! Wait for MPI_Irecv's to finish, and unpack data. 
+ commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldOutPtr % dimSizes(3) + do k = 1, fieldOutPtr % dimSizes(2) + do l = 1, fieldOutPtr % dimSizes(1) + iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) & + + (j-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) & + + (k-1) * fieldOutPtr % dimSizes(1) + l + bufferOffset + fieldOutPtr % array(l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer) + end do + end do + end do + end do + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next + end do + bufferOffset = bufferOffset + nAdded + end do + + commListPtr => commListPtr % next + end do - ! Wait for MPI_Isend's to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! Wait for MPI_Isend's to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. 
+ call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_alltoall_field4d_real!}}} @@ -3932,288 +4339,291 @@ subroutine mpas_dmpar_alltoall_field5d_real(fieldIn, fieldout, haloLayersIn)!{{{ integer :: nAdded, bufferOffset integer :: mpi_ierr integer :: iHalo, iBuffer, i, j, k, l, m - integer :: nHaloLayers + integer :: nHaloLayers, threadNum integer, dimension(:), pointer :: haloLayers dminfo => fieldIn % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(fieldIn % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if - -#ifdef _MPI - nullify(sendList) - nullify(recvList) - - ! Setup receive lists. - do iHalo = 1, nHaloLayers - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => recvList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4) - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next - end do - - ! If no comm list exists, create a new one. - if(.not. 
comm_list_found) then - if(.not.associated(recvList)) then - allocate(recvList) - nullify(recvList % next) - commListPtr => recvList - else - commListPtr => recvList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4) - end if - - exchListPtr => exchListPtr % next + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) end do - - fieldOutPtr => fieldOutPtr % next - end do - end do + else + nHaloLayers = size(fieldIn % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if - ! Determine size of receive list buffers. - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) +#ifdef _MPI + nullify(sendList) + nullify(recvList) - bufferOffset = 0 + ! Setup receive lists. do iHalo = 1, nHaloLayers - nAdded = 0 fieldOutPtr => fieldOut do while(associated(fieldOutPtr)) exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4)) + comm_list_found = .false. + + ! Search for an already created commList to this processor. 
+ commListPtr => recvList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4) + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. comm_list_found) then + if(.not.associated(recvList)) then + allocate(recvList) + nullify(recvList % next) + commListPtr => recvList + else + commListPtr => recvList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next + end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4) end if + exchListPtr => exchListPtr % next end do - + fieldOutPtr => fieldOutPtr % next end do - bufferOffset = bufferOffset + nAdded end do - commListPtr % nList = nAdded - - commListPtr => commListPtr % next - end do - - ! Allocate buffers for receives, and initiate mpi_irecv calls. - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do - - ! Setup send lists, and determine the size of their buffers. 
- do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - comm_list_found = .false. - - ! Search for an already created commList to this processor. - commListPtr => sendList - do while(associated(commListPtr)) - if(commListPtr % procID == exchListPtr % endPointID) then - commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldInPtr % dimSizes(4) - comm_list_found = .true. - exit - end if - - commListPtr => commListPtr % next + + ! Determine size of receive list buffers. + commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next end do - - ! If no comm list exists, create a new one. - if(.not. 
comm_list_found) then - if(.not.associated(sendList)) then - allocate(sendList) - nullify(sendList % next) - commListPtr => sendList - else - commListPtr => sendList - commListPtr2 => commListPtr % next - do while(associated(commListPtr2)) - commListPtr => commListPtr % next - commListPtr2 => commListPtr % next - end do - - allocate(commListPtr % next) - commListPtr => commListPtr % next - nullify(commListPtr % next) - end if - commListPtr % procID = exchListPtr % endPointID - commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldInPtr % dimSizes(4) - end if - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - - fieldInPtr => fieldInPtr % next + commListPtr % nList = nAdded + + commListPtr => commListPtr % next end do - end do - - ! Allocate sendLists, copy data into buffer, and initiate mpi_isends - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - bufferOffset = 0 + + ! Allocate buffers for receives, and initiate mpi_irecv calls. + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next + end do + + ! Setup send lists, and determine the size of their buffers. 
do iHalo = 1, nHaloLayers - nAdded = 0 fieldInPtr => fieldIn do while(associated(fieldInPtr)) exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldInPtr % dimSizes(4) - do k = 1, fieldInPtr % dimSizes(3) - do l = 1, fieldInPtr % dimSizes(2) - do m = 1, fieldInPtr % dimSizes(1) - iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) * fieldInPtr % dimSizes(4) & - + (j-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) & - + (k-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) & - + (l-1) * fieldInPtr % dimSizes(1) + m + bufferOffset - commListPtr % rbuffer(iBuffer) = fieldInPtr % array(m, l, k, j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 - end do - end do - end do + comm_list_found = .false. + + ! Search for an already created commList to this processor. + commListPtr => sendList + do while(associated(commListPtr)) + if(commListPtr % procID == exchListPtr % endPointID) then + commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldInPtr % dimSizes(4) + comm_list_found = .true. + exit + end if + + commListPtr => commListPtr % next + end do + + ! If no comm list exists, create a new one. + if(.not. 
comm_list_found) then + if(.not.associated(sendList)) then + allocate(sendList) + nullify(sendList % next) + commListPtr => sendList + else + commListPtr => sendList + commListPtr2 => commListPtr % next + do while(associated(commListPtr2)) + commListPtr => commListPtr % next + commListPtr2 => commListPtr % next end do - end do + + allocate(commListPtr % next) + commListPtr => commListPtr % next + nullify(commListPtr % next) + end if + commListPtr % procID = exchListPtr % endPointID + commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldInPtr % dimSizes(4) end if - + exchListPtr => exchListPtr % next end do - + fieldInPtr => fieldInPtr % next end do - bufferOffset = bufferOffset + nAdded end do - call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, & - commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - - commListPtr => commListPtr % next - end do + ! Allocate sendLists, copy data into buffer, and initiate mpi_isends + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldInPtr % dimSizes(4) + do k = 1, fieldInPtr % dimSizes(3) + do l = 1, fieldInPtr % dimSizes(2) + do m = 1, fieldInPtr % dimSizes(1) + iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) * fieldInPtr % dimSizes(4) & + + (j-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) & + + (k-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) & 
+ + (l-1) * fieldInPtr % dimSizes(1) + m + bufferOffset + commListPtr % rbuffer(iBuffer) = fieldInPtr % array(m, l, k, j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do + end do + end do + end do + end do + end if -#endif + exchListPtr => exchListPtr % next + end do - ! Handle Local Copies. Only local copies if no MPI - do iHalo = 1, nHaloLayers - fieldInPtr => fieldIn - do while(associated(fieldInPtr)) - exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldOutPtr % array(:, :, :, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, :, :, exchListPtr % srcList(i)) - end do - end if - fieldOutPtr => fieldOutPtr % next + fieldInPtr => fieldInPtr % next end do - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do - fieldInPtr => fieldInPtr % next + + call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, & + commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + + commListPtr => commListPtr % next end do - end do -#ifdef _MPI - ! Wait for MPI_Irecv's to finish, and unpack data. - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) +#endif - bufferOffset = 0 + ! Handle Local Copies. 
Only local copies if no MPI do iHalo = 1, nHaloLayers - nAdded = 0 - fieldOutPtr => fieldOut - do while(associated(fieldOutPtr)) - exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + fieldInPtr => fieldIn + do while(associated(fieldInPtr)) + exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldOutPtr % dimSizes(4) - do k = 1, fieldOutPtr % dimSizes(3) - do l = 1, fieldOutPtr % dimSizes(2) - do m = 1, fieldOutPtr % dimSizes(1) - iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(4) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) & - + (j-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) & - + (k-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) & - + (l-1) * fieldOutPtr % dimSizes(1) + m + bufferOffset - fieldOutPtr % array(m, l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer) + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldOutPtr % array(:, :, :, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, :, :, exchListPtr % srcList(i)) + end do + end if + fieldOutPtr => fieldOutPtr % next + end do + + exchListPtr => exchListPtr % next + end do + fieldInPtr => fieldInPtr % next + end do + end do + +#ifdef _MPI + ! Wait for MPI_Irecv's to finish, and unpack data. 
+ commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldOutPtr => fieldOut + do while(associated(fieldOutPtr)) + exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldOutPtr % dimSizes(4) + do k = 1, fieldOutPtr % dimSizes(3) + do l = 1, fieldOutPtr % dimSizes(2) + do m = 1, fieldOutPtr % dimSizes(1) + iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(4) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) & + + (j-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) & + + (k-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) & + + (l-1) * fieldOutPtr % dimSizes(1) + m + bufferOffset + fieldOutPtr % array(m, l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer) + end do end do end do end do end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4)) - end if - exchListPtr => exchListPtr % next + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4)) + end if + exchListPtr => exchListPtr % next + end do + + fieldOutPtr => fieldOutPtr % next end do - - fieldOutPtr => fieldOutPtr % next + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded - end do - commListPtr => commListPtr % next - end do + commListPtr => commListPtr % next + end do - ! Wait for MPI_Isend's to finish. 
- commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! Wait for MPI_Isend's to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. + call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_alltoall_field5d_real!}}} @@ -4239,11 +4649,18 @@ subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloLayersIn)!{{{ type (field1DInteger), pointer :: fieldCursor, fieldCursor2 type (mpas_exchange_list), pointer :: exchListPtr type (mpas_communication_list), pointer :: sendList, recvList, commListPtr - integer :: mpi_ierr + integer :: mpi_ierr, threadNum integer :: nHaloLayers, iHalo, i integer :: bufferOffset, nAdded integer, dimension(:), pointer :: haloLayers + if ( .not. 
field % isActive ) then +#ifdef MPAS_DEBUG + write(stderrUnit, *) ' -- Skipping halo exchange for deactivated field: ' // trim(field % fieldName) +#endif + return + end if + do i = 1, 1 if(field % dimSizes(i) <= 0) then return @@ -4251,137 +4668,140 @@ subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloLayersIn)!{{{ end do dminfo => field % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(field % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(field % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - ! Setup Communication Lists - call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) + ! Setup Communication Lists + call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) - ! Allocate space in recv lists, and initiate mpi_irecv calls - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + ! 
Allocate space in recv lists, and initiate mpi_irecv calls + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + commListPtr => commListPtr % next + end do - ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - commListPtr % ibuffer(exchListPtr % destList(i) + bufferOffset) = fieldCursor % array(exchListPtr % srcList(i)) - nAdded = nAdded + 1 + ! 
Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + commListPtr % ibuffer(exchListPtr % destList(i) + bufferOffset) = fieldCursor % array(exchListPtr % srcList(i)) + nAdded = nAdded + 1 - end do - end if + end do + end if - exchListPtr => exchListPtr % next - end do + exchListPtr => exchListPtr % next + end do - fieldCursor => fieldCursor % next + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded - end do - call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + commListPtr => commListPtr % next + end do #endif - ! Handle local copy. If MPI is off, then only local copies are performed. - fieldCursor => field - do while(associated(fieldCursor)) - do iHalo = 1, nHaloLayers - exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList + ! Handle local copy. If MPI is off, then only local copies are performed. 
+ fieldCursor => field + do while(associated(fieldCursor)) + do iHalo = 1, nHaloLayers + exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldCursor2 => field - do while(associated(fieldCursor2)) - if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldCursor2 % array(exchListPtr % destList(i)) = fieldCursor % array(exchListPtr % srcList(i)) - end do - end if - - fieldCursor2 => fieldCursor2 % next - end do + do while(associated(exchListPtr)) + fieldCursor2 => field + do while(associated(fieldCursor2)) + if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldCursor2 % array(exchListPtr % destList(i)) = fieldCursor % array(exchListPtr % srcList(i)) + end do + end if - exchListPtr => exchListPtr % next + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do end do - end do - fieldCursor => fieldCursor % next - end do + fieldCursor => fieldCursor % next + end do #ifdef _MPI - ! Wait for mpi_irecv to finish, and unpack data from buffer - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - fieldCursor % array(exchListPtr % destList(i)) = commListPtr % ibuffer(exchListPtr % srcList(i) + bufferOffset) - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList)) - end if - exchListPtr => exchListPtr % next + ! 
Wait for mpi_irecv to finish, and unpack data from buffer + commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + fieldCursor % array(exchListPtr % destList(i)) = commListPtr % ibuffer(exchListPtr % srcList(i) + bufferOffset) + end do + nAdded = max(nAdded, maxval(exchListPtr % srcList)) + end if + exchListPtr => exchListPtr % next + end do + + fieldCursor => fieldCursor % next end do - - fieldCursor => fieldCursor % next + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded + commListPtr => commListPtr % next end do - commListPtr => commListPtr % next - end do - ! wait for mpi_isend to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! wait for mpi_isend to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. 
+ call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_exch_halo_field1d_integer!}}} @@ -4407,11 +4827,18 @@ subroutine mpas_dmpar_exch_halo_field2d_integer(field, haloLayersIn)!{{{ type (field2DInteger), pointer :: fieldCursor, fieldCursor2 type (mpas_exchange_list), pointer :: exchListPtr type (mpas_communication_list), pointer :: sendList, recvList, commListPtr - integer :: mpi_ierr + integer :: mpi_ierr, threadNum integer :: nHaloLayers, iHalo, i, j integer :: bufferOffset, nAdded integer, dimension(:), pointer :: haloLayers + if ( .not. field % isActive ) then +#ifdef MPAS_DEBUG + write(stderrUnit, *) ' -- Skipping halo exchange for deactivated field: ' // trim(field % fieldName) +#endif + return + end if + do i = 1, 2 if(field % dimSizes(i) <= 0) then return @@ -4419,139 +4846,142 @@ subroutine mpas_dmpar_exch_halo_field2d_integer(field, haloLayersIn)!{{{ end do dminfo => field % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(field % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(field % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - ! 
Setup Communication Lists - call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) + ! Setup Communication Lists + call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) - ! Allocate space in recv lists, and initiate mpi_irecv calls - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + ! Allocate space in recv lists, and initiate mpi_irecv calls + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + commListPtr => commListPtr % next + end do - ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(1) - commListPtr % ibuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) + j + bufferOffset) = fieldCursor % array(j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 + ! 
Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(1) + commListPtr % ibuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) + j + bufferOffset) = fieldCursor % array(j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do end do - end do - end if + end if - exchListPtr => exchListPtr % next - end do + exchListPtr => exchListPtr % next + end do - fieldCursor => fieldCursor % next + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded - end do - call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next + end do #endif - ! Handle local copy. If MPI is off, then only local copies are performed. - fieldCursor => field - do while(associated(fieldCursor)) - do iHalo = 1, nHaloLayers - exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList + ! Handle local copy. If MPI is off, then only local copies are performed. 
+ fieldCursor => field + do while(associated(fieldCursor)) + do iHalo = 1, nHaloLayers + exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldCursor2 => field - do while(associated(fieldCursor2)) - if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldCursor2 % array(:, exchListPtr % destList(i)) = fieldCursor % array(:, exchListPtr % srcList(i)) - end do - end if - - fieldCursor2 => fieldCursor2 % next - end do + do while(associated(exchListPtr)) + fieldCursor2 => field + do while(associated(fieldCursor2)) + if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldCursor2 % array(:, exchListPtr % destList(i)) = fieldCursor % array(:, exchListPtr % srcList(i)) + end do + end if - exchListPtr => exchListPtr % next + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do end do - end do - fieldCursor => fieldCursor % next - end do + fieldCursor => fieldCursor % next + end do #ifdef _MPI - ! Wait for mpi_irecv to finish, and unpack data from buffer - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(1) - fieldCursor % array(j, exchListPtr % destList(i)) = commListPtr % ibuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) + j + bufferOffset) + ! 
Wait for mpi_irecv to finish, and unpack data from buffer + commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(1) + fieldCursor % array(j, exchListPtr % destList(i)) = commListPtr % ibuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) + j + bufferOffset) + end do end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1)) - end if - exchListPtr => exchListPtr % next + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1)) + end if + exchListPtr => exchListPtr % next + end do + + fieldCursor => fieldCursor % next end do - - fieldCursor => fieldCursor % next + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded + commListPtr => commListPtr % next end do - commListPtr => commListPtr % next - end do - ! wait for mpi_isend to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! wait for mpi_isend to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. 
+ call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_exch_halo_field2d_integer!}}} @@ -4577,11 +5007,18 @@ subroutine mpas_dmpar_exch_halo_field3d_integer(field, haloLayersIn)!{{{ type (field3DInteger), pointer :: fieldCursor, fieldCursor2 type (mpas_exchange_list), pointer :: exchListPtr type (mpas_communication_list), pointer :: sendList, recvList, commListPtr - integer :: mpi_ierr + integer :: mpi_ierr, threadNum integer :: nHaloLayers, iHalo, i, j, k integer :: bufferOffset, nAdded integer, dimension(:), pointer :: haloLayers + if ( .not. field % isActive ) then +#ifdef MPAS_DEBUG + write(stderrUnit, *) ' -- Skipping halo exchange for deactivated field: ' // trim(field % fieldName) +#endif + return + end if + do i = 1, 3 if(field % dimSizes(i) <= 0) then return @@ -4589,131 +5026,133 @@ end do dminfo => field % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(field % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(field % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - ! 
Setup Communication Lists - call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) + ! Setup Communication Lists + call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) - ! Allocate space in recv lists, and initiate mpi_irecv calls - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + ! Allocate space in recv lists, and initiate mpi_irecv calls + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + commListPtr => commListPtr % next + end do - ! 
Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % ibuffer(commListPtr % nList)) - nullify(commListPtr % rbuffer) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(2) - do k = 1, fieldCursor % dimSizes(1) - commListPtr % ibuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & - + (j-1) * fieldCursor % dimSizes(1) + k + bufferOffset) = fieldCursor % array(k, j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 + ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % ibuffer(commListPtr % nList)) + nullify(commListPtr % rbuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(2) + do k = 1, fieldCursor % dimSizes(1) + commListPtr % ibuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (j-1) * fieldCursor % dimSizes(1) + k + bufferOffset) = fieldCursor % array(k, j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do end do end do - end do - end if + end if - exchListPtr => exchListPtr % next - end do + exchListPtr => exchListPtr % next + end do - fieldCursor => fieldCursor % next + fieldCursor => fieldCursor % next + end do + bufferOffset = 
bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded - end do - call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next + end do #endif - ! Handle local copy. If MPI is off, then only local copies are performed. - fieldCursor => field - do while(associated(fieldCursor)) - do iHalo = 1, nHaloLayers - exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList + ! Handle local copy. If MPI is off, then only local copies are performed. + fieldCursor => field + do while(associated(fieldCursor)) + do iHalo = 1, nHaloLayers + exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldCursor2 => field - do while(associated(fieldCursor2)) - if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldCursor2 % array(:, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, exchListPtr % srcList(i)) - end do - end if - - fieldCursor2 => fieldCursor2 % next - end do + do while(associated(exchListPtr)) + fieldCursor2 => field + do while(associated(fieldCursor2)) + if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldCursor2 % array(:, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, exchListPtr % srcList(i)) + end do + end if - exchListPtr => exchListPtr % next + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do end do - end do - fieldCursor => fieldCursor % next - end do + fieldCursor => fieldCursor % next + end do #ifdef _MPI - ! 
Wait for mpi_irecv to finish, and unpack data from buffer - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(2) - do k = 1, fieldCursor % dimSizes(1) - fieldCursor % array(k, j, exchListPtr % destList(i)) = commListPtr % ibuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & - + (j-1)*fieldCursor % dimSizes(1) + k + bufferOffset) + ! Wait for mpi_irecv to finish, and unpack data from buffer + commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(2) + do k = 1, fieldCursor % dimSizes(1) + fieldCursor % array(k, j, exchListPtr % destList(i)) = commListPtr % ibuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (j-1)*fieldCursor % dimSizes(1) + k + bufferOffset) + end do end do end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)) - end if - exchListPtr => exchListPtr % next + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)) + end if + exchListPtr => exchListPtr % next + end do + + fieldCursor => fieldCursor 
% next end do - - fieldCursor => fieldCursor % next + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded + commListPtr => commListPtr % next end do - commListPtr => commListPtr % next - end do ! wait for mpi_isend to finish. commListPtr => sendList @@ -4722,12 +5161,13 @@ subroutine mpas_dmpar_exch_halo_field3d_integer(field, haloLayersIn)!{{{ commListPtr => commListPtr % next end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. + call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_exch_halo_field3d_integer!}}} @@ -4753,11 +5193,18 @@ subroutine mpas_dmpar_exch_halo_field1d_real(field, haloLayersIn)!{{{ type (field1dReal), pointer :: fieldCursor, fieldCursor2 type (mpas_exchange_list), pointer :: exchListPtr type (mpas_communication_list), pointer :: sendList, recvList, commListPtr - integer :: mpi_ierr + integer :: mpi_ierr, threadNum integer :: nHaloLayers, iHalo, i integer :: bufferOffset, nAdded integer, dimension(:), pointer :: haloLayers + if ( .not. 
field % isActive ) then +#ifdef MPAS_DEBUG + write(stderrUnit, *) ' -- Skipping halo exchange for deactivated field: ' // trim(field % fieldName) +#endif + return + end if + do i = 1, 1 if(field % dimSizes(i) <= 0) then return @@ -4765,135 +5212,138 @@ subroutine mpas_dmpar_exch_halo_field1d_real(field, haloLayersIn)!{{{ end do dminfo => field % block % domain % dminfo + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(field % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(field % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo +#ifdef _MPI + + ! Setup Communication Lists + call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) + + ! Allocate space in recv lists, and initiate mpi_irecv calls + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + + commListPtr => commListPtr % next end do - end if -#ifdef _MPI + ! 
Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + commListPtr % rbuffer(exchListPtr % destList(i) + bufferOffset) = fieldCursor % array(exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do + end if - ! Setup Communication Lists - call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) + exchListPtr => exchListPtr % next + end do - ! Allocate space in recv lists, and initiate mpi_irecv calls - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + end do - commListPtr => commListPtr % next - end do + call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next + end do +#endif + + ! Handle local copy. If MPI is off, then only local copies are performed. + fieldCursor => field + do while(associated(fieldCursor)) + do iHalo = 1, nHaloLayers + exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList - ! 
Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - commListPtr % rbuffer(exchListPtr % destList(i) + bufferOffset) = fieldCursor % array(exchListPtr % srcList(i)) - nAdded = nAdded + 1 - end do - end if + fieldCursor2 => field + do while(associated(fieldCursor2)) + if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldCursor2 % array(exchListPtr % destList(i)) = fieldCursor % array(exchListPtr % srcList(i)) + end do + end if + + fieldCursor2 => fieldCursor2 % next + end do exchListPtr => exchListPtr % next end do - - fieldCursor => fieldCursor % next end do - bufferOffset = bufferOffset + nAdded + + fieldCursor => fieldCursor % next end do - call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do -#endif +#ifdef _MPI - ! Handle local copy. If MPI is off, then only local copies are performed. - fieldCursor => field - do while(associated(fieldCursor)) - do iHalo = 1, nHaloLayers - exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList + ! 
Wait for mpi_irecv to finish, and unpack data from buffer + commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + fieldCursor % array(exchListPtr % destList(i)) = commListPtr % rbuffer(exchListPtr % srcList(i) + bufferOffset) + end do + nAdded = max(nAdded, maxval(exchListPtr % srcList)) + end if + exchListPtr => exchListPtr % next + end do - do while(associated(exchListPtr)) - fieldCursor2 => field - do while(associated(fieldCursor2)) - if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldCursor2 % array(exchListPtr % destList(i)) = fieldCursor % array(exchListPtr % srcList(i)) - end do - end if - - fieldCursor2 => fieldCursor2 % next + fieldCursor => fieldCursor % next end do - - exchListPtr => exchListPtr % next + bufferOffset = bufferOffset + nAdded end do + commListPtr => commListPtr % next end do - fieldCursor => fieldCursor % next - end do + ! wait for mpi_isend to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do -#ifdef _MPI + ! Destroy commLists. + call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) +#endif - ! 
Wait for mpi_irecv to finish, and unpack data from buffer - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - fieldCursor % array(exchListPtr % destList(i)) = commListPtr % rbuffer(exchListPtr % srcList(i) + bufferOffset) - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList)) - end if - exchListPtr => exchListPtr % next - end do - - fieldCursor => fieldCursor % next - end do - bufferOffset = bufferOffset + nAdded - end do - commListPtr => commListPtr % next - end do - - ! wait for mpi_isend to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do - - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) -#endif - - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_exch_halo_field1d_real!}}} @@ -4919,11 +5369,18 @@ subroutine mpas_dmpar_exch_halo_field2d_real(field, haloLayersIn)!{{{ type (field2dReal), pointer :: fieldCursor, fieldCursor2 type (mpas_exchange_list), pointer :: exchListPtr type (mpas_communication_list), pointer :: sendList, recvList, commListPtr - integer :: mpi_ierr + integer :: mpi_ierr, threadNum integer :: nHaloLayers, iHalo, i, j integer :: bufferOffset, nAdded integer, dimension(:), pointer :: haloLayers + if ( .not. 
field % isActive ) then +#ifdef MPAS_DEBUG + write(stderrUnit, *) ' -- Skipping halo exchange for deactivated field: ' // trim(field % fieldName) +#endif + return + end if + do i = 1, 2 if(field % dimSizes(i) <= 0) then return @@ -4931,138 +5388,141 @@ subroutine mpas_dmpar_exch_halo_field2d_real(field, haloLayersIn)!{{{ end do dminfo => field % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(field % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(field % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - ! Setup Communication Lists - call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) + ! Setup Communication Lists + call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) - ! Allocate space in recv lists, and initiate mpi_irecv calls - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + ! 
Allocate space in recv lists, and initiate mpi_irecv calls + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + commListPtr => commListPtr % next + end do - ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(1) - commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) + j + bufferOffset) = fieldCursor % array(j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 + ! 
Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(1) + commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) + j + bufferOffset) = fieldCursor % array(j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do end do - end do - end if + end if - exchListPtr => exchListPtr % next - end do + exchListPtr => exchListPtr % next + end do - fieldCursor => fieldCursor % next + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded - end do - call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next + end do #endif - ! Handle local copy. If MPI is off, then only local copies are performed. - fieldCursor => field - do while(associated(fieldCursor)) - do iHalo = 1, nHaloLayers - exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList + ! Handle local copy. If MPI is off, then only local copies are performed. 
+ fieldCursor => field + do while(associated(fieldCursor)) + do iHalo = 1, nHaloLayers + exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldCursor2 => field - do while(associated(fieldCursor2)) - if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldCursor2 % array(:, exchListPtr % destList(i)) = fieldCursor % array(:, exchListPtr % srcList(i)) - end do - end if - - fieldCursor2 => fieldCursor2 % next - end do + do while(associated(exchListPtr)) + fieldCursor2 => field + do while(associated(fieldCursor2)) + if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldCursor2 % array(:, exchListPtr % destList(i)) = fieldCursor % array(:, exchListPtr % srcList(i)) + end do + end if - exchListPtr => exchListPtr % next + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do end do - end do - fieldCursor => fieldCursor % next - end do + fieldCursor => fieldCursor % next + end do #ifdef _MPI - ! Wait for mpi_irecv to finish, and unpack data from buffer - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(1) - fieldCursor % array(j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizeS(1) + j + bufferOffset) + ! 
Wait for mpi_irecv to finish, and unpack data from buffer + commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(1) + fieldCursor % array(j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizeS(1) + j + bufferOffset) + end do end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1)) - end if - exchListPtr => exchListPtr % next + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1)) + end if + exchListPtr => exchListPtr % next + end do + + fieldCursor => fieldCursor % next end do - - fieldCursor => fieldCursor % next + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded + commListPtr => commListPtr % next end do - commListPtr => commListPtr % next - end do - ! wait for mpi_isend to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! wait for mpi_isend to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. 
+ call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_exch_halo_field2d_real!}}} @@ -5088,11 +5548,18 @@ subroutine mpas_dmpar_exch_halo_field3d_real(field, haloLayersIn)!{{{ type (field3dReal), pointer :: fieldCursor, fieldCursor2 type (mpas_exchange_list), pointer :: exchListPtr type (mpas_communication_list), pointer :: sendList, recvList, commListPtr - integer :: mpi_ierr + integer :: mpi_ierr, threadNum integer :: nHaloLayers, iHalo, i, j, k integer :: bufferOffset, nAdded integer, dimension(:), pointer :: haloLayers + if ( .not. field % isActive ) then +#ifdef MPAS_DEBUG + write(stderrUnit, *) ' -- Skipping halo exchange for deactivated field: ' // trim(field % fieldName) +#endif + return + end if + do i = 1, 3 if(field % dimSizes(i) <= 0) then return @@ -5100,144 +5567,147 @@ subroutine mpas_dmpar_exch_halo_field3d_real(field, haloLayersIn)!{{{ end do dminfo => field % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(field % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(field % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - ! 
Setup Communication Lists - call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) + ! Setup Communication Lists + call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) - ! Allocate space in recv lists, and initiate mpi_irecv calls - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + ! Allocate space in recv lists, and initiate mpi_irecv calls + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + commListPtr => commListPtr % next + end do - ! 
Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(2) - do k = 1, fieldCursor % dimSizes(1) - commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & - + (j-1) * fieldCursor % dimSizes(1) + k + bufferOffset) = fieldCursor % array(k, j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 + ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(2) + do k = 1, fieldCursor % dimSizes(1) + commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (j-1) * fieldCursor % dimSizes(1) + k + bufferOffset) = fieldCursor % array(k, j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do end do end do - end do - end if + end if - exchListPtr => exchListPtr % next - end do + exchListPtr => exchListPtr % next + end do - fieldCursor => fieldCursor % next + fieldCursor => fieldCursor % next + end do + bufferOffset = 
bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded - end do - call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next + end do #endif - ! Handle local copy. If MPI is off, then only local copies are performed. - fieldCursor => field - do while(associated(fieldCursor)) - do iHalo = 1, nHaloLayers - exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList + ! Handle local copy. If MPI is off, then only local copies are performed. + fieldCursor => field + do while(associated(fieldCursor)) + do iHalo = 1, nHaloLayers + exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldCursor2 => field - do while(associated(fieldCursor2)) - if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldCursor2 % array(:, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, exchListPtr % srcList(i)) - end do - end if - - fieldCursor2 => fieldCursor2 % next - end do + do while(associated(exchListPtr)) + fieldCursor2 => field + do while(associated(fieldCursor2)) + if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldCursor2 % array(:, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, exchListPtr % srcList(i)) + end do + end if - exchListPtr => exchListPtr % next + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do end do - end do - fieldCursor => fieldCursor % next - end do + fieldCursor => fieldCursor % next + end do #ifdef _MPI - ! 
Wait for mpi_irecv to finish, and unpack data from buffer - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(2) - do k = 1, fieldCursor % dimSizes(1) - fieldCursor % array(k, j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & - + (j-1)*fieldCursor % dimSizes(1) + k + bufferOffset) + ! Wait for mpi_irecv to finish, and unpack data from buffer + commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(2) + do k = 1, fieldCursor % dimSizes(1) + fieldCursor % array(k, j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (j-1)*fieldCursor % dimSizes(1) + k + bufferOffset) + end do end do end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)) - end if - exchListPtr => exchListPtr % next + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)) + end if + exchListPtr => exchListPtr % next + end do + + fieldCursor => fieldCursor 
% next end do - - fieldCursor => fieldCursor % next + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded + commListPtr => commListPtr % next end do - commListPtr => commListPtr % next - end do - ! wait for mpi_isend to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! wait for mpi_isend to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. + call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_exch_halo_field3d_real!}}} @@ -5263,11 +5733,18 @@ subroutine mpas_dmpar_exch_halo_field4d_real(field, haloLayersIn)!{{{ type (field4dReal), pointer :: fieldCursor, fieldCursor2 type (mpas_exchange_list), pointer :: exchListPtr type (mpas_communication_list), pointer :: sendList, recvList, commListPtr - integer :: mpi_ierr + integer :: mpi_ierr, threadNum integer :: nHaloLayers, iHalo, i, j, k, l integer :: bufferOffset, nAdded integer, dimension(:), pointer :: haloLayers + if ( .not. 
field % isActive ) then +#ifdef MPAS_DEBUG + write(stderrUnit, *) ' -- Skipping halo exchange for deactivated field: ' // trim(field % fieldName) +#endif + return + end if + do i = 1, 4 if(field % dimSizes(i) <= 0) then return @@ -5275,153 +5752,156 @@ subroutine mpas_dmpar_exch_halo_field4d_real(field, haloLayersIn)!{{{ end do dminfo => field % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(field % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(field % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - ! Setup Communication Lists - call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) + ! Setup Communication Lists + call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) - ! Allocate space in recv lists, and initiate mpi_irecv calls - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + ! 
Allocate space in recv lists, and initiate mpi_irecv calls + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + commListPtr => commListPtr % next + end do - ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(3) - do k = 1, fieldCursor % dimSizes(2) - do l = 1, fieldCursor % dimSizes(1) - commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) & - + (j-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & - + (k-1) * fieldCursor % dimSizes(1) + l + bufferOffset) & - = fieldCursor % array(l, k, j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 + ! 
Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(3) + do k = 1, fieldCursor % dimSizes(2) + do l = 1, fieldCursor % dimSizes(1) + commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) & + + (j-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (k-1) * fieldCursor % dimSizes(1) + l + bufferOffset) & + = fieldCursor % array(l, k, j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do end do end do end do - end do - end if + end if - exchListPtr => exchListPtr % next - end do + exchListPtr => exchListPtr % next + end do - fieldCursor => fieldCursor % next + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded - end do - call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next + end do #endif - ! Handle local copy. If MPI is off, then only local copies are performed. 
- fieldCursor => field - do while(associated(fieldCursor)) - do iHalo = 1, nHaloLayers - exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList + ! Handle local copy. If MPI is off, then only local copies are performed. + fieldCursor => field + do while(associated(fieldCursor)) + do iHalo = 1, nHaloLayers + exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldCursor2 => field - do while(associated(fieldCursor2)) - if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldCursor2 % array(:, :, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, :, exchListPtr % srcList(i)) - end do - end if - - fieldCursor2 => fieldCursor2 % next - end do + do while(associated(exchListPtr)) + fieldCursor2 => field + do while(associated(fieldCursor2)) + if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldCursor2 % array(:, :, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, :, exchListPtr % srcList(i)) + end do + end if - exchListPtr => exchListPtr % next + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do end do - end do - fieldCursor => fieldCursor % next - end do + fieldCursor => fieldCursor % next + end do #ifdef _MPI - ! 
Wait for mpi_irecv to finish, and unpack data from buffer - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(3) - do k = 1, fieldCursor % dimSizes(2) - do l = 1, fieldCursor % dimSizes(1) - fieldCursor % array(l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)& - *fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) *fieldCursor % dimSizes(3)& - + (j-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & - + (k-1)*fieldCursor % dimSizes(1) + l + bufferOffset) + ! Wait for mpi_irecv to finish, and unpack data from buffer + commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(3) + do k = 1, fieldCursor % dimSizes(2) + do l = 1, fieldCursor % dimSizes(1) + fieldCursor % array(l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)& + *fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) *fieldCursor % dimSizes(3)& + + (j-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (k-1)*fieldCursor % dimSizes(1) + l + bufferOffset) + end do end do end do end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % 
dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3)) - end if - exchListPtr => exchListPtr % next + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3)) + end if + exchListPtr => exchListPtr % next + end do + + fieldCursor => fieldCursor % next end do - - fieldCursor => fieldCursor % next + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded + commListPtr => commListPtr % next end do - commListPtr => commListPtr % next - end do - ! wait for mpi_isend to finish. - commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! wait for mpi_isend to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. + call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_exch_halo_field4d_real!}}} @@ -5447,11 +5927,18 @@ subroutine mpas_dmpar_exch_halo_field5d_real(field, haloLayersIn)!{{{ type (field5dReal), pointer :: fieldCursor, fieldCursor2 type (mpas_exchange_list), pointer :: exchListPtr type (mpas_communication_list), pointer :: sendList, recvList, commListPtr - integer :: mpi_ierr + integer :: mpi_ierr, threadNum integer :: nHaloLayers, iHalo, i, j, k, l, m integer :: bufferOffset, nAdded integer, dimension(:), pointer :: haloLayers + if ( .not. 
field % isActive ) then +#ifdef MPAS_DEBUG + write(stderrUnit, *) ' -- Skipping halo exchange for deactivated field: ' // trim(field % fieldName) +#endif + return + end if + do i = 1, 5 if(field % dimSizes(i) <= 0) then return @@ -5459,159 +5946,165 @@ subroutine mpas_dmpar_exch_halo_field5d_real(field, haloLayersIn)!{{{ end do dminfo => field % block % domain % dminfo - - if(present(haloLayersIn)) then - nHaloLayers = size(haloLayersIn) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = haloLayersIn(iHalo) - end do - else - nHaloLayers = size(field % sendList % halos) - allocate(haloLayers(nHaloLayers)) - do iHalo = 1, nHaloLayers - haloLayers(iHalo) = iHalo - end do - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(present(haloLayersIn)) then + nHaloLayers = size(haloLayersIn) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = haloLayersIn(iHalo) + end do + else + nHaloLayers = size(field % sendList % halos) + allocate(haloLayers(nHaloLayers)) + do iHalo = 1, nHaloLayers + haloLayers(iHalo) = iHalo + end do + end if #ifdef _MPI - ! Setup Communication Lists - call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) + ! Setup Communication Lists + call mpas_dmpar_build_comm_lists(field % sendList, field % recvList, haloLayers, field % dimsizes, sendList, recvList) - ! Allocate space in recv lists, and initiate mpi_irecv calls - commListPtr => recvList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) + ! 
Allocate space in recv lists, and initiate mpi_irecv calls + commListPtr => recvList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + commListPtr => commListPtr % next + end do - ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls - commListPtr => sendList - do while(associated(commListPtr)) - allocate(commListPtr % rbuffer(commListPtr % nList)) - nullify(commListPtr % ibuffer) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(4) - do k = 1, fieldCursor % dimSizes(3) - do l = 1, fieldCursor % dimSizes(2) - do m = 1, fieldCursor % dimSizes(1) - commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4) & - + (j-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) & - + (k-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & - + (l-1) * fieldCursor % dimSizes(1) + m + bufferOffset) & - = fieldCursor % array(m, l, k, j, exchListPtr % srcList(i)) - nAdded = nAdded + 1 + ! 
Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls + commListPtr => sendList + do while(associated(commListPtr)) + allocate(commListPtr % rbuffer(commListPtr % nList)) + nullify(commListPtr % ibuffer) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(4) + do k = 1, fieldCursor % dimSizes(3) + do l = 1, fieldCursor % dimSizes(2) + do m = 1, fieldCursor % dimSizes(1) + commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4) & + + (j-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) & + + (k-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (l-1) * fieldCursor % dimSizes(1) + m + bufferOffset) & + = fieldCursor % array(m, l, k, j, exchListPtr % srcList(i)) + nAdded = nAdded + 1 + end do end do end do end do end do - end do - end if + end if - exchListPtr => exchListPtr % next - end do + exchListPtr => exchListPtr % next + end do - fieldCursor => fieldCursor % next + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded - end do - call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) - commListPtr => commListPtr % next - end do + call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr) + commListPtr => commListPtr % next + end do #endif - ! Handle local copy. 
If MPI is off, then only local copies are performed. - fieldCursor => field - do while(associated(fieldCursor)) - do iHalo = 1, nHaloLayers - exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList + ! Handle local copy. If MPI is off, then only local copies are performed. + fieldCursor => field + do while(associated(fieldCursor)) + do iHalo = 1, nHaloLayers + exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - fieldCursor2 => field - do while(associated(fieldCursor2)) - if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then - do i = 1, exchListPtr % nList - fieldCursor2 % array(:, :, :, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, :, :, exchListPtr % srcList(i)) - end do - end if - - fieldCursor2 => fieldCursor2 % next - end do + do while(associated(exchListPtr)) + fieldCursor2 => field + do while(associated(fieldCursor2)) + if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then + do i = 1, exchListPtr % nList + fieldCursor2 % array(:, :, :, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, :, :, exchListPtr % srcList(i)) + end do + end if - exchListPtr => exchListPtr % next + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do end do - end do - fieldCursor => fieldCursor % next - end do + fieldCursor => fieldCursor % next + end do #ifdef _MPI - ! 
Wait for mpi_irecv to finish, and unpack data from buffer - commListPtr => recvList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - bufferOffset = 0 - do iHalo = 1, nHaloLayers - nAdded = 0 - fieldCursor => field - do while(associated(fieldCursor)) - exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList - do while(associated(exchListPtr)) - if(exchListPtr % endPointID == commListPtr % procID) then - do i = 1, exchListPtr % nList - do j = 1, fieldCursor % dimSizes(4) - do k = 1, fieldCursor % dimSizes(3) - do l = 1, fieldCursor % dimSizes(2) - do m = 1, fieldCursor % dimSizes(1) - fieldCursor % array(m, l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)& - *fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) *fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)& - + (j-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) & - + (k-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & - + (l-1)*fieldCursor % dimSizes(1) + m + bufferOffset) + ! 
Wait for mpi_irecv to finish, and unpack data from buffer + commListPtr => recvList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + bufferOffset = 0 + do iHalo = 1, nHaloLayers + nAdded = 0 + fieldCursor => field + do while(associated(fieldCursor)) + exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList + do while(associated(exchListPtr)) + if(exchListPtr % endPointID == commListPtr % procID) then + do i = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(4) + do k = 1, fieldCursor % dimSizes(3) + do l = 1, fieldCursor % dimSizes(2) + do m = 1, fieldCursor % dimSizes(1) + fieldCursor % array(m, l, k, j, exchListPtr % destList(i)) = & + commListPtr % rbuffer((exchListPtr % srcList(i)-1) & + * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) & + * fieldCursor % dimSizes(4)& + + (j-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) & + + (k-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (l-1)*fieldCursor % dimSizes(1) + m + bufferOffset) + end do end do end do end do end do - end do - nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)) - end if - exchListPtr => exchListPtr % next + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)) + end if + exchListPtr => exchListPtr % next + end do + + fieldCursor => fieldCursor % next end do - - fieldCursor => fieldCursor % next + bufferOffset = bufferOffset + nAdded end do - bufferOffset = bufferOffset + nAdded + commListPtr => commListPtr % next end do - commListPtr => commListPtr % next - end do - ! wait for mpi_isend to finish. 
- commListPtr => sendList - do while(associated(commListPtr)) - call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) - commListPtr => commListPtr % next - end do + ! wait for mpi_isend to finish. + commListPtr => sendList + do while(associated(commListPtr)) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + commListPtr => commListPtr % next + end do - ! Destroy commLists. - call mpas_dmpar_destroy_communication_list(sendList) - call mpas_dmpar_destroy_communication_list(recvList) + ! Destroy commLists. + call mpas_dmpar_destroy_communication_list(sendList) + call mpas_dmpar_destroy_communication_list(recvList) #endif - deallocate(haloLayers) + deallocate(haloLayers) + end if end subroutine mpas_dmpar_exch_halo_field5d_real!}}} @@ -5629,15 +6122,19 @@ subroutine mpas_dmpar_init_multihalo_exchange_list(exchList, nHalos)!{{{ type (mpas_multihalo_exchange_list), pointer :: exchList !< Input: Exchange list to initialize integer, intent(in) :: nHalos !< Input: Number of halo layers for exchange list - integer :: i + integer :: i, threadNum - allocate(exchList) - allocate(exchList % halos(nHalos)) - do i = 1, nHalos - nullify(exchList % halos(i) % exchList) - end do - nullify(exchList % next) - nullify(exchList % prev) + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + allocate(exchList) + allocate(exchList % halos(nHalos)) + do i = 1, nHalos + nullify(exchList % halos(i) % exchList) + end do + nullify(exchList % next) + nullify(exchList % prev) + end if end subroutine mpas_dmpar_init_multihalo_exchange_list!}}} !----------------------------------------------------------------------- @@ -5654,17 +6151,20 @@ subroutine mpas_dmpar_destroy_mulithalo_exchange_list(exchList)!{{{ type (mpas_multihalo_exchange_list), pointer :: exchList !< Input: Exchange list to destroy. 
integer :: nHalos - integer :: i + integer :: i, threadNum nHalos = size(exchList % halos) + threadNum = mpas_threading_get_thread_num() - do i = 1, nHalos - call mpas_dmpar_destroy_exchange_list(exchList % halos(i) % exchList) - end do + if ( threadNum == 0 ) then + do i = 1, nHalos + call mpas_dmpar_destroy_exchange_list(exchList % halos(i) % exchList) + end do - deallocate(exchList % halos) - deallocate(exchList) - nullify(exchList) + deallocate(exchList % halos) + deallocate(exchList) + nullify(exchList) + end if end subroutine mpas_dmpar_destroy_mulithalo_exchange_list!}}} !----------------------------------------------------------------------- @@ -5680,26 +6180,31 @@ end subroutine mpas_dmpar_destroy_mulithalo_exchange_list!}}} subroutine mpas_dmpar_destroy_communication_list(commList)!{{{ type (mpas_communication_list), pointer :: commList !< Input: Communication list to destroy. type (mpas_communication_list), pointer :: commListPtr + integer :: threadNum - commListPtr => commList - do while(associated(commListPtr)) - if(associated(commList % next)) then - commList => commList % next - else - nullify(commList) - end if + threadNum = mpas_threading_get_thread_num() - if(associated(commListPtr % ibuffer)) then - deallocate(commListPtr % ibuffer) - end if + if ( threadNum == 0 ) then + commListPtr => commList + do while(associated(commListPtr)) + if(associated(commList % next)) then + commList => commList % next + else + nullify(commList) + end if - if(associated(commListPtr % rbuffer)) then - deallocate(commListPtr % rbuffer) - end if + if(associated(commListPtr % ibuffer)) then + deallocate(commListPtr % ibuffer) + end if - deallocate(commListPtr) - commListPtr => commList - end do + if(associated(commListPtr % rbuffer)) then + deallocate(commListPtr % rbuffer) + end if + + deallocate(commListPtr) + commListPtr => commList + end do + end if end subroutine mpas_dmpar_destroy_communication_list!}}} @@ -5716,26 +6221,31 @@ end subroutine 
mpas_dmpar_destroy_communication_list!}}} subroutine mpas_dmpar_destroy_exchange_list(exchList)!{{{ type (mpas_exchange_list), pointer :: exchList !< Input: Exchange list to destroy type (mpas_exchange_list), pointer :: exchListPtr + integer :: threadNum - exchListPtr => exchList - do while(associated(exchList)) - if(associated(exchList % next)) then - exchList => exchList % next - else - nullify(exchList) - end if + threadNum = mpas_threading_get_thread_num() - if(associated(exchListPtr % srcList)) then - deallocate(exchListPtr % srcList) - end if + if ( threadNum == 0 ) then + exchListPtr => exchList + do while(associated(exchList)) + if(associated(exchList % next)) then + exchList => exchList % next + else + nullify(exchList) + end if - if(associated(exchListPtr % destList)) then - deallocate(exchListPtr % destList) - end if + if(associated(exchListPtr % srcList)) then + deallocate(exchListPtr % srcList) + end if - deallocate(exchListPtr) - exchListPtr => exchList - end do + if(associated(exchListPtr % destList)) then + deallocate(exchListPtr % destList) + end if + + deallocate(exchListPtr) + exchListPtr => exchList + end do + end if end subroutine mpas_dmpar_destroy_exchange_list!}}} @@ -5752,13 +6262,18 @@ end subroutine mpas_dmpar_destroy_exchange_list!}}} subroutine mpas_dmpar_copy_field1d_integer(field)!{{{ type (field1dInteger), pointer :: field !< Input: Field to copy type (field1dInteger), pointer :: fieldCursor + integer :: threadNum - if(associated(field % next)) then - fieldCursor => field % next - do while(associated(fieldCursor)) - fieldCursor % array = field % array - fieldCursor => fieldCursor % next - end do + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(associated(field % next)) then + fieldCursor => field % next + do while(associated(fieldCursor)) + fieldCursor % array = field % array + fieldCursor => fieldCursor % next + end do + end if end if end subroutine mpas_dmpar_copy_field1d_integer!}}} @@ -5775,13 
+6290,18 @@ end subroutine mpas_dmpar_copy_field1d_integer!}}} subroutine mpas_dmpar_copy_field2d_integer(field)!{{{ type (field2dInteger), pointer :: field !< Input: Field to copy type (field2dInteger), pointer :: fieldCursor + integer :: threadNum - if(associated(field % next)) then - fieldCursor => field % next - do while(associated(fieldCursor)) - fieldCursor % array = field % array - fieldCursor => fieldCursor % next - end do + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(associated(field % next)) then + fieldCursor => field % next + do while(associated(fieldCursor)) + fieldCursor % array = field % array + fieldCursor => fieldCursor % next + end do + end if end if end subroutine mpas_dmpar_copy_field2d_integer!}}} @@ -5798,13 +6318,18 @@ end subroutine mpas_dmpar_copy_field2d_integer!}}} subroutine mpas_dmpar_copy_field3d_integer(field)!{{{ type (field3dInteger), pointer :: field !< Input: Field to copy type (field3dInteger), pointer :: fieldCursor + integer :: threadNum - if(associated(field % next)) then - fieldCursor => field % next - do while(associated(fieldCursor)) - fieldCursor % array = field % array - fieldCursor => fieldCursor % next - end do + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(associated(field % next)) then + fieldCursor => field % next + do while(associated(fieldCursor)) + fieldCursor % array = field % array + fieldCursor => fieldCursor % next + end do + end if end if end subroutine mpas_dmpar_copy_field3d_integer!}}} @@ -5821,14 +6346,18 @@ end subroutine mpas_dmpar_copy_field3d_integer!}}} subroutine mpas_dmpar_copy_field1d_real(field)!{{{ type (field1dReal), pointer :: field !< Input: Field to copy type (field1dReal), pointer :: fieldCursor + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - if(associated(field % next)) then - fieldCursor => field - do while(associated(fieldCursor)) - fieldCursor % array(:) = field % array(:) - fieldCursor => 
fieldCursor % next - end do + if ( threadNum == 0 ) then + if(associated(field % next)) then + fieldCursor => field + do while(associated(fieldCursor)) + fieldCursor % array(:) = field % array(:) + fieldCursor => fieldCursor % next + end do + end if end if end subroutine mpas_dmpar_copy_field1d_real!}}} @@ -5845,13 +6374,18 @@ end subroutine mpas_dmpar_copy_field1d_real!}}} subroutine mpas_dmpar_copy_field2d_real(field)!{{{ type (field2dReal), pointer :: field !< Input: Field to copy type (field2dReal), pointer :: fieldCursor + integer :: threadNum - if(associated(field % next)) then - fieldCursor => field % next - do while(associated(fieldCursor)) - fieldCursor % array = field % array - fieldCursor => fieldCursor % next - end do + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(associated(field % next)) then + fieldCursor => field % next + do while(associated(fieldCursor)) + fieldCursor % array = field % array + fieldCursor => fieldCursor % next + end do + end if end if end subroutine mpas_dmpar_copy_field2d_real!}}} @@ -5868,13 +6402,18 @@ end subroutine mpas_dmpar_copy_field2d_real!}}} subroutine mpas_dmpar_copy_field3d_real(field)!{{{ type (field3dReal), pointer :: field !< Input: Field to copy type (field3dReal), pointer :: fieldCursor + integer :: threadNum - if(associated(field % next)) then - fieldCursor => field % next - do while(associated(fieldCursor)) - fieldCursor % array = field % array - fieldCursor => fieldCursor % next - end do + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(associated(field % next)) then + fieldCursor => field % next + do while(associated(fieldCursor)) + fieldCursor % array = field % array + fieldCursor => fieldCursor % next + end do + end if end if end subroutine mpas_dmpar_copy_field3d_real!}}} @@ -5891,13 +6430,17 @@ end subroutine mpas_dmpar_copy_field3d_real!}}} subroutine mpas_dmpar_copy_field4d_real(field)!{{{ type (field4dReal), pointer :: field !< Input: 
Field to copy type (field4dReal), pointer :: fieldCursor - - if(associated(field % next)) then - fieldCursor => field % next - do while(associated(fieldCursor)) - fieldCursor % array = field % array - fieldCursor => fieldCursor % next - end do + integer :: threadNum + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(associated(field % next)) then + fieldCursor => field % next + do while(associated(fieldCursor)) + fieldCursor % array = field % array + fieldCursor => fieldCursor % next + end do + end if end if end subroutine mpas_dmpar_copy_field4d_real!}}} @@ -5914,48 +6457,3337 @@ end subroutine mpas_dmpar_copy_field4d_real!}}} subroutine mpas_dmpar_copy_field5d_real(field)!{{{ type (field5dReal), pointer :: field !< Input: Field to copy type (field5dReal), pointer :: fieldCursor + integer :: threadNum - if(associated(field % next)) then - fieldCursor => field % next - do while(associated(fieldCursor)) - fieldCursor % array = field % array - fieldCursor => fieldCursor % next - end do + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(associated(field % next)) then + fieldCursor => field % next + do while(associated(fieldCursor)) + fieldCursor % array = field % array + fieldCursor => fieldCursor % next + end do + end if end if end subroutine mpas_dmpar_copy_field5d_real!}}} -end module mpas_dmpar - !----------------------------------------------------------------------- -! routine mpas_dmpar_global_abort +! routine mpas_dmpar_get_time ! -!> \brief MPAS dmpar global abort routine. -!> \author Michael Duda -!> \date 03/26/13 +!> \brief MPAS dmpar get time routine +!> \author Doug Jacobsen +!> \date 06/11/2015 !> \details -!> This routine aborts MPI. A call to it kills the model through the use of MPI_Abort on the world communicator, and outputs a message. +!> This routine returns the current time, either using the MPI interface, or +!> the system_clock interface. ! 
!----------------------------------------------------------------------- - subroutine mpas_dmpar_global_abort(mesg)!{{{ - use mpas_io_units + subroutine mpas_dmpar_get_time(curTime)!{{{ implicit none - include 'mpif.h' - - character (len=*), intent(in) :: mesg !< Input: Abort message + real (kind=R8KIND), intent(out) :: curTime !< Output: Current time + integer :: clock, hz, threadNum #ifdef _MPI - integer :: mpi_ierr, mpi_errcode - - write(stderrUnit,*) trim(mesg) - call MPI_Abort(MPI_COMM_WORLD, mpi_errcode, mpi_ierr) + curTime = MPI_WTime() +#else + call system_clock(count=clock) + call system_clock(count_rate=hz) + curTime = real(clock, kind=R8KIND) / real(hz, kind=R8KIND) #endif - write(stderrUnit,*) trim(mesg) - stop + end subroutine mpas_dmpar_get_time!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_create +! +!> \brief MPAS dmpar exchange group creation routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This routine creates a new exchange group named 'groupName' within a +!> domain's list of exchange groups. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_create(domain, groupName, iErr)!{{{ + + type (domain_type), intent(inout) :: domain + character (len=*), intent(in) :: groupName + integer, optional, intent(out) :: iErr + + type (mpas_exchange_group), pointer :: exchGroupPtr, prevExchGroupPtr + integer :: nLen + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + nLen = len_trim(groupName) + DMPAR_DEBUG_WRITE(' -- Creating exchange group ' // trim(groupName)) + + ! If no exchange group has been created yet, allocate the first + if ( .not. associated(domain % exchangeGroups) ) then + allocate(domain % exchangeGroups) + exchGroupPtr => domain % exchangeGroups + ! 
Otherwise, make sure the group has not already been created, and add it to the end of the list + else + exchGroupPtr => domain % exchangeGroups + do while (associated(exchGroupPtr)) + if ( nLen == exchGroupPtr % nLen) then + if ( groupName(1:nLen) == exchGroupPtr % groupName(1:exchGroupPtr % nLen) ) then + write(stderrUnit, *) 'ERROR: Exchange group ''' // groupName(1:nLen) // ''' already exists.' + write(stderrUnit, *) ' Cannot create group.' + if ( present(iErr) ) then + iErr = MPAS_DMPAR_EXISTING_GROUP + end if + return + end if + end if + prevExchGroupPtr => exchGroupPtr + exchGroupPtr => exchGroupPtr % next + end do + + allocate(prevExchGroupPtr % next) + exchGroupPtr => prevExchGroupPtr % next + end if + + exchGroupPtr % nLen = nLen + exchGroupPtr % groupName = trim(groupName) + + call mpas_pool_create_pool(exchGroupPtr % fieldPool) + end if + + end subroutine mpas_dmpar_exch_group_create!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_destroy +! +!> \brief MPAS dmpar exchange group destruction routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This routine destroys an exchange group named 'groupName' within a +!> domain's list of exchange groups. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_destroy(domain, groupName, iErr)!{{{ + + type (domain_type), intent(inout) :: domain + character (len=*), intent(in) :: groupName + integer, optional, intent(out) :: iErr + + type (mpas_exchange_field_list), pointer :: exchFieldListPtr + type (mpas_exchange_group), pointer :: exchGroupPtr, prevExchGroupPtr + integer :: nLen + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if - end subroutine mpas_dmpar_global_abort!}}} + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + nLen = len_trim(groupName) + DMPAR_DEBUG_WRITE(' -- Destroying exchange group ' // trim(groupName)) + + nullify(prevExchGroupPtr) + exchGroupPtr => domain % exchangeGroups + do while (associated(exchGroupPtr)) + if ( nLen == exchGroupPtr % nLen) then + if ( groupName(1:nLen) == exchGroupPtr % groupName(1:exchGroupPtr % nLen) ) then + if ( associated(prevExchGroupPtr) ) then + prevExchGroupPtr % next => exchGroupPtr % next + else + if ( associated(exchGroupPtr % next) ) then + domain % exchangeGroups => exchGroupPtr % next + else + nullify(domain % exchangeGroups) + end if + end if + + call mpas_pool_destroy_pool(exchGroupPtr % fieldPool) + + ! 
Destroy field list + do while ( associated(exchGroupPtr % fieldList) ) + exchFieldListPtr => exchGroupPtr % fieldList + if ( associated(exchFieldListPtr % next) ) then + exchGroupPtr % fieldList => exchFieldListPtr % next + else + nullify(exchGroupPtr % fieldList) + end if + + if ( associated(exchFieldListPtr % haloLayers) ) then + deallocate(exchFieldListPtr % haloLayers) + end if + + if ( associated(exchFieldListPtr % timeLevels) ) then + deallocate(exchFieldListPtr % timeLevels) + end if + + nullify(exchFieldListPtr % next) + deallocate(exchFieldListPtr) + end do + + deallocate(exchGroupPtr) + + return + end if + end if + prevExchGroupPtr => exchGroupPtr + exchGroupPtr => exchGroupPtr % next + end do + + write(stderrUnit, *) 'ERROR: No exchange group found named ''' // trim(groupName) // '''.' + write(stderrUnit, *) ' Cannot destroy group.' + if ( present(iErr) ) then + iErr = MPAS_DMPAR_MISSING_GROUP + end if + end if + + end subroutine mpas_dmpar_exch_group_destroy!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_add_field +! +!> \brief MPAS dmpar exchange group add field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This routine adds a field named 'fieldName' to an exchange group named +!> 'groupName' within a domain's list of exchange groups. +!> The timeLevel argument allows control over which timeLevel(s) will be +!> exchanged as part of this group. If the timeLevel argument is omitted or if it +!> has a value of -1, all time levels will be exchanged. +!> The haloLayers argument allows an input array to define the halo layers +!> that should be exchanged as part of this exchange group. If it is +!> omitted, all halo layers will be exchanged. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_add_field(domain, groupName, fieldName, timeLevel, haloLayers, iErr)!{{{ + + type (domain_type), intent(inout) :: domain + character (len=*), intent(in) :: groupName + character (len=*), intent(in) :: fieldName + integer, optional, intent(in) :: timeLevel + integer, dimension(:), optional, intent(in) :: haloLayers + integer, optional, intent(out) :: iErr + + type (mpas_exchange_field_list), pointer :: exchFieldListPtr + type (mpas_exchange_group), pointer :: exchGroupPtr + integer :: nLen, fieldNLen, timeLevelLocal + type (mpas_pool_field_info_type) :: fieldInfo + integer :: iHalo + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + nLen = len_trim(groupName) + fieldNLen = len_trim(fieldName) + + DMPAR_DEBUG_WRITE(' -- Adding field ' // trim(fieldName) // ' to exchange group ' // trim(groupName) ) + + ! Check that field exists + fieldInfo % fieldType = -1 + fieldInfo % nDims = -1 + fieldInfo % nTimeLevels = -1 + fieldInfo % isActive = .false. + call mpas_pool_get_field_info(domain % blocklist % allFields, fieldName, fieldInfo) + + if ( fieldInfo % fieldType == -1 .and. fieldInfo % nDims == -1 .and. fieldInfo % nTimeLevels == -1 ) then + write(stderrUnit, *) 'ERROR: Field ''' // trim(fieldName) // ''' does not exist.' + write(stderrUnit, *) ' Cannot add field.' + if ( present(iErr) ) then + iErr = MPAS_DMPAR_MISSING_FIELD + end if + return + end if + + ! Validate dimensions + if ( fieldInfo % nDims == 0 ) then + write(stderrUnit, *) 'WARNING: Field ''' // trim(fieldName) // & + ''' has zero dimensions and cannot be added to exchange group ''' // & + trim(groupName) // '''.' + return + end if + + ! Validate field type + if ( fieldInfo % fieldType /= MPAS_POOL_REAL .and. 
fieldInfo % fieldType /= MPAS_POOL_INTEGER ) then + write(stderrUnit, *) 'WARNING: Field ''' // trim(fieldName) // & + ''' is an incorrect type for a halo exchange, and cannot be added to exchange group ''' // & + trim(groupName) // '''.' + return + end if + + ! Setup and validate time level + if ( present(timeLevel) ) then + timeLevelLocal = timeLevel + else + timeLevelLocal = -1 + end if + + if ( timeLevelLocal > fieldInfo % nTimeLevels ) then + write(stderrUnit, *) 'ERROR: Field ''' // trim(fieldName) // & + ''' has fewer time levels than the level requested. Cannot add to exchange group ''' // & + trim(groupName) // '''.' + if ( present(iErr) ) then + iErr = MPAS_DMPAR_FIELD_TIMELEVEL_ERR + end if + return + else if ( timeLevelLocal < -1 ) then + write(stderrUnit, '(a62, i2, a)') 'ERROR: timeLevel argument can only have a value between -1 and ', & + fieldInfo % nTimeLevels, ' when adding field ''' // trim(fieldName) & + // ''' to exchange group ''' // trim(groupName) // '''.' + if ( present(iErr) ) then + iErr = MPAS_DMPAR_FIELD_TIMELEVEL_ERR + end if + return + end if + + if ( present(haloLayers) ) then + do iHalo = 1, size(haloLayers) + if ( haloLayers(iHalo) > fieldInfo % nHaloLayers ) then + write(stderrUnit, '(a60, i2, a)') 'ERROR: haloLayers argument contains an invalid halo index of ', & + haloLayers(iHalo), ' when adding field ''' // trim(fieldName) // & + ''' to exchange group ''' // trim(groupName) // '''.' + if ( present(iErr) ) then + iErr = MPAS_DMPAR_FIELD_HALO_ERR + end if + return + end if + end do + end if + + exchGroupPtr => domain % exchangeGroups + do while (associated(exchGroupPtr)) + if ( nLen == exchGroupPtr % nLen) then + if ( groupName(1:nLen) == exchGroupPtr % groupName(1:exchGroupPtr % nLen) ) then + call mpas_pool_add_config(exchGroupPtr % fieldPool, fieldName, timeLevelLocal) + + ! Add field to exchange group's fieldList + if ( associated(exchGroupPtr % fieldList) ) then + ! If the field is already added, find it. 
+ exchFieldListPtr => exchGroupPtr % fieldList + do while ( associated(exchFieldListPtr) ) + if ( fieldNLen == exchFieldListPtr % nLen ) then + if ( fieldName(1:fieldNLen) == exchFieldListPtr % fieldName(1:exchFieldListPtr % nLen) ) then + ! Make sure this field is marked for the correct + ! halo exchanges, as defined by the input + ! arguments + ! TODO + if ( timeLevelLocal == -1 ) then + exchFieldListPtr % timeLevels(:) = .true. + else + exchFieldListPtr % timeLevels(timeLevel) = .true. + end if + + if ( present(haloLayers) ) then + do iHalo = 1, size(haloLayers) + exchFieldListPtr % haloLayers( haloLayers(iHalo) ) = .true. + end do + else + exchFieldListPtr % haloLayers(:) = .true. + end if + + ! Return, as the field has already been added + return + end if + end if + + exchFieldListPtr => exchFieldListPtr % next + end do + end if + + ! Add the field to the beginning of the list, since it + ! isn't in the list. + allocate(exchFieldListPtr) + exchFieldListPtr % nLen = fieldNLen + exchFieldListPtr % fieldName = trim(fieldName) + exchFieldListPtr % nDims = fieldInfo % nDims + exchFieldListPtr % fieldType = fieldInfo % fieldType + allocate(exchFieldListPtr % haloLayers(fieldInfo % nHaloLayers)) + allocate(exchFieldListPtr % timeLevels(fieldInfo % nTimeLevels)) + exchFieldListPtr % haloLayers(:) = .false. + exchFieldListPtr % timeLevels(:) = .false. + + if ( timeLevelLocal == -1 ) then + exchFieldListPtr % timeLevels(:) = .true. + else + exchFieldListPtr % timeLevels(timeLevelLocal) = .true. + end if + + if ( present(haloLayers) ) then + do iHalo = 1, size(haloLayers) + exchFieldListPtr % haloLayers( haloLayers(iHalo) ) = .true. + end do + else + exchFieldListPtr % haloLayers(:) = .true. + end if + + ! The next pointer is null by default, so only update it if + ! there is a fieldList already. 
+ if ( associated(exchGroupPtr % fieldList) ) then + exchFieldListPtr % next => exchGroupPtr % fieldList + end if + exchGroupPtr % fieldList => exchFieldListPtr + + return + end if + end if + exchGroupPtr => exchGroupPtr % next + end do + + write(stderrUnit, *) 'ERROR: No exchange group found named ''' // trim(groupName) // '''.' + write(stderrUnit, *) ' Cannot add field ''' // trim(fieldName) // ''' to group.' + if ( present(iErr) ) then + iErr = MPAS_DMPAR_MISSING_GROUP + end if + end if + + end subroutine mpas_dmpar_exch_group_add_field!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_remove_field +! +!> \brief MPAS dmpar exchange group remove field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This routine removes a field named 'fieldName' from an exchange group named +!> 'groupName' within a domain's list of exchange groups. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_remove_field(domain, groupName, fieldName, iErr)!{{{ + + type (domain_type), intent(inout) :: domain + character (len=*), intent(in) :: groupName + character (len=*), intent(in) :: fieldName + integer, optional, intent(out) :: iErr + + type (mpas_exchange_field_list), pointer :: exchFieldListPtr, prevFieldListPtr + type (mpas_exchange_group), pointer :: exchGroupPtr + integer :: nLen, fieldNLen + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + nLen = len_trim(groupName) + fieldNLen = len_trim(fieldName) + DMPAR_DEBUG_WRITE(' -- Removing field ' // trim(fieldName) // ' from exchange group ' // trim(groupName)) + + exchGroupPtr => domain % exchangeGroups + do while (associated(exchGroupPtr)) + if ( nLen == exchGroupPtr % nLen) then + if ( groupName(1:nLen) == exchGroupPtr % groupName(1:exchGroupPtr % nLen) ) then + ! 
Remove after the transition to exchange_field_lists is complete + call mpas_pool_remove_config(exchGroupPtr % fieldPool, fieldName) + + if ( associated(exchGroupPtr % fieldList) ) then + exchFieldListPtr => exchGroupPtr % fieldList + nullify(prevFieldListPtr) + do while ( associated(exchFieldListPtr) ) + if ( fieldNLen == exchFieldListPtr % nLen ) then + if ( fieldName(1:fieldNLen) == exchFieldListPtr % fieldName(1:exchFieldListPtr % nLen) ) then + ! Remove exchFieldListPtr, as it's the field we're looking for. + if ( associated(prevFieldListPtr) ) then + if ( associated(exchFieldListPtr % next) ) then + prevFieldListPtr % next => exchFieldListPtr % next + else + nullify(prevFieldListPtr % next) + end if + else + if ( associated(exchFieldListPtr % next) ) then + exchGroupPtr % fieldList => exchFieldListPtr % next + else + nullify(exchGroupPtr % fieldList) + end if + end if + + if ( associated(exchFieldListPtr % haloLayers) ) then + deallocate(exchFieldListPtr % haloLayers) + end if + if ( associated(exchFieldListPtr % timeLevels) ) then + deallocate(exchFieldListPtr % timeLevels) + end if + nullify(exchFieldListPtr % next) + deallocate(exchFieldListPtr) + + ! Return, as we've successfully removed the field + ! from the field list. + return + end if + end if + + prevFieldListPtr => exchFieldListPtr + exchFieldListPtr => exchFieldListPtr % next + end do + end if + return + end if + end if + exchGroupPtr => exchGroupPtr % next + end do + + write(stderrUnit, *) 'ERROR: No exchange group found named ''' // trim(groupName) // '''.' + write(stderrUnit, *) ' Cannot add field ''' // trim(fieldName) // ''' to group.' + if ( present(iErr) ) then + iErr = MPAS_DMPAR_MISSING_GROUP + end if + end if + + end subroutine mpas_dmpar_exch_group_remove_field!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_begin_halo_exch +! 
+!> \brief MPAS dmpar exchange group begin halo exchange routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This routine starts a halo exchange for an exchange group. This includes +!> creating the buffers, packing the buffers, and starting the ISend / IRecv +!> commands. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_begin_halo_exch(domain, groupName, iErr)!{{{ + + type (domain_type), intent(inout) :: domain + character (len=*), intent(in) :: groupName + integer, optional, intent(out) :: iErr + + type (mpas_exchange_group), pointer :: exchGroupPtr + integer :: nLen + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + DMPAR_DEBUG_WRITE(' -- Trying to start halo exchange for group ' // trim(groupName)) + + nLen = len_trim(groupName) + + exchGroupPtr => domain % exchangeGroups + do while (associated(exchGroupPtr)) + if ( nLen == exchGroupPtr % nLen) then + if ( groupName(1:nLen) == exchGroupPtr % groupName(1:exchGroupPtr % nLen) ) then + DMPAR_DEBUG_WRITE(' -- Starting halo exchange for group ' // trim(groupName)) + call mpas_dmpar_exch_group_build_buffers(domain % blocklist % allFields, exchGroupPtr) + call mpas_dmpar_exch_group_start_recv(domain % dminfo, exchGroupPtr) + call mpas_dmpar_exch_group_pack_buffers(domain % blocklist % allFields, exchGroupPtr) + call mpas_dmpar_exch_group_start_send(domain % dminfo, exchGroupPtr) + return + end if + end if + exchGroupPtr => exchGroupPtr % next + end do + + write(stderrUnit, *) 'ERROR: No exchange group found named ''' // trim(groupName) // '''.' + write(stderrUnit, *) ' Cannot perform halo exchange on group.' + if ( present(iErr) ) then + iErr = MPAS_DMPAR_MISSING_GROUP + end if + + end subroutine mpas_dmpar_exch_group_begin_halo_exch!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_local_halo_exch +! 
+!> \brief MPAS dmpar exchange group local halo exchange routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This routine performs the local exchange portion of a halo exchange using +!> an exchange group. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_local_halo_exch(domain, groupName, iErr)!{{{ + + type (domain_type), intent(inout) :: domain + character (len=*), intent(in) :: groupName + integer, optional, intent(out) :: iErr + + type (mpas_exchange_group), pointer :: exchGroupPtr + integer :: nLen + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + nLen = len_trim(groupName) + DMPAR_DEBUG_WRITE(' -- Trying to perform local copies for exchange group ' // trim(groupName)) + + exchGroupPtr => domain % exchangeGroups + do while (associated(exchGroupPtr)) + if ( nLen == exchGroupPtr % nLen) then + if ( groupName(1:nLen) == exchGroupPtr % groupName(1:exchGroupPtr % nLen) ) then + DMPAR_DEBUG_WRITE(' -- Performing local copies for exchange group ' // trim(groupName)) + call mpas_dmpar_exch_group_local_exch_fields(domain % blocklist % allFields, exchGroupPtr) + return + end if + end if + exchGroupPtr => exchGroupPtr % next + end do + + write(stderrUnit, *) 'ERROR: No exchange group found named ''' // trim(groupName) // '''.' + write(stderrUnit, *) ' Cannot perform halo exchange on group.' + if ( present(iErr) ) then + iErr = MPAS_DMPAR_MISSING_GROUP + end if + + end subroutine mpas_dmpar_exch_group_local_halo_exch!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_end_halo_exch +! +!> \brief MPAS dmpar exchange group end halo exchange routine +!> \author Doug Jacobsen +!> \date 01/06/2016 +!> \details +!> This routine ends a halo exchange using an exchange group. This includes +!> waiting for IRecv commands to complete for receive buffers, unpacking buffers, +!> and deallocating buffers. +! 
!-----------------------------------------------------------------------
   subroutine mpas_dmpar_exch_group_end_halo_exch(domain, groupName, iErr)!{{{

      type (domain_type), intent(inout) :: domain      !< domain owning the exchange groups and field pools
      character (len=*), intent(in) :: groupName       !< name of the exchange group whose exchange is being finished
      integer, optional, intent(out) :: iErr           !< MPAS_DMPAR_NOERR on success, MPAS_DMPAR_MISSING_GROUP otherwise

      type (mpas_exchange_group), pointer :: exchGroupPtr
      integer :: nLen

      ! Assume success; overwritten below if the group cannot be found.
      if ( present(iErr) ) then
         iErr = MPAS_DMPAR_NOERR
      end if

      nLen = len_trim(groupName)
      DMPAR_DEBUG_WRITE(' -- Trying to finish halo exchange for exchange group ' // trim(groupName))

      ! Locate the named group (length check first, then significant characters).
      exchGroupPtr => domain % exchangeGroups
      do while (associated(exchGroupPtr))
         if ( nLen == exchGroupPtr % nLen) then
            if ( groupName(1:nLen) == exchGroupPtr % groupName(1:exchGroupPtr % nLen) ) then
               DMPAR_DEBUG_WRITE(' -- Finishing halo exchange for exchange group ' // trim(groupName))
               ! Wait for outstanding receives and unpack each buffer as it arrives.
               call mpas_dmpar_exch_group_unpack_buffers(domain % blocklist % allFields, exchGroupPtr)

               ! Deallocate the communication buffers, as promised in this
               ! routine's documentation ("...and deallocating buffers") and as
               ! done by mpas_dmpar_exch_group_full_halo_exch. Without this
               ! call, the buffers allocated by begin_halo_exch would leak on
               ! every begin/end exchange cycle.
               call mpas_dmpar_exch_group_destroy_buffers(exchGroupPtr)
               return
            end if
         end if
         exchGroupPtr => exchGroupPtr % next
      end do

      write(stderrUnit, *) 'ERROR: No exchange group found named ''' // trim(groupName) // '''.'
      write(stderrUnit, *) ' Cannot perform halo exchange on group.'
      if ( present(iErr) ) then
         iErr = MPAS_DMPAR_MISSING_GROUP
      end if

   end subroutine mpas_dmpar_exch_group_end_halo_exch!}}}


!-----------------------------------------------------------------------
! routine mpas_dmpar_exch_group_full_halo_exch
!
!> \brief MPAS dmpar exchange group full halo exchange routine
!> \author Doug Jacobsen
!> \date 01/11/2016
!> \details
!> This routine performs a full halo exchange on an exchange group.
!> It is blocking, in that the routine doesn't return until the full
!> exchange is complete.
!
!-----------------------------------------------------------------------
   subroutine mpas_dmpar_exch_group_full_halo_exch(domain, groupName, iErr)!{{{

      type (domain_type), intent(inout) :: domain      !< domain owning the exchange groups and field pools
      character (len=*), intent(in) :: groupName       !< name of the exchange group to exchange
      integer, optional, intent(out) :: iErr           !< MPAS_DMPAR_NOERR on success, MPAS_DMPAR_MISSING_GROUP otherwise

      type (mpas_exchange_group), pointer :: exchGroupPtr
      integer :: nLen

      ! Assume success; overwritten below if the group cannot be found.
      if ( present(iErr) ) then
         iErr = MPAS_DMPAR_NOERR
      end if

      nLen = len_trim(groupName)
      DMPAR_DEBUG_WRITE(' -- Trying to perform a full exchange for group ' // trim(groupName))

      ! Locate the named group (length check first, then significant characters).
      exchGroupPtr => domain % exchangeGroups
      do while (associated(exchGroupPtr))
         if ( nLen == exchGroupPtr % nLen) then
            if ( groupName(1:nLen) == exchGroupPtr % groupName(1:exchGroupPtr % nLen) ) then
               DMPAR_DEBUG_WRITE(' -- Performing a full exchange for group ' // trim(groupName))
               ! Setup exchange (should be the same as begin_halo_exch)
               call mpas_dmpar_exch_group_build_buffers(domain % blocklist % allFields, exchGroupPtr)
               call mpas_dmpar_exch_group_start_recv(domain % dminfo, exchGroupPtr)
               call mpas_dmpar_exch_group_pack_buffers(domain % blocklist % allFields, exchGroupPtr)
               call mpas_dmpar_exch_group_start_send(domain % dminfo, exchGroupPtr)

               ! Perform local copies (should be the same as local_halo_exch)
               call mpas_dmpar_exch_group_local_exch_fields(domain % blocklist % allFields, exchGroupPtr)

               ! Finish the halo exchange (should be the same as end_halo_exch)
               call mpas_dmpar_exch_group_unpack_buffers(domain % blocklist % allFields, exchGroupPtr)

               ! Print out buffers for debugging
               !call mpas_dmpar_exch_group_print_buffers(exchGroupPtr)

               ! Destroy the buffers
               call mpas_dmpar_exch_group_destroy_buffers(exchGroupPtr)
               return
            end if
         end if
         exchGroupPtr => exchGroupPtr % next
      end do

      write(stderrUnit, *) 'ERROR: No exchange group found named ''' // trim(groupName) // '''.'
      write(stderrUnit, *) ' Cannot perform halo exchange on group.'
      if ( present(iErr) ) then
         iErr = MPAS_DMPAR_MISSING_GROUP
      end if

   end subroutine mpas_dmpar_exch_group_full_halo_exch!}}}


!-----------------------------------------------------------------------
! routine mpas_dmpar_field_halo_exch
!
!> \brief MPAS dmpar full halo exchange routine
!> \author Doug Jacobsen
!> \date 01/11/2016
!> \details
!> This routine performs a full halo exchange on a specific field.
!> It is blocking, in that the routine doesn't return until the full
!> exchange is complete.
!> It creates a temporary exchange group, adds this field to it, exchanges it,
!> and destroys the group.
!
!-----------------------------------------------------------------------
   subroutine mpas_dmpar_field_halo_exch(domain, fieldName, timeLevel, haloLayers, iErr)!{{{

      type (domain_type), intent(inout) :: domain                  !< domain owning the field pools
      character (len=*), intent(in) :: fieldName                   !< name of the single field to exchange
      integer, optional, intent(in) :: timeLevel                   !< optional time level of the field to exchange
      integer, dimension(:), optional, intent(in) :: haloLayers    !< optional list of halo layers to exchange
      integer, optional, intent(out) :: iErr                       !< MPAS_DMPAR_NOERR on success

      ! NOTE: the unused locals exchGroupPtr and nLen from the original
      ! version have been removed; this routine delegates the group lookup
      ! to the exchange-group routines it calls.
      character (len=StrKIND) :: groupName

      if ( present(iErr) ) then
         iErr = MPAS_DMPAR_NOERR
      end if

      DMPAR_DEBUG_WRITE(' -- Performing a single field halo exchange for ' // trim(fieldName))

      ! Build a throw-away exchange group containing just this field,
      ! exchange it, and tear the group down again.
      groupName = 'TEMPSingleFieldGroup'
      call mpas_dmpar_exch_group_create(domain, groupName)

      ! Optional arguments are passed straight through; an absent optional
      ! here is seen as absent by the callee as well.
      call mpas_dmpar_exch_group_add_field(domain, groupName, fieldName, timeLevel=timeLevel, haloLayers=haloLayers)

      call mpas_threading_barrier()

      call mpas_dmpar_exch_group_full_halo_exch(domain, groupName)

      call mpas_threading_barrier()

      call mpas_dmpar_exch_group_destroy(domain, groupName)

   end subroutine mpas_dmpar_field_halo_exch!}}}


!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! Private routines for performing the beginning of halo exchanges
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!-----------------------------------------------------------------------
! routine mpas_dmpar_exch_group_build_buffers
!
!> \brief MPAS dmpar exchange group buffer construction routine
!> \author Doug Jacobsen
!> \date 01/05/2016
!> \details
!> This private routine creates the buffers and communication lists for an exchange group.
!> The aggregate sizes are accumulated per remote processor across every
!> field / time level / halo layer selected in the group's field list, and a
!> single real buffer per processor is then allocated for each direction.
!
!-----------------------------------------------------------------------
   subroutine mpas_dmpar_exch_group_build_buffers(allFieldsPool, exchangeGroup, iErr)!{{{
      type (mpas_pool_type), intent(in) :: allFieldsPool           !< pool holding every field referenced by the group
      type (mpas_exchange_group), intent(inout) :: exchangeGroup   !< group whose sendList / recvList are built here
      integer, optional, intent(out) :: iErr                       !< MPAS_DMPAR_NOERR, or MPAS_DMPAR_BUFFER_EXISTS if buffers are already allocated

      type (field1DInteger), pointer :: intField1D
      type (field2DInteger), pointer :: intField2D
      type (field3DInteger), pointer :: intField3D
      type (field1DReal), pointer :: realField1D
      type (field2DReal), pointer :: realField2D
      type (field3DReal), pointer :: realField3D
      type (field4DReal), pointer :: realField4D
      type (field5DReal), pointer :: realField5D

      type (mpas_exchange_field_list), pointer :: exchFieldListPtr
      type (mpas_communication_list), pointer :: commListPtr

      integer :: iHalo, iTimeLevel
      integer :: threadNum

      if ( present(iErr) ) then
         iErr = MPAS_DMPAR_NOERR
      end if

      threadNum = mpas_threading_get_thread_num()

      ! Only master thread can create buffers.
      if ( threadNum == 0 ) then
         ! Allocate communication lists, and setup dead header nodes
         ! (removed again below once aggregation is complete).
         allocate(exchangeGroup % sendList)
         nullify(exchangeGroup % sendList % next)
         exchangeGroup % sendList % procID = -1
         exchangeGroup % sendList % nList = 0

         allocate(exchangeGroup % recvList)
         nullify(exchangeGroup % recvList % next)
         exchangeGroup % recvList % procID = -1
         exchangeGroup % recvList % nList = 0

         ! Dispatch on field type (real/integer) and rank, aggregating each
         ! active field's send and recv exchange lists for every selected
         ! time level and halo layer into the group's communication lists.
         exchFieldListPtr => exchangeGroup % fieldList
         do while ( associated(exchFieldListPtr) )
            DMPAR_DEBUG_WRITE(' -- Building buffers for field ' // trim(exchFieldListPtr % fieldName))
            do iTimeLevel = 1, size(exchFieldListPtr % timeLevels)
               if ( exchFieldListPtr % timeLevels(iTimeLevel) ) then
                  if ( exchFieldListPtr % fieldType == MPAS_POOL_REAL ) then
                     if ( exchFieldListPtr % nDims == 1 ) then
                        call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField1D, iTimeLevel)

                        if ( realField1D % isActive ) then
                           do iHalo = 1, size(exchFieldListPtr % haloLayers)
                              if ( exchFieldListPtr % haloLayers(iHalo) ) then
                                 call mpas_dmpar_exch_group_aggregate_exch_list(realField1D % sendList, exchangeGroup % sendList, &
                                                realField1D % dimSizes, iHalo)
                                 call mpas_dmpar_exch_group_aggregate_exch_list(realField1D % recvList, exchangeGroup % recvList, &
                                                realField1D % dimSizes, iHalo, .true.)
                              end if
                           end do
                        end if
                     else if ( exchFieldListPtr % nDims == 2 ) then
                        call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField2D, iTimeLevel)

                        if ( realField2D % isActive ) then
                           do iHalo = 1, size(exchFieldListPtr % haloLayers)
                              if ( exchFieldListPtr % haloLayers(iHalo) ) then
                                 call mpas_dmpar_exch_group_aggregate_exch_list(realField2D % sendList, exchangeGroup % sendList, &
                                                realField2D % dimSizes, iHalo)
                                 call mpas_dmpar_exch_group_aggregate_exch_list(realField2D % recvList, exchangeGroup % recvList, &
                                                realField2D % dimSizes, iHalo, .true.)
                              end if
                           end do
                        end if
                     else if ( exchFieldListPtr % nDims == 3 ) then
                        call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField3D, iTimeLevel)

                        if ( realField3D % isActive ) then
                           do iHalo = 1, size(exchFieldListPtr % haloLayers)
                              if ( exchFieldListPtr % haloLayers(iHalo) ) then
                                 call mpas_dmpar_exch_group_aggregate_exch_list(realField3D % sendList, exchangeGroup % sendList, &
                                                realField3D % dimSizes, iHalo)
                                 call mpas_dmpar_exch_group_aggregate_exch_list(realField3D % recvList, exchangeGroup % recvList, &
                                                realField3D % dimSizes, iHalo, .true.)
                              end if
                           end do
                        end if
                     else if ( exchFieldListPtr % nDims == 4 ) then
                        call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField4D, iTimeLevel)

                        if ( realField4D % isActive ) then
                           do iHalo = 1, size(exchFieldListPtr % haloLayers)
                              if ( exchFieldListPtr % haloLayers(iHalo) ) then
                                 call mpas_dmpar_exch_group_aggregate_exch_list(realField4D % sendList, exchangeGroup % sendList, &
                                                realField4D % dimSizes, iHalo)
                                 call mpas_dmpar_exch_group_aggregate_exch_list(realField4D % recvList, exchangeGroup % recvList, &
                                                realField4D % dimSizes, iHalo, .true.)
                              end if
                           end do
                        end if
                     else if ( exchFieldListPtr % nDims == 5 ) then
                        call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField5D, iTimeLevel)

                        if ( realField5D % isActive ) then
                           do iHalo = 1, size(exchFieldListPtr % haloLayers)
                              if ( exchFieldListPtr % haloLayers(iHalo) ) then
                                 call mpas_dmpar_exch_group_aggregate_exch_list(realField5D % sendList, exchangeGroup % sendList, &
                                                realField5D % dimSizes, iHalo)
                                 call mpas_dmpar_exch_group_aggregate_exch_list(realField5D % recvList, exchangeGroup % recvList, &
                                                realField5D % dimSizes, iHalo, .true.)
                              end if
                           end do
                        end if
                     end if
                  else if ( exchFieldListPtr % fieldType == MPAS_POOL_INTEGER ) then
                     if ( exchFieldListPtr % nDims == 1 ) then
                        call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, intField1D, iTimeLevel)

                        if ( intField1D % isActive ) then
                           do iHalo = 1, size(exchFieldListPtr % haloLayers)
                              if ( exchFieldListPtr % haloLayers(iHalo) ) then
                                 call mpas_dmpar_exch_group_aggregate_exch_list(intField1D % sendList, exchangeGroup % sendList, &
                                                intField1D % dimSizes, iHalo)
                                 call mpas_dmpar_exch_group_aggregate_exch_list(intField1D % recvList, exchangeGroup % recvList, &
                                                intField1D % dimSizes, iHalo, .true.)
                              end if
                           end do
                        end if
                     else if ( exchFieldListPtr % nDims == 2 ) then
                        call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, intField2D, iTimeLevel)

                        if ( intField2D % isActive ) then
                           do iHalo = 1, size(exchFieldListPtr % haloLayers)
                              if ( exchFieldListPtr % haloLayers(iHalo) ) then
                                 call mpas_dmpar_exch_group_aggregate_exch_list(intField2D % sendList, exchangeGroup % sendList, &
                                                intField2D % dimSizes, iHalo)
                                 call mpas_dmpar_exch_group_aggregate_exch_list(intField2D % recvList, exchangeGroup % recvList, &
                                                intField2D % dimSizes, iHalo, .true.)
                              end if
                           end do
                        end if
                     else if ( exchFieldListPtr % nDims == 3 ) then
                        call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, intField3D, iTimeLevel)

                        if ( intField3D % isActive ) then
                           do iHalo = 1, size(exchFieldListPtr % haloLayers)
                              if ( exchFieldListPtr % haloLayers(iHalo) ) then
                                 call mpas_dmpar_exch_group_aggregate_exch_list(intField3D % sendList, exchangeGroup % sendList, &
                                                intField3D % dimSizes, iHalo)
                                 call mpas_dmpar_exch_group_aggregate_exch_list(intField3D % recvList, exchangeGroup % recvList, &
                                                intField3D % dimSizes, iHalo, .true.)
                              end if
                           end do
                        end if
                     end if
                  end if
               end if
            end do

            exchFieldListPtr => exchFieldListPtr % next
         end do

         ! Remove dead header nodes on communication lists
         commListPtr => exchangeGroup % sendList
         if ( associated(exchangeGroup % sendList % next) ) then
            exchangeGroup % sendList => exchangeGroup % sendList % next
         else
            nullify(exchangeGroup % sendList)
         end if
         deallocate(commListPtr)

         commListPtr => exchangeGroup % recvList
         if ( associated(exchangeGroup % recvList % next) ) then
            exchangeGroup % recvList => exchangeGroup % recvList % next
         else
            nullify(exchangeGroup % recvList)
         end if
         deallocate(commListPtr)


         ! Allocate buffers for each processor's communication list.
         ! A still-associated rbuffer means a previous exchange was never
         ! finished / destroyed, so bail out with MPAS_DMPAR_BUFFER_EXISTS.
         commListPtr => exchangeGroup % sendList
         do while ( associated(commListPtr) )
            if ( associated(commListPtr % rbuffer) ) then
               write(stderrUnit, *) 'ERROR: Communication buffer already exists. A halo exchange might be ' // &
                                    'in progress for group ''' // trim(exchangeGroup % groupName) // '''.'
               if ( present(iErr) ) then
                  iErr = MPAS_DMPAR_BUFFER_EXISTS
               end if
               return
            end if
            allocate(commListPtr % rbuffer(commListPtr % nList))
            commListPtr % bufferOffset = 0

            commListPtr => commListPtr % next
         end do

         commListPtr => exchangeGroup % recvList
         do while ( associated(commListPtr) )
            if ( associated(commListPtr % rbuffer) ) then
               write(stderrUnit, *) 'ERROR: Communication buffer already exists. A halo exchange might be ' // &
                                    'in progress for group ''' // trim(exchangeGroup % groupName) // '''.'
               if ( present(iErr) ) then
                  iErr = MPAS_DMPAR_BUFFER_EXISTS
               end if
               return
            end if
            allocate(commListPtr % rbuffer(commListPtr % nList))
            commListPtr % bufferOffset = 0

            commListPtr => commListPtr % next
         end do
      end if

      ! All threads wait for the master thread to finish building buffers.
      call mpas_threading_barrier()

   end subroutine mpas_dmpar_exch_group_build_buffers!}}}


!-----------------------------------------------------------------------
! routine mpas_dmpar_exch_group_aggregate_exch_list
!
!> \brief MPAS dmpar exchange group exchange list aggregation routine
!> \author Doug Jacobsen
!> \date 01/05/2016
!> \details
!> This private routine aggregates a multihalo exchange list into a communication list.
!> For send lists the per-endpoint buffer size is accumulated directly from the
!> exchange-list entry counts; for receive lists the size is derived afterwards
!> from the largest source index referenced for each endpoint.
!
!-----------------------------------------------------------------------
   subroutine mpas_dmpar_exch_group_aggregate_exch_list(multiHaloExchList, commList, dimSizes, haloLayer, recvList, iErr)!{{{
      type (mpas_multihalo_exchange_list), pointer :: multiHaloExchList   !< field's send or recv multihalo exchange list
      type (mpas_communication_list), pointer :: commList                 !< group communication list (with dead header node)
      integer, dimension(:), intent(in) :: dimSizes                       !< dimension sizes of the field being aggregated
      integer, intent(in) :: haloLayer                                    !< halo layer to aggregate
      logical, optional, intent(in) :: recvList                           !< .true. when aggregating a receive list (default .false.)
      integer, optional, intent(out) :: iErr                              !< always MPAS_DMPAR_NOERR at present

      type (mpas_multihalo_exchange_list), pointer :: multiHaloExchListPtr
      type (mpas_exchange_list), pointer :: exchListPtr
      type (mpas_communication_list), pointer :: commListPtr, commListPtr2

      ! NOTE: no inline initializer here. 'logical :: comm_list_found = .false.'
      ! would implicitly give the variable the SAVE attribute (a common Fortran
      ! trap); the flag is explicitly reset before each use below instead.
      logical :: comm_list_found
      logical :: recvListLocal
      integer :: bufferOffset, nAdded
      integer :: dimProd

      if ( present(iErr) ) then
         iErr = MPAS_DMPAR_NOERR
      end if

      ! Treat the list as a send list unless told otherwise.
      if ( present(recvList) ) then
         recvListLocal = recvList
      else
         recvListLocal = .false.
      end if

      ! Number of buffer elements contributed per exchange-list entry:
      ! the product of all dimension sizes except the last.
      call mpas_dmpar_build_dim_size_product(dimSizes, dimProd)

      multiHaloExchListPtr => multiHaloExchList
      do while ( associated(multiHaloExchListPtr) )
         ! Aggregate this field's send list for this halo into the buffer size
         exchListPtr => multiHaloExchListPtr % halos(haloLayer) % exchList
         do while (associated(exchListPtr))
            comm_list_found = .false.

            ! Look for an existing communication-list node for this endpoint.
            commListPtr => commList
            do while(associated(commListPtr))
               if ( commListPtr % procID == exchListPtr % endPointID ) then
                  comm_list_found = .true.
                  if ( .not. recvListLocal ) then
                     commListPtr % nList = commListPtr % nList + exchListPtr % nList * dimProd
                  end if
                  exit
               end if
               commListPtr => commListPtr % next
            end do

            ! Set up a new communication list for this exchange list
            ! because it was not found. Insert it directly after the dead
            ! header node to make insertion quick.
            if ( .not. comm_list_found ) then
               commListPtr => commList

               allocate(commListPtr2)
               if ( associated(commListPtr % next) ) then
                  commListPtr2 % next => commListPtr % next
               else
                  nullify(commListPtr2 % next)
               end if
               commListPtr2 % procID = exchListPtr % endPointID
               if ( .not. recvListLocal ) then
                  commListPtr2 % nList = exchListPtr % nList * dimProd
               else
                  commListPtr2 % nList = 0
               end if
               nullify(commListPtr2 % rbuffer)
               nullify(commListPtr2 % ibuffer)

               commListPtr % next => commListPtr2
            end if

            exchListPtr => exchListPtr % next
         end do
         multiHaloExchListPtr => multiHaloExchListPtr % next
      end do

      ! If this is a receive list, nList needs to be built differently:
      ! determine the size of each receive list from the largest source
      ! index referenced for its endpoint.
      if ( recvListLocal ) then
         commListPtr => commList
         do while(associated(commListPtr))
            bufferOffset = 0
            nAdded = 0

            multiHaloExchListPtr => multiHaloExchList
            do while(associated(multiHaloExchListPtr))
               exchListPtr => multiHaloExchListPtr % halos(haloLayer) % exchList
               do while(associated(exchListPtr))
                  if(exchListPtr % endPointID == commListPtr % procID) then
                     nAdded = max(nAdded, maxval(exchListPtr % srcList) * dimProd)
                  end if
                  exchListPtr => exchListPtr % next
               end do

               multiHaloExchListPtr => multiHaloExchListPtr % next
            end do
            bufferOffset = bufferOffset + nAdded
            commListPtr % nList = commListPtr % nList + nAdded
            commListPtr % bufferOffset = 0

            commListPtr => commListPtr % next
         end do ! commListPtr
      end if


   end subroutine mpas_dmpar_exch_group_aggregate_exch_list!}}}


!-----------------------------------------------------------------------
! routine mpas_dmpar_build_dim_size_product
!
!> \brief MPAS dmpar build dimension size product routine
!> \author Doug Jacobsen
!> \date 01/05/2016
!> \details
!> This private routine determines the dimension size product for a field,
!> which is used when building the size of a halo exchange buffer.
!
!-----------------------------------------------------------------------
   subroutine mpas_dmpar_build_dim_size_product(dimSizes, dimSizeProd)!{{{
      integer, dimension(:), intent(in) :: dimSizes   !< dimension sizes of a field
      integer, intent(out) :: dimSizeProd             !< product of all dimension sizes except the last

      ! Multiply together every dimension size but the last. For a 1-D field
      ! the slice below is empty and PRODUCT returns 1, exactly matching the
      ! loop this replaces.
      dimSizeProd = product(dimSizes(1:size(dimSizes) - 1))

   end subroutine mpas_dmpar_build_dim_size_product!}}}


!-----------------------------------------------------------------------
! routine mpas_dmpar_exch_group_start_recv
!
!> \brief MPAS dmpar exchange group start irecv routine
!> \author Doug Jacobsen
!> \date 01/05/2016
!> \details
!> This private routine starts the irecv commands for each communication in an exchange group.
!
!-----------------------------------------------------------------------
   subroutine mpas_dmpar_exch_group_start_recv(dminfo, exchangeGroup)!{{{
      type (dm_info), intent(in) :: dminfo                         !< domain decomposition info (communicator, rank)
      type (mpas_exchange_group), intent(inout) :: exchangeGroup   !< group whose receive buffers get posted

      type (mpas_communication_list), pointer :: commListPtr

      integer :: threadNum
      integer :: mpi_ierr

      threadNum = mpas_threading_get_thread_num()

      ! Only the master thread posts MPI receives; all other threads just
      ! wait at the barrier below.
      if ( threadNum == 0 ) then
         commListPtr => exchangeGroup % recvList
         do while ( associated(commListPtr) )
            DMPAR_DEBUG_WRITE(' -- Starting recv: ' COMMA commListPtr % procID COMMA commListPtr % nList COMMA size(commListPtr % rbuffer) )
            ! The tag is the sending rank's ID, which matches the tag used by
            ! the corresponding MPI_Isend (dminfo % my_proc_id on the sender).
            call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, &
                           dminfo % comm, commListPtr % reqID, mpi_ierr)

            commListPtr => commListPtr % next
         end do
      end if

      call mpas_threading_barrier()

   end subroutine mpas_dmpar_exch_group_start_recv!}}}


!-----------------------------------------------------------------------
! routine mpas_dmpar_exch_group_start_send
!
!> \brief MPAS dmpar exchange group start isend routine
!> \author Doug Jacobsen
!> \date 01/05/2016
!> \details
!> This private routine starts the isend commands for each communication in an exchange group.
!
!-----------------------------------------------------------------------
   subroutine mpas_dmpar_exch_group_start_send(dminfo, exchangeGroup)!{{{
      type (dm_info), intent(in) :: dminfo                         !< domain decomposition info (communicator, rank)
      type (mpas_exchange_group), intent(inout) :: exchangeGroup   !< group whose packed send buffers get posted

      type (mpas_communication_list), pointer :: sendNode

      integer :: myThread
      integer :: mpi_ierr

      myThread = mpas_threading_get_thread_num()

      ! Posting of MPI sends is restricted to the master thread; every other
      ! thread proceeds straight to the barrier at the bottom of the routine.
      if ( myThread == 0 ) then
         sendNode => exchangeGroup % sendList
         do while ( associated(sendNode) )
            DMPAR_DEBUG_WRITE(' -- Starting send: ' COMMA sendNode % procID COMMA sendNode % nList COMMA size(sendNode % rbuffer) )
            ! Tag each message with this rank's own ID so the receiver can
            ! match it against an MPI_Irecv tagged with the sender's rank.
            call MPI_Isend(sendNode % rbuffer, sendNode % nList, MPI_REALKIND, sendNode % procID, dminfo % my_proc_id, &
                           dminfo % comm, sendNode % reqID, mpi_ierr)

            sendNode => sendNode % next
         end do
      end if

      call mpas_threading_barrier()

   end subroutine mpas_dmpar_exch_group_start_send!}}}


!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! Private routines for group operations on a list of fields
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!-----------------------------------------------------------------------
! routine mpas_dmpar_exch_group_pack_buffers
!
!> \brief MPAS dmpar exchange group buffer pack routine
!> \author Doug Jacobsen
!> \date 01/05/2016
!> \details
!> This private routine packs all fields into their send buffers for an exchange group
!> It packs a specific halo layer, as defined by the haloLayer input argument.
!
!-----------------------------------------------------------------------
   subroutine mpas_dmpar_exch_group_pack_buffers(allFieldsPool, exchangeGroup, iErr)!{{{
      type (mpas_pool_type), intent(in) :: allFieldsPool           !< pool holding every field referenced by the group
      type (mpas_exchange_group), intent(inout) :: exchangeGroup   !< group whose send buffers are packed
      integer, optional, intent(out) :: iErr                       !< always MPAS_DMPAR_NOERR at present

      type (mpas_exchange_field_list), pointer :: exchFieldListPtr

      type (field1DInteger), pointer :: intField1D
      type (field2DInteger), pointer :: intField2D
      type (field3DInteger), pointer :: intField3D
      type (field1DReal), pointer :: realField1D
      type (field2DReal), pointer :: realField2D
      type (field3DReal), pointer :: realField3D
      type (field4DReal), pointer :: realField4D
      type (field5DReal), pointer :: realField5D

      integer :: iTimeLevel, iHalo

      if ( present(iErr) ) then
         iErr = MPAS_DMPAR_NOERR
      end if

      ! Pack each field entirely before moving on to the next field.
      ! The dispatch below selects the correctly typed/ranked field pointer,
      ! then packs every selected time level and halo layer of that field.
      exchFieldListPtr => exchangeGroup % fieldList
      do while ( associated(exchFieldListPtr) )
         DMPAR_DEBUG_WRITE(' -- Packing buffers for field ' // trim(exchFieldListPtr % fieldName))
         do iTimeLevel = 1, size(exchFieldListPtr % timeLevels)
            if ( exchFieldListPtr % timeLevels(iTimeLevel) ) then

               if ( exchFieldListPtr % fieldType == MPAS_POOL_REAL ) then
                  if ( exchFieldListPtr % nDims == 1 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField1D, iTimeLevel)
                     if ( realField1D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_pack_buffer_field(exchangeGroup, realField1D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 2 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField2D, iTimeLevel)
                     if ( realField2D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_pack_buffer_field(exchangeGroup, realField2D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 3 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField3D, iTimeLevel)
                     if ( realField3D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_pack_buffer_field(exchangeGroup, realField3D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 4 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField4D, iTimeLevel)
                     if ( realField4D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_pack_buffer_field(exchangeGroup, realField4D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 5 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField5D, iTimeLevel)
                     if ( realField5D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_pack_buffer_field(exchangeGroup, realField5D, iHalo)
                           end if
                        end do
                     end if
                  end if
               else if ( exchFieldListPtr % fieldType == MPAS_POOL_INTEGER ) then
                  if ( exchFieldListPtr % nDims == 1 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, intField1D, iTimeLevel)
                     if ( intField1D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_pack_buffer_field(exchangeGroup, intField1D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 2 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, intField2D, iTimeLevel)
                     if ( intField2D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_pack_buffer_field(exchangeGroup, intField2D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 3 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, intField3D, iTimeLevel)
                     if ( intField3D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_pack_buffer_field(exchangeGroup, intField3D, iHalo)
                           end if
                        end do
                     end if
                  end if
               end if
            end if
         end do

         exchFieldListPtr => exchFieldListPtr % next
      end do

   end subroutine mpas_dmpar_exch_group_pack_buffers!}}}


!-----------------------------------------------------------------------
! routine mpas_dmpar_exch_group_local_exch_fields
!
!> \brief MPAS dmpar exchange group local exchange routine
!> \author Doug Jacobsen
!> \date 01/05/2016
!> \details
!> This private routine performs the actual local exchanges for each field in
!> an exchange group.
!
!-----------------------------------------------------------------------
   subroutine mpas_dmpar_exch_group_local_exch_fields(allFieldsPool, exchangeGroup, iErr)!{{{
      type (mpas_pool_type), intent(in) :: allFieldsPool           !< pool holding every field referenced by the group
      type (mpas_exchange_group), intent(inout) :: exchangeGroup   !< group whose fields get locally copied
      integer, optional, intent(out) :: iErr                       !< always MPAS_DMPAR_NOERR at present

      type (field1DInteger), pointer :: intField1D
      type (field2DInteger), pointer :: intField2D
      type (field3DInteger), pointer :: intField3D
      type (field1DReal), pointer :: realField1D
      type (field2DReal), pointer :: realField2D
      type (field3DReal), pointer :: realField3D
      type (field4DReal), pointer :: realField4D
      type (field5DReal), pointer :: realField5D

      type (mpas_exchange_field_list), pointer :: exchFieldListPtr

      integer :: iTimeLevel, iHalo

      if ( present(iErr) ) then
         iErr = MPAS_DMPAR_NOERR
      end if

      ! Dispatch on field type/rank and perform the block-local halo copy for
      ! every selected time level and halo layer of every field in the group.
      exchFieldListPtr => exchangeGroup % fieldList
      do while ( associated(exchFieldListPtr) )
         DMPAR_DEBUG_WRITE(' -- Local copy for field ' // trim(exchFieldListPtr % fieldName))
         do iTimeLevel = 1, size(exchFieldListPtr % timeLevels)
            if ( exchFieldListPtr % timeLevels(iTimeLevel) ) then
               if ( exchFieldListPtr % fieldType == MPAS_POOL_REAL ) then
                  if ( exchFieldListPtr % nDims == 1 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField1D, iTimeLevel)
                     if ( realField1D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_local_exch_field(realField1D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 2 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField2D, iTimeLevel)
                     if ( realField2D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_local_exch_field(realField2D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 3 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField3D, iTimeLevel)
                     if ( realField3D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_local_exch_field(realField3D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 4 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField4D, iTimeLevel)
                     if ( realField4D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_local_exch_field(realField4D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 5 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField5D, iTimeLevel)
                     if ( realField5D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_local_exch_field(realField5D, iHalo)
                           end if
                        end do
                     end if
                  end if
               else if ( exchFieldListPtr % fieldType == MPAS_POOL_INTEGER ) then
                  if ( exchFieldListPtr % nDims == 1 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, intField1D, iTimeLevel)
                     if ( intField1D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_local_exch_field(intField1D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 2 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, intField2D, iTimeLevel)
                     if ( intField2D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_local_exch_field(intField2D, iHalo)
                           end if
                        end do
                     end if
                  else if ( exchFieldListPtr % nDims == 3 ) then
                     call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, intField3D, iTimeLevel)
                     if ( intField3D % isActive ) then
                        do iHalo = 1, size(exchFieldListPtr % haloLayers)
                           if ( exchFieldListPtr % haloLayers(iHalo) ) then
                              call mpas_dmpar_exch_group_local_exch_field(intField3D, iHalo)
                           end if
                        end do
                     end if
                  end if
               end if
            end if
         end do

         exchFieldListPtr => exchFieldListPtr % next
      end do

      call mpas_threading_barrier()

   end subroutine mpas_dmpar_exch_group_local_exch_fields!}}}


!-----------------------------------------------------------------------
! routine mpas_dmpar_exch_group_unpack_buffers
!
!> \brief MPAS dmpar exchange group buffer unpack routine
!> \author Doug Jacobsen
!> \date 01/06/2016
!> \details
!> This private routine unpacks buffers. Additionally, it includes MPI_Wait
!> commands to finish receiving messages before unpacking buffers.
!
!-----------------------------------------------------------------------
   subroutine mpas_dmpar_exch_group_unpack_buffers(allFieldsPool, exchangeGroup, iErr)!{{{
      type (mpas_pool_type), intent(in) :: allFieldsPool           !< pool holding every field referenced by the group
      type (mpas_exchange_group), intent(inout) :: exchangeGroup   !< group whose receive buffers are drained
      integer, optional, intent(out) :: iErr                       !< MPAS_DMPAR_NOERR, or the first unpack error encountered

      type (mpas_exchange_field_list), pointer :: exchFieldListPtr
      type (mpas_communication_list), pointer :: commListPtr

      logical :: allReceived
      integer :: mpi_ierr, iErr_tmp
      integer :: threadNum

      if ( present(iErr) ) then
         iErr = MPAS_DMPAR_NOERR
      end if

      threadNum = mpas_threading_get_thread_num()

      ! Master thread clears the per-endpoint 'received' flags before any
      ! thread starts polling.
      !$omp master
      commListPtr => exchangeGroup % recvList
      do while ( associated(commListPtr) )
         DMPAR_DEBUG_WRITE(' -- Marking receive lists as not received, and not unpacked')
         commListPtr % received = .false.

         commListPtr => commListPtr % next
      end do
      !$omp end master
      call mpas_threading_barrier()

      ! Busy-poll: keep sweeping the receive lists until every message has
      ! been received and unpacked.
      allReceived = .false.
      do while ( .not. allReceived )

         allReceived = .true.

         DMPAR_DEBUG_WRITE(' -- Test receive messages, and unpack buffers as they receive')
         ! Loop over receive lists to check if they have been received yet.
         commListPtr => exchangeGroup % recvList
         do while ( associated(commListPtr) )

            call mpas_threading_barrier()

            ! Poll receive list messages, and unpack as they are received.
            ! NOTE(review): commListPtr % received is written only by the
            ! master thread (via MPI_Test) and read by all threads; the
            ! barriers surrounding the test appear intended to order those
            ! accesses so every thread sees a consistent flag -- confirm
            ! against the threading model if modifying this choreography.
            if ( .not. commListPtr % received ) then
               call mpas_threading_barrier()

               allReceived = .false.
               if ( threadNum == 0 ) then
                  call MPI_Test(commListPtr % reqID, commListPtr % received, MPI_STATUS_IGNORE, mpi_ierr)
               end if

               call mpas_threading_barrier()

               if ( commListPtr % received ) then
                  call mpas_dmpar_exch_group_unpack_single_buffer(allFieldsPool, exchangeGroup, commListPtr, iErr_tmp)
                  if ( iErr_tmp /= MPAS_DMPAR_NOERR ) then
                     if ( present(iErr) ) then
                        iErr = iErr_tmp
                     end if
                     DMPAR_ERROR_WRITE('Halo exchange unpacking encountered an error.')
                  end if
               end if
            end if

            call mpas_threading_barrier()
            commListPtr => commListPtr % next
         end do

      end do
      DMPAR_DEBUG_WRITE(' -- All buffers have been unpacked. Exiting buffer unpack routine.')

   end subroutine mpas_dmpar_exch_group_unpack_buffers!}}}


!-----------------------------------------------------------------------
! routine mpas_dmpar_exch_group_unpack_single_buffer
!
!> \brief MPAS dmpar exchange group single buffer unpack routine
!> \author Doug Jacobsen
!> \date 08/11/2016
!> \details
!> This private routine unpacks a single receive buffer. It assumes the
!> message has been received already, and so does not include an MPI_Wait.
!
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_unpack_single_buffer(allFieldsPool, exchangeGroup, recvList, iErr)!{{{ + type (mpas_pool_type), intent(in) :: allFieldsPool + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (mpas_communication_list), intent(inout) :: recvList + + integer, optional, intent(out) :: iErr + + type (field1DInteger), pointer :: intField1D + type (field2DInteger), pointer :: intField2D + type (field3DInteger), pointer :: intField3D + type (field1DReal), pointer :: realField1D + type (field2DReal), pointer :: realField2D + type (field3DReal), pointer :: realField3D + type (field4DReal), pointer :: realField4D + type (field5DReal), pointer :: realField5D + + type (mpas_exchange_field_list), pointer :: exchFieldListPtr + + integer :: mpi_ierr + integer :: threadNum + integer :: iTimeLevel, iHalo + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + exchFieldListPtr => exchangeGroup % fieldList + do while ( associated(exchFieldListPtr) ) + DMPAR_DEBUG_WRITE(' -- Unpacking buffers for field ' // trim(exchFieldListPtr % fieldName)) + do iTimeLevel = 1, size(exchFieldListPtr % timeLevels) + if ( exchFieldListPtr % timeLevels(iTimeLevel) ) then + if ( exchFieldListPtr % fieldType == MPAS_POOL_REAL ) then + if ( exchFieldListPtr % nDims == 1 ) then + call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField1D, iTimeLevel) + if ( realField1D % isActive ) then + do iHalo = 1, size(exchFieldListPtr % haloLayers) + if ( exchFieldListPtr % haloLayers(iHalo) ) then + call mpas_dmpar_exch_group_unpack_buffer_field(exchangeGroup, realField1D, iHalo, recvList) + end if + end do + end if + else if ( exchFieldListPtr % nDims == 2 ) then + call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField2D, iTimeLevel) + if ( realField2D % isActive ) then + do iHalo = 1, 
size(exchFieldListPtr % haloLayers) + if ( exchFieldListPtr % haloLayers(iHalo) ) then + call mpas_dmpar_exch_group_unpack_buffer_field(exchangeGroup, realField2D, iHalo, recvList) + end if + end do + end if + else if ( exchFieldListPtr % nDims == 3 ) then + call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField3D, iTimeLevel) + if ( realField3D % isActive ) then + do iHalo = 1, size(exchFieldListPtr % haloLayers) + if ( exchFieldListPtr % haloLayers(iHalo) ) then + call mpas_dmpar_exch_group_unpack_buffer_field(exchangeGroup, realField3D, iHalo, recvList) + end if + end do + end if + else if ( exchFieldListPtr % nDims == 4 ) then + call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField4D, iTimeLevel) + if ( realField4D % isActive ) then + do iHalo = 1, size(exchFieldListPtr % haloLayers) + if ( exchFieldListPtr % haloLayers(iHalo) ) then + call mpas_dmpar_exch_group_unpack_buffer_field(exchangeGroup, realField4D, iHalo, recvList) + end if + end do + end if + else if ( exchFieldListPtr % nDims == 5 ) then + call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, realField5D, iTimeLevel) + if ( realField5D % isActive ) then + do iHalo = 1, size(exchFieldListPtr % haloLayers) + if ( exchFieldListPtr % haloLayers(iHalo) ) then + call mpas_dmpar_exch_group_unpack_buffer_field(exchangeGroup, realField5D, iHalo, recvList) + end if + end do + end if + end if + else if ( exchFieldListPtr % fieldType == MPAS_POOL_INTEGER ) then + if ( exchFieldListPtr % nDims == 1 ) then + call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, intField1D, iTimeLevel) + if ( intField1D % isActive ) then + do iHalo = 1, size(exchFieldListPtr % haloLayers) + if ( exchFieldListPtr % haloLayers(iHalo) ) then + call mpas_dmpar_exch_group_unpack_buffer_field(exchangeGroup, intField1D, iHalo, recvList) + end if + end do + end if + else if ( exchFieldListPtr % nDims == 2 ) then + call mpas_pool_get_field(allFieldsPool, 
exchFieldListPtr % fieldName, intField2D, iTimeLevel) + if ( intField2D % isActive ) then + do iHalo = 1, size(exchFieldListPtr % haloLayers) + if ( exchFieldListPtr % haloLayers(iHalo) ) then + call mpas_dmpar_exch_group_unpack_buffer_field(exchangeGroup, intField2D, iHalo, recvList) + end if + end do + end if + else if ( exchFieldListPtr % nDims == 3 ) then + call mpas_pool_get_field(allFieldsPool, exchFieldListPtr % fieldName, intField3D, iTimeLevel) + if ( intField3D % isActive ) then + do iHalo = 1, size(exchFieldListPtr % haloLayers) + if ( exchFieldListPtr % haloLayers(iHalo) ) then + call mpas_dmpar_exch_group_unpack_buffer_field(exchangeGroup, intField3D, iHalo, recvList) + end if + end do + end if + end if + end if + end if + end do + + exchFieldListPtr => exchFieldListPtr % next + end do + + call mpas_threading_barrier() + + end subroutine mpas_dmpar_exch_group_unpack_single_buffer!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_destroy_buffers +! +!> \brief MPAS dmpar exchange group destroy buffers routine +!> \author Doug Jacobsen +!> \date 04/07/2016 +!> \details +!> This private routine destroys buffers. Additionally, it includes MPI_Wait +!> commands to finish receiving messages before destroying buffers. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_destroy_buffers(exchangeGroup, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + integer, optional, intent(out) :: iErr + + type (mpas_communication_list), pointer :: commListPtr + + integer :: mpi_ierr + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + call mpas_threading_barrier() + + if ( threadNum == 0 ) then + ! 
Wait for isends to finish + commListPtr => exchangeGroup % sendList + do while ( associated(commListPtr) ) + call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr) + + commListPtr => commListPtr % next + end do + + ! Destroy communication lists + call mpas_dmpar_destroy_communication_list(exchangeGroup % sendList) + call mpas_dmpar_destroy_communication_list(exchangeGroup % recvList) + end if + + call mpas_threading_barrier() + + end subroutine mpas_dmpar_exch_group_destroy_buffers!}}} + + + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! +! Private routines for packing send buffers +! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_pack_buffer_field1d_integer +! +!> \brief MPAS dmpar exchange pack 1D integer field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine packs a 1D integer field into the send buffers +!> It packs a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_pack_buffer_field1d_integer(exchangeGroup, field, haloLayer, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field1DInteger), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field1DInteger), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + type (mpas_communication_list), pointer :: commListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + commListPtr => exchangeGroup % sendList + do while ( associated(commListPtr) ) + bufferOffset = commListPtr % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == commListPtr % procID ) then + !$omp do schedule(runtime) private(iBuffer) + do iExch = 1, exchListPtr % nList + iBuffer = exchListPtr % destList(iExch) + bufferOffset + commListPtr % rbuffer(iBuffer) = transfer(fieldCursor % array(exchListPtr % srcList(iExch)), & + commListPtr % rbuffer(1)) + end do + !$omp end do + nAdded = nAdded + exchListPtr % nList + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + !$omp master + commListPtr % bufferOffset = commListPtr % bufferOffset + nAdded + !$omp end master + call mpas_threading_barrier() + commListPtr => commListPtr % next + end do + + end subroutine mpas_dmpar_exch_group_pack_buffer_field1d_integer!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_pack_buffer_field2d_integer +! 
+!> \brief MPAS dmpar exchange pack 2D integer field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine packs a 2D integer field into the send buffers +!> It packs a specific halo layer, as defined by the haloLayer input argument. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_pack_buffer_field2d_integer(exchangeGroup, field, haloLayer, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field2DInteger), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field2DInteger), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + type (mpas_communication_list), pointer :: commListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded + integer :: j + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + commListPtr => exchangeGroup % sendList + do while ( associated(commListPtr) ) + bufferOffset = commListPtr % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == commListPtr % procID ) then + !$omp do schedule(runtime) private(j, iBuffer) + do iExch = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % destList(iExch)-1) * fieldCursor % dimSizes(1) + j + bufferOffset + commListPtr % rbuffer(iBuffer) = transfer(fieldCursor % array(j, exchListPtr % srcList(iExch)), & + commListPtr % rbuffer(1)) + end do + end do + !$omp end do + nAdded = nAdded + exchListPtr % nList * fieldCursor % dimSizes(1) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + !$omp master + commListPtr % bufferOffset = commListPtr % bufferOffset + nAdded + !$omp end master + 
call mpas_threading_barrier() + commListPtr => commListPtr % next + end do + + end subroutine mpas_dmpar_exch_group_pack_buffer_field2d_integer!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_pack_buffer_field3d_integer +! +!> \brief MPAS dmpar exchange pack 3D integer field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine packs a 3D integer field into the send buffers +!> It packs a specific halo layer, as defined by the haloLayer input argument. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_pack_buffer_field3d_integer(exchangeGroup, field, haloLayer, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field3DInteger), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field3DInteger), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + type (mpas_communication_list), pointer :: commListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded + integer :: j, k + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + commListPtr => exchangeGroup % sendList + do while ( associated(commListPtr) ) + bufferOffset = commListPtr % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == commListPtr % procID ) then + !$omp do schedule(runtime) private(j, k, iBuffer) + do iExch = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(2) + do k = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % destList(iExch) - 1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (j - 1) * fieldCursor % dimSizes(1) + k + bufferOffset + commListPtr % rbuffer(iBuffer) = transfer(fieldCursor % 
array(k, j, exchListPtr % srcList(iExch)), & + commListPtr % rbuffer(1)) + end do + end do + end do + !$omp end do + nAdded = nAdded + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + !$omp master + commListPtr % bufferOffset = commListPtr % bufferOffset + nAdded + !$omp end master + call mpas_threading_barrier() + commListPtr => commListPtr % next + end do + + end subroutine mpas_dmpar_exch_group_pack_buffer_field3d_integer!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_pack_buffer_field1d_real +! +!> \brief MPAS dmpar exchange pack 1D real field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine packs a 1D real field into the send buffers +!> It packs a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_pack_buffer_field1d_real(exchangeGroup, field, haloLayer, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field1DReal), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field1DReal), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + type (mpas_communication_list), pointer :: commListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + commListPtr => exchangeGroup % sendList + do while ( associated(commListPtr) ) + bufferOffset = commListPtr % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == commListPtr % procID ) then + !$omp do schedule(runtime) private(iBuffer) + do iExch = 1, exchListPtr % nList + iBuffer = exchListPtr % destList(iExch) + bufferOffset + commListPtr % rbuffer(iBuffer) = fieldCursor % array(exchListPtr % srcList(iExch)) + end do + !$omp end do + nAdded = nAdded + exchListPtr % nList + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + !$omp master + commListPtr % bufferOffset = commListPtr % bufferOffset + nAdded + !$omp end master + call mpas_threading_barrier() + commListPtr => commListPtr % next + end do + + end subroutine mpas_dmpar_exch_group_pack_buffer_field1d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_pack_buffer_field2d_real +! 
+!> \brief MPAS dmpar exchange pack 2D real field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine packs a 2D real field into the send buffers +!> It packs a specific halo layer, as defined by the haloLayer input argument. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_pack_buffer_field2d_real(exchangeGroup, field, haloLayer, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field2DReal), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field2DReal), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + type (mpas_communication_list), pointer :: commListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded + integer :: j + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + commListPtr => exchangeGroup % sendList + do while ( associated(commListPtr) ) + bufferOffset = commListPtr % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == commListPtr % procID ) then + !$omp do schedule(runtime) private(j, iBuffer) + do iExch = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % destList(iExch)-1) * fieldCursor % dimSizes(1) + j + bufferOffset + commListPtr % rbuffer(iBuffer) = fieldCursor % array(j, exchListPtr % srcList(iExch)) + end do + end do + !$omp end do + nAdded = nAdded + exchListPtr % nList * fieldCursor % dimSizes(1) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + !$omp master + commListPtr % bufferOffset = commListPtr % bufferOffset + nAdded + !$omp end master + call mpas_threading_barrier() + commListPtr => 
commListPtr % next + end do + + end subroutine mpas_dmpar_exch_group_pack_buffer_field2d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_pack_buffer_field3d_real +! +!> \brief MPAS dmpar exchange pack 3D real field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine packs a 3D real field into the send buffers +!> It packs a specific halo layer, as defined by the haloLayer input argument. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_pack_buffer_field3d_real(exchangeGroup, field, haloLayer, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field3DReal), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field3DReal), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + type (mpas_communication_list), pointer :: commListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded + integer :: j, k + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + commListPtr => exchangeGroup % sendList + do while ( associated(commListPtr) ) + bufferOffset = commListPtr % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == commListPtr % procID ) then + !$omp do schedule(runtime) private(j, k, iBuffer) + do iExch = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(2) + do k = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % destList(iExch) - 1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (j - 1) * fieldCursor % dimSizes(1) + k + bufferOffset + commListPtr % rbuffer(iBuffer) = fieldCursor % array(k, j, exchListPtr % srcList(iExch)) + end do + end do + end do + !$omp end 
do + nAdded = nAdded + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + !$omp master + commListPtr % bufferOffset = commListPtr % bufferOffset + nAdded + !$omp end master + call mpas_threading_barrier() + commListPtr => commListPtr % next + end do + + end subroutine mpas_dmpar_exch_group_pack_buffer_field3d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_pack_buffer_field4d_real +! +!> \brief MPAS dmpar exchange pack 4D real field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine packs a 4D real field into the send buffers +!> It packs a specific halo layer, as defined by the haloLayer input argument. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_pack_buffer_field4d_real(exchangeGroup, field, haloLayer, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field4DReal), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field4DReal), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + type (mpas_communication_list), pointer :: commListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded + integer :: j, k, l + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + commListPtr => exchangeGroup % sendList + do while ( associated(commListPtr) ) + bufferOffset = commListPtr % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == commListPtr % procID ) then + !$omp do schedule(runtime) private(j, k, l, iBuffer) + do iExch = 1, 
exchListPtr % nList + do j = 1, fieldCursor % dimSizes(3) + do k = 1, fieldCursor % dimSizes(2) + do l = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % destList(iExch) - 1) * fieldCursor % dimSizes(1) & + * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) & + + (j - 1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (k - 1) * fieldCursor % dimSizes(1) + l + bufferOffset + commListPtr % rbuffer(iBuffer) = fieldCursor % array(l, k, j, exchListPtr % srcList(iExch)) + end do + end do + end do + end do + !$omp end do + nAdded = nAdded + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + * fieldCursor % dimSizes(3) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + !$omp master + commListPtr % bufferOffset = commListPtr % bufferOffset + nAdded + !$omp end master + call mpas_threading_barrier() + commListPtr => commListPtr % next + end do + + end subroutine mpas_dmpar_exch_group_pack_buffer_field4d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_pack_buffer_field5d_real +! +!> \brief MPAS dmpar exchange pack 5D real field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine packs a 5D real field into the send buffers +!> It packs a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_pack_buffer_field5d_real(exchangeGroup, field, haloLayer, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field5DReal), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field5DReal), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + type (mpas_communication_list), pointer :: commListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded + integer :: j, k, l, m + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + commListPtr => exchangeGroup % sendList + do while ( associated(commListPtr) ) + bufferOffset = commListPtr % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == commListPtr % procID ) then + !$omp do schedule(runtime) private(j, k, l, m, iBuffer) + do iExch = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(4) + do k = 1, fieldCursor % dimSizes(3) + do l = 1, fieldCursor % dimSizes(2) + do m = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % destList(iExch) - 1) * fieldCursor % dimSizes(1) & + * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)& + + (j - 1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + * fieldCursor % dimSizes(3) & + + (k - 1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (l - 1) * fieldCursor % dimSizes(1) + m + bufferOffset + commListPtr % rbuffer(iBuffer) = fieldCursor % array(m, l, k, j, exchListPtr % srcList(iExch)) + end do + end do + end do + end do + end do + !$omp end do + nAdded = nAdded + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4) + end 
if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + !$omp master + commListPtr % bufferOffset = commListPtr % bufferOffset + nAdded + !$omp end master + call mpas_threading_barrier() + commListPtr => commListPtr % next + end do + + end subroutine mpas_dmpar_exch_group_pack_buffer_field5d_real!}}} + + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! +! Private routines for performing local exchanges +! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_local_exch_field1d_integer +! +!> \brief MPAS dmpar exchange pack 1D integer field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine exchanges a 1D integer field between local blocks +!> It exchanges a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_local_exch_field1d_integer(field, haloLayer, iErr)!{{{ + type (field1DInteger), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field1DInteger), pointer :: fieldCursor, fieldCursor2 + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList + + do while ( associated(exchListPtr) ) + fieldCursor2 => field + do while ( associated(fieldCursor2) ) + if ( exchListPtr % endPointID == fieldCursor2 % block % localBlockID ) then + !$omp do schedule(runtime) + do iExch = 1, exchListPtr % nList + fieldCursor2 % array(exchListPtr % destList(iExch)) = fieldCursor % array(exchListPtr % srcList(iExch)) + end do + !$omp end do + end if + + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do + + call mpas_threading_barrier() + + fieldCursor => fieldCursor % next + end do + + end subroutine mpas_dmpar_exch_group_local_exch_field1d_integer!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_local_exch_field2d_integer +! +!> \brief MPAS dmpar exchange pack 2D integer field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine exchanges a 2D integer field between local blocks +!> It exchanges a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_local_exch_field2d_integer(field, haloLayer, iErr)!{{{ + type (field2DInteger), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field2DInteger), pointer :: fieldCursor, fieldCursor2 + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList + + do while ( associated(exchListPtr) ) + fieldCursor2 => field + do while ( associated(fieldCursor2) ) + if ( exchListPtr % endPointID == fieldCursor2 % block % localBlockID ) then + !$omp do schedule(runtime) + do iExch = 1, exchListPtr % nList + fieldCursor2 % array(:, exchListPtr % destList(iExch)) = fieldCursor % array(:, exchListPtr % srcList(iExch)) + end do + !$omp end do + end if + + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do + + call mpas_threading_barrier() + + fieldCursor => fieldCursor % next + end do + + end subroutine mpas_dmpar_exch_group_local_exch_field2d_integer!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_local_exch_field3d_integer +! +!> \brief MPAS dmpar exchange pack 3D integer field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine exchanges a 3D integer field between local blocks +!> It exchanges a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_local_exch_field3d_integer(field, haloLayer, iErr)!{{{ + type (field3DInteger), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field3DInteger), pointer :: fieldCursor, fieldCursor2 + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList + + do while ( associated(exchListPtr) ) + fieldCursor2 => field + do while ( associated(fieldCursor2) ) + if ( exchListPtr % endPointID == fieldCursor2 % block % localBlockID ) then + !$omp do schedule(runtime) + do iExch = 1, exchListPtr % nList + fieldCursor2 % array(:, :, exchListPtr % destList(iExch)) = & + fieldCursor % array(:, :, exchListPtr % srcList(iExch)) + end do + !$omp end do + end if + + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do + + call mpas_threading_barrier() + + fieldCursor => fieldCursor % next + end do + + end subroutine mpas_dmpar_exch_group_local_exch_field3d_integer!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_local_exch_field1d_real +! +!> \brief MPAS dmpar exchange pack 1D real field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine exchanges a 1D real field between local blocks +!> It exchanges a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_local_exch_field1d_real(field, haloLayer, iErr)!{{{ + type (field1DReal), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field1DReal), pointer :: fieldCursor, fieldCursor2 + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList + + do while ( associated(exchListPtr) ) + fieldCursor2 => field + do while ( associated(fieldCursor2) ) + if ( exchListPtr % endPointID == fieldCursor2 % block % localBlockID ) then + !$omp do schedule(runtime) + do iExch = 1, exchListPtr % nList + fieldCursor2 % array(exchListPtr % destList(iExch)) = fieldCursor % array(exchListPtr % srcList(iExch)) + end do + !$omp end do + end if + + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do + + call mpas_threading_barrier() + + fieldCursor => fieldCursor % next + end do + + end subroutine mpas_dmpar_exch_group_local_exch_field1d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_local_exch_field2d_real +! +!> \brief MPAS dmpar exchange pack 2D real field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine exchanges a 2D real field between local blocks +!> It exchanges a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_local_exch_field2d_real(field, haloLayer, iErr)!{{{ + type (field2DReal), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field2DReal), pointer :: fieldCursor, fieldCursor2 + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList + + do while ( associated(exchListPtr) ) + fieldCursor2 => field + do while ( associated(fieldCursor2) ) + if ( exchListPtr % endPointID == fieldCursor2 % block % localBlockID ) then + !$omp do schedule(runtime) + do iExch = 1, exchListPtr % nList + fieldCursor2 % array(:, exchListPtr % destList(iExch)) = fieldCursor % array(:, exchListPtr % srcList(iExch)) + end do + !$omp end do + end if + + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do + + call mpas_threading_barrier() + + fieldCursor => fieldCursor % next + end do + + end subroutine mpas_dmpar_exch_group_local_exch_field2d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_local_exch_field3d_real +! +!> \brief MPAS dmpar exchange pack 3D real field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine exchanges a 3D real field between local blocks +!> It exchanges a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_local_exch_field3d_real(field, haloLayer, iErr)!{{{ + type (field3DReal), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field3DReal), pointer :: fieldCursor, fieldCursor2 + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList + + do while ( associated(exchListPtr) ) + fieldCursor2 => field + do while ( associated(fieldCursor2) ) + if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then + !$omp do schedule(runtime) + do iExch = 1, exchListPtr % nList + fieldCursor2 % array(:, :, exchListPtr % destList(iExch)) = & + fieldCursor % array(:, :, exchListPtr % srcList(iExch)) + end do + !$omp end do + end if + + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do + + call mpas_threading_barrier() + + fieldCursor => fieldCursor % next + end do + + end subroutine mpas_dmpar_exch_group_local_exch_field3d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_local_exch_field4d_real +! +!> \brief MPAS dmpar exchange pack 4D real field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine exchanges a 4D real field between local blocks +!> It exchanges a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_local_exch_field4d_real(field, haloLayer, iErr)!{{{ + type (field4DReal), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field4DReal), pointer :: fieldCursor, fieldCursor2 + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList + + do while ( associated(exchListPtr) ) + fieldCursor2 => field + do while ( associated(fieldCursor2) ) + if ( exchListPtr % endPointID == fieldCursor2 % block % localBlockID ) then + !$omp do schedule(runtime) + do iExch = 1, exchListPtr % nList + fieldCursor2 % array(:, :, :, exchListPtr % destList(iExch)) = & + fieldCursor % array(:, :, :, exchListPtr % srcList(iExch)) + end do + !$omp end do + end if + + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do + + call mpas_threading_barrier() + + fieldCursor => fieldCursor % next + end do + + end subroutine mpas_dmpar_exch_group_local_exch_field4d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_local_exch_field5d_real +! +!> \brief MPAS dmpar exchange pack 5D real field routine +!> \author Doug Jacobsen +!> \date 01/05/2016 +!> \details +!> This private routine exchanges a 5D real field between local blocks +!> It exchanges a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_local_exch_field5d_real(field, haloLayer, iErr)!{{{ + type (field5DReal), pointer :: field + integer, intent(in) :: haloLayer + integer, optional, intent(out) :: iErr + + type (field5DReal), pointer :: fieldCursor, fieldCursor2 + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList + + do while ( associated(exchListPtr) ) + fieldCursor2 => field + do while ( associated(fieldCursor2) ) + if ( exchListPtr % endPointID == fieldCursor2 % block % localBlockID ) then + !$omp do schedule(runtime) + do iExch = 1, exchListPtr % nList + fieldCursor2 % array(:, :, :, :, exchListPtr % destList(iExch)) = & + fieldCursor % array(:, :, :, :, exchListPtr % srcList(iExch)) + end do + !$omp end do + end if + + fieldCursor2 => fieldCursor2 % next + end do + + exchListPtr => exchListPtr % next + end do + + call mpas_threading_barrier() + + fieldCursor => fieldCursor % next + end do + + end subroutine mpas_dmpar_exch_group_local_exch_field5d_real!}}} + + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! +! Private routines for unpacking receive buffers +! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_unpack_buffer_field1d_integer +! +!> \brief MPAS dmpar exchange unpack 1D integer field routine +!> \author Doug Jacobsen +!> \date 01/06/2016 +!> \details +!> This private routine unpacks a 1D integer field from the receive buffers +!> It unpacks a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_unpack_buffer_field1d_integer(exchangeGroup, field, haloLayer, recvList, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field1DInteger), pointer :: field + integer, intent(in) :: haloLayer + type (mpas_communication_list), intent(inout) :: recvList + integer, optional, intent(out) :: iErr + + type (field1DInteger), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded, mpi_ierr + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + call mpas_threading_barrier() + bufferOffset = recvList % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == recvList % procID ) then + !$omp do schedule(runtime) private(iBuffer) + do iExch = 1, exchListPtr % nList + iBuffer = exchListPtr % srcList(iExch) + bufferOffset + fieldCursor % array(exchListPtr % destList(iExch)) = transfer(recvList % rbuffer(iBuffer), & + fieldCursor % array(1)) + end do + !$omp end do + nAdded = max(nAdded, maxval(exchListPtr % srcList)) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + if ( threadNum == 0 ) then + recvList % bufferOffset = recvList % bufferOffset + nAdded + end if + call mpas_threading_barrier() + + end subroutine mpas_dmpar_exch_group_unpack_buffer_field1d_integer!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_unpack_buffer_field2d_integer +! 
+!> \brief MPAS dmpar exchange unpack 2D integer field routine +!> \author Doug Jacobsen +!> \date 01/06/2016 +!> \details +!> This private routine unpacks a 2D integer field from the receive buffers +!> It unpacks a specific halo layer, as defined by the haloLayer input argument. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_unpack_buffer_field2d_integer(exchangeGroup, field, haloLayer, recvList, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field2DInteger), pointer :: field + integer, intent(in) :: haloLayer + type (mpas_communication_list), intent(inout) :: recvList + integer, optional, intent(out) :: iErr + + type (field2DInteger), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded, mpi_ierr + integer :: j + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + call mpas_threading_barrier() + bufferOffset = recvList % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == recvList % procID ) then + !$omp do schedule(runtime) private(j, iBuffer) + do iExch = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % srcList(iExch)-1) * fieldCursor % dimSizes(1) + j + bufferOffset + fieldCursor % array(j, exchListPtr % destList(iExch)) = transfer(recvList % rbuffer(iBuffer), & + fieldCursor % array(1,1)) + end do + end do + !$omp end do + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1)) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + if ( threadNum == 0 ) then + recvList % 
bufferOffset = recvList % bufferOffset + nAdded + end if + + end subroutine mpas_dmpar_exch_group_unpack_buffer_field2d_integer!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_unpack_buffer_field3d_integer +! +!> \brief MPAS dmpar exchange unpack 3D integer field routine +!> \author Doug Jacobsen +!> \date 01/06/2016 +!> \details +!> This private routine unpacks a 3D integer field from the receive buffers +!> It unpacks a specific halo layer, as defined by the haloLayer input argument. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_unpack_buffer_field3d_integer(exchangeGroup, field, haloLayer, recvList, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field3DInteger), pointer :: field + integer, intent(in) :: haloLayer + type (mpas_communication_list), intent(inout) :: recvList + integer, optional, intent(out) :: iErr + + type (field3DInteger), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded, mpi_ierr + integer :: j, k + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + call mpas_threading_barrier() + bufferOffset = recvList % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == recvList % procID ) then + !$omp do schedule(runtime) private(j, k, iBuffer) + do iExch = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(2) + do k = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % srcList(iExch) - 1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (j - 1) * fieldCursor % dimSizes(1) + k + bufferOffset + fieldCursor % array(k, j, 
exchListPtr % destList(iExch)) = transfer(recvList % rbuffer(iBuffer), & + fieldCursor % array(1,1,1)) + end do + end do + end do + !$omp end do + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + if ( threadNum == 0 ) then + recvList % bufferOffset = recvList % bufferOffset + nAdded + end if + + end subroutine mpas_dmpar_exch_group_unpack_buffer_field3d_integer!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_unpack_buffer_field1d_real +! +!> \brief MPAS dmpar exchange unpack 1D real field routine +!> \author Doug Jacobsen +!> \date 01/06/2016 +!> \details +!> This private routine unpacks a 1D real field from the receive buffers +!> It unpacks a specific halo layer, as defined by the haloLayer input argument. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_unpack_buffer_field1d_real(exchangeGroup, field, haloLayer, recvList, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field1DReal), pointer :: field + integer, intent(in) :: haloLayer + type (mpas_communication_list), intent(inout) :: recvList + integer, optional, intent(out) :: iErr + + type (field1DReal), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded, mpi_ierr + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + call mpas_threading_barrier() + bufferOffset = recvList % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( 
exchListPtr % endPointID == recvList % procID ) then + !$omp do schedule(runtime) private(iBuffer) + do iExch = 1, exchListPtr % nList + iBuffer = exchListPtr % srcList(iExch) + bufferOffset + fieldCursor % array(exchListPtr % destList(iExch)) = recvList % rbuffer(iBuffer) + end do + !$omp end do + nAdded = max(nAdded, maxval(exchListPtr % srcList)) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + if ( threadNum == 0 ) then + recvList % bufferOffset = recvList % bufferOffset + nAdded + end if + + end subroutine mpas_dmpar_exch_group_unpack_buffer_field1d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_unpack_buffer_field2d_real +! +!> \brief MPAS dmpar exchange unpack 2D real field routine +!> \author Doug Jacobsen +!> \date 01/06/2016 +!> \details +!> This private routine unpacks a 2D real field from the receive buffers +!> It unpacks a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_unpack_buffer_field2d_real(exchangeGroup, field, haloLayer, recvList, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field2DReal), pointer :: field + integer, intent(in) :: haloLayer + type (mpas_communication_list), intent(inout) :: recvList + integer, optional, intent(out) :: iErr + + type (field2DReal), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded, mpi_ierr + integer :: j + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + call mpas_threading_barrier() + bufferOffset = recvList % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == recvList % procID ) then + !$omp do schedule(runtime) private(j, iBuffer) + do iExch = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % srcList(iExch)-1) * fieldCursor % dimSizes(1) + j + bufferOffset + fieldCursor % array(j, exchListPtr % destList(iExch)) = recvList % rbuffer(iBuffer) + end do + end do + !$omp end do + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1)) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + if ( threadNum == 0 ) then + recvList % bufferOffset = recvList % bufferOffset + nAdded + end if + + end subroutine mpas_dmpar_exch_group_unpack_buffer_field2d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_unpack_buffer_field3d_real +! 
+!> \brief MPAS dmpar exchange unpack 3D real field routine +!> \author Doug Jacobsen +!> \date 01/06/2016 +!> \details +!> This private routine unpacks a 3D real field from the receive buffers +!> It unpacks a specific halo layer, as defined by the haloLayer input argument. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_unpack_buffer_field3d_real(exchangeGroup, field, haloLayer, recvList, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field3DReal), pointer :: field + integer, intent(in) :: haloLayer + type (mpas_communication_list), intent(inout) :: recvList + integer, optional, intent(out) :: iErr + + type (field3DReal), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded, mpi_ierr + integer :: j, k + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + call mpas_threading_barrier() + bufferOffset = recvList % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == recvList % procID ) then + !$omp do schedule(runtime) private(j, k, iBuffer) + do iExch = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(2) + do k = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % srcList(iExch) - 1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (j - 1) * fieldCursor % dimSizes(1) + k + bufferOffset + fieldCursor % array(k, j, exchListPtr % destList(iExch)) = recvList % rbuffer(iBuffer) + end do + end do + end do + !$omp end do + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => 
fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + if ( threadNum == 0 ) then + recvList % bufferOffset = recvList % bufferOffset + nAdded + end if + + end subroutine mpas_dmpar_exch_group_unpack_buffer_field3d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_unpack_buffer_field4d_real +! +!> \brief MPAS dmpar exchange unpack 4D real field routine +!> \author Doug Jacobsen +!> \date 01/06/2016 +!> \details +!> This private routine unpacks a 4D real field from the receive buffers +!> It unpacks a specific halo layer, as defined by the haloLayer input argument. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_unpack_buffer_field4d_real(exchangeGroup, field, haloLayer, recvList, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field4DReal), pointer :: field + integer, intent(in) :: haloLayer + type (mpas_communication_list), intent(inout) :: recvList + integer, optional, intent(out) :: iErr + + type (field4DReal), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded, mpi_ierr + integer :: j, k, l + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + call mpas_threading_barrier() + bufferOffset = recvList % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == recvList % procID ) then + !$omp do schedule(runtime) private(j, k, l, iBuffer) + do iExch = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(3) + do k = 1, fieldCursor % dimSizes(2) + do l = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % srcList(iExch) - 1) * fieldCursor 
% dimSizes(1) & + * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) & + + (j - 1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (k - 1) * fieldCursor % dimSizes(1) + l + bufferOffset + fieldCursor % array(l, k, j, exchListPtr % destList(iExch)) = recvList % rbuffer(iBuffer) + end do + end do + end do + end do + !$omp end do + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) & + * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3)) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + if ( threadNum == 0 ) then + recvList % bufferOffset = recvList % bufferOffset + nAdded + end if + + end subroutine mpas_dmpar_exch_group_unpack_buffer_field4d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_unpack_buffer_field5d_real +! +!> \brief MPAS dmpar exchange unpack 5D real field routine +!> \author Doug Jacobsen +!> \date 01/06/2016 +!> \details +!> This private routine unpacks a 5D real field from the receive buffers. +!> It unpacks a specific halo layer, as defined by the haloLayer input argument. +! 
+!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_unpack_buffer_field5d_real(exchangeGroup, field, haloLayer, recvList, iErr)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + type (field5DReal), pointer :: field + integer, intent(in) :: haloLayer + type (mpas_communication_list), intent(inout) :: recvList + integer, optional, intent(out) :: iErr + + type (field5DReal), pointer :: fieldCursor + + type (mpas_exchange_list), pointer :: exchListPtr + + integer :: iExch, iBuffer, bufferOffset + integer :: nAdded, mpi_ierr + integer :: j, k, l, m + integer :: threadNum + + if ( present(iErr) ) then + iErr = MPAS_DMPAR_NOERR + end if + + threadNum = mpas_threading_get_thread_num() + + call mpas_threading_barrier() + bufferOffset = recvList % bufferOffset + nAdded = 0 + fieldCursor => field + do while ( associated(fieldCursor) ) + exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList + do while ( associated(exchListPtr) ) + if ( exchListPtr % endPointID == recvList % procID ) then + !$omp do schedule(runtime) private(j, k, l, m, iBuffer) + do iExch = 1, exchListPtr % nList + do j = 1, fieldCursor % dimSizes(4) + do k = 1, fieldCursor % dimSizes(3) + do l = 1, fieldCursor % dimSizes(2) + do m = 1, fieldCursor % dimSizes(1) + iBuffer = (exchListPtr % srcList(iExch) - 1) * fieldCursor % dimSizes(1) & + * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)& + + (j - 1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + * fieldCursor % dimSizes(3) & + + (k - 1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + + (l - 1) * fieldCursor % dimSizes(1) + m + bufferOffset + fieldCursor % array(m, l, k, j, exchListPtr % destList(iExch)) = recvList % rbuffer(iBuffer) + end do + end do + end do + end do + end do + !$omp end do + nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) & + * 
fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)) + end if + exchListPtr => exchListPtr % next + end do + fieldCursor => fieldCursor % next + end do + bufferOffset = bufferOffset + nAdded + if ( threadNum == 0 ) then + recvList % bufferOffset = recvList % bufferOffset + nAdded + end if + + end subroutine mpas_dmpar_exch_group_unpack_buffer_field5d_real!}}} + + +!----------------------------------------------------------------------- +! routine mpas_dmpar_exch_group_print_buffers +! +!> \brief MPAS dmpar exchange group buffer info routine. +!> \author Doug Jacobsen +!> \date 04/07/2016 +!> \details +!> This private routine prints out information about communication list buffers. +!> It is mostly used for debugging. +! +!----------------------------------------------------------------------- + subroutine mpas_dmpar_exch_group_print_buffers(exchangeGroup)!{{{ + type (mpas_exchange_group), intent(inout) :: exchangeGroup + + type (mpas_exchange_list), pointer :: exchListPtr + type (mpas_communication_list), pointer :: commListPtr + + integer :: iBuffer, threadNum + + threadNum = mpas_threading_get_thread_num() + + call mpas_threading_barrier() + + if ( threadNum == 0 ) then + write(stderrUnit, *) '' + write(stderrUnit, *) ' -------- Buffer Summary -------' + write(stderrUnit, *) ' Thread: ', threadNum, ' writing out send lists' + commListPtr => exchangeGroup % sendList + do while ( associated(commListPtr) ) + write(stderrUnit, *) '' + write(stderrUnit, *) ' Send list header: ' + write(stderrUnit, *) ' proc: ', commListPtr % procID + write(stderrUnit, *) ' size check: ', commListPtr % nlist, size( commListPtr % rbuffer ) + write(stderrUnit, *) ' bufferOffset: ', commListPtr % bufferOffset + write(stderrUnit, *) ' reqId: ', commListPtr % reqId + write(stderrUnit, *) ' ibuffer assc: ', associated( commListPtr % ibuffer ) + write(stderrUnit, *) ' rbuffer assc: ', associated( commListPtr % rbuffer ) + write(stderrUnit, *) ' next assc: ', associated( commListPtr % next ) 
+ do iBuffer = 1, size(commListPtr % rbuffer) + write(stderrUnit, *) ' IDX: ', iBuffer, ' VAL: ', commListPtr % rbuffer(iBuffer) + end do + commListPtr => commListPtr % next + end do + + write(stderrUnit, *) '' + write(stderrUnit, *) ' Thread: ', threadNum, ' writing out recv lists' + commListPtr => exchangeGroup % recvList + do while ( associated(commListPtr) ) + write(stderrUnit, *) '' + write(stderrUnit, *) ' Recv list header: ' + write(stderrUnit, *) ' proc: ', commListPtr % procID + write(stderrUnit, *) ' size check: ', commListPtr % nlist, size( commListPtr % rbuffer ) + write(stderrUnit, *) ' bufferOffset: ', commListPtr % bufferOffset + write(stderrUnit, *) ' reqId: ', commListPtr % reqId + write(stderrUnit, *) ' ibuffer assc: ', associated( commListPtr % ibuffer ) + write(stderrUnit, *) ' rbuffer assc: ', associated( commListPtr % rbuffer ) + write(stderrUnit, *) ' next assc: ', associated( commListPtr % next ) + do iBuffer = 1, size(commListPtr % rbuffer) + write(stderrUnit, *) ' IDX: ', iBuffer, ' VAL: ', commListPtr % rbuffer(iBuffer) + end do + commListPtr => commListPtr % next + end do + + write(stderrUnit, *) '' + write(stderrUnit, *) ' -------- End Buffer Summary -------' + write(stderrUnit, *) '' + + end if + + call mpas_threading_barrier() + + end subroutine mpas_dmpar_exch_group_print_buffers!}}} + + +end module mpas_dmpar diff --git a/src/framework/mpas_dmpar_types.inc b/src/framework/mpas_dmpar_types.inc index bb29e9764a..37e6be9804 100644 --- a/src/framework/mpas_dmpar_types.inc +++ b/src/framework/mpas_dmpar_types.inc @@ -1,6 +1,22 @@ + integer, parameter :: MPAS_DMPAR_NOERR = 0 + integer, parameter :: MPAS_DMPAR_MISSING_GROUP = 1 + integer, parameter :: MPAS_DMPAR_EXISTING_GROUP = 2 + integer, parameter :: MPAS_DMPAR_MISSING_FIELD = 3 + integer, parameter :: MPAS_DMPAR_FIELD_TIMELEVEL_ERR = 4 + integer, parameter :: MPAS_DMPAR_FIELD_HALO_ERR = 5 + integer, parameter :: MPAS_DMPAR_BUFFER_EXISTS = 6 + type dm_info integer :: nprocs, 
my_proc_id, comm, info - logical :: using_external_comm + logical :: initialized_mpi + + ! Add variables specific to block decomposition. {{{ + ! These are used in mpas_block_decomp.F + integer :: total_blocks + logical :: explicitDecomp + integer, dimension(:), allocatable :: block_proc_list + integer, dimension(:), allocatable :: block_local_id_list + !}}} end type dm_info @@ -28,8 +44,31 @@ type mpas_communication_list integer :: procID integer :: nlist - real (kind=RKIND), dimension(:), pointer :: rbuffer - integer, dimension(:), pointer :: ibuffer + integer :: bufferOffset + real (kind=RKIND), dimension(:), pointer :: rbuffer => null() + integer, dimension(:), pointer :: ibuffer => null() integer :: reqID - type (mpas_communication_list), pointer :: next + type (mpas_communication_list), pointer :: next => null() + logical :: received end type mpas_communication_list + + type mpas_exchange_field_list + integer :: nLen + character (len=StrKIND) :: fieldName + integer :: fieldType, nDims + logical, dimension(:), pointer :: haloLayers => null() + logical, dimension(:), pointer :: timeLevels => null() + type (mpas_exchange_field_list), pointer :: next => null() + end type mpas_exchange_field_list + + type mpas_exchange_group + integer :: nLen + character (len=StrKIND) :: groupName + type (mpas_pool_type), pointer :: fieldPool => null() + type (mpas_exchange_field_list), pointer :: fieldList => null() + + ! 
Each group should have it's own communication lists, since they have the buffers for the communications + type (mpas_communication_list), pointer :: sendList => null() + type (mpas_communication_list), pointer :: recvList => null() + type (mpas_exchange_group), pointer :: next => null() + end type mpas_exchange_group diff --git a/src/framework/mpas_domain_routines.F b/src/framework/mpas_domain_routines.F index e20951b24c..5d6c563cf7 100644 --- a/src/framework/mpas_domain_routines.F +++ b/src/framework/mpas_domain_routines.F @@ -22,6 +22,7 @@ module mpas_domain_routines use mpas_kind_types use mpas_derived_types use mpas_pool_routines + use mpas_dmpar contains @@ -49,6 +50,7 @@ subroutine mpas_allocate_domain(dom)!{{{ allocate(dom % packages) allocate(dom % clock) allocate(dom % streamManager) + allocate(dom % ioContext) call mpas_pool_create_pool(dom % configs) call mpas_pool_create_pool(dom % packages) @@ -117,6 +119,7 @@ subroutine mpas_deallocate_domain(dom)!{{{ type (domain_type), pointer :: dom !< Input/Output: Domain to deallocate type (block_type), pointer :: block_ptr + type (mpas_exchange_group), pointer :: exchGroupPtr block_ptr => dom % blocklist do while (associated(block_ptr)) @@ -124,9 +127,16 @@ subroutine mpas_deallocate_domain(dom)!{{{ block_ptr => block_ptr % next end do + exchGroupPtr => dom % exchangeGroups + do while (associated(exchGroupPtr)) + call mpas_dmpar_exch_group_destroy(dom, exchGroupPtr % groupName) + exchGroupPtr => dom % exchangeGroups + end do + call mpas_pool_destroy_pool(dom % configs) call mpas_pool_destroy_pool(dom % packages) deallocate(dom % clock) + deallocate(dom % ioContext) end subroutine mpas_deallocate_domain!}}} diff --git a/src/framework/mpas_domain_types.inc b/src/framework/mpas_domain_types.inc index 607632fee3..349d44c7ba 100644 --- a/src/framework/mpas_domain_types.inc +++ b/src/framework/mpas_domain_types.inc @@ -5,18 +5,28 @@ type (MPAS_Clock_type), pointer :: clock type (MPAS_streamManager_type), pointer :: 
streamManager - type (mpas_decomp_list), pointer :: decompositions + type (mpas_decomp_list), pointer :: decompositions => null() + type (mpas_io_context_type), pointer :: ioContext => null() ! Also store parallelization info here type (dm_info), pointer :: dminfo + ! Store exchange group information here + type (mpas_exchange_group), pointer :: exchangeGroups => null() + ! Domain specific constants - logical :: on_a_sphere - real (kind=RKIND) :: sphere_radius - character (len=StrKIND) :: namelist_filename !< Constant: Name of namelist file - character (len=StrKIND) :: streams_filename !< Constant: Name of stream configuration file - character (len=StrKIND) :: mesh_spec !< mesh_spec attribute, read in from input file. - character (len=StrKIND) :: parent_id !< parent_id attribute, read in from input file. + logical :: on_a_sphere = .true. + logical :: is_periodic = .false. + real (kind=RKIND) :: sphere_radius = 1.0_RKIND + real (kind=RKIND) :: x_period = 0.0_RKIND + real (kind=RKIND) :: y_period = 0.0_RKIND + character (len=StrKIND) :: namelist_filename = '' !< Constant: Name of namelist file + character (len=StrKIND) :: streams_filename = '' !< Constant: Name of stream configuration file + character (len=StrKIND) :: mesh_spec = '' !< mesh_spec attribute, read in from input file. + character (len=StrKIND) :: parent_id = '' !< parent_id attribute, read in from input file. + + ! Pointer to timer root + type (mpas_timer_root), pointer :: timer_root => null() ! Back pointer to core type (core_type), pointer :: core => null() diff --git a/src/framework/mpas_field_accessor.F b/src/framework/mpas_field_accessor.F new file mode 100644 index 0000000000..b982b7e01c --- /dev/null +++ b/src/framework/mpas_field_accessor.F @@ -0,0 +1,292 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! 
Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! + +#define COMMA , +#define ACCESSOR_ERROR_WRITE(M) write(stderrUnit,*) 'ERROR: '//M + +!*********************************************************************** +! +! mpas_field_accessor +! +!> \brief Module providing quick access to members of fields by name +!> \author Michael Duda, Doug Jacobsen +!> \date 28 March 2016 +!> \details +!> This module provides routines for accessing members of field types +!> (e.g., missingValue) given only the name of the field and a pool +!> in which the field may be found. +! +!----------------------------------------------------------------------- +module mpas_field_accessor + + use mpas_derived_types, only : mpas_pool_type, mpas_pool_field_info_type, & + MPAS_POOL_REAL, MPAS_POOL_INTEGER, MPAS_POOL_CHARACTER, MPAS_POOL_LOGICAL, & + field0DReal, field1DReal, field2DReal, field3DReal, field4DReal, field5DReal, & + field0DInteger, field1DInteger, field2DInteger, field3DInteger, & + field0DChar, field1DChar, & + field0DLogical + use mpas_kind_types, only : RKIND, StrKIND + use mpas_pool_routines, only : mpas_pool_get_field_info, mpas_pool_get_field + use mpas_io_units, only : stderrUnit + + interface mpas_field_access_missing_value + module procedure mpas_field_access_msgval_real + module procedure mpas_field_access_msgval_int + module procedure mpas_field_access_msgval_char + module procedure mpas_field_access_msgval_logical + end interface mpas_field_access_missing_value + + + contains + + + !----------------------------------------------------------------------- + ! subroutine mpas_field_access_missing_value + ! 
+ !> \brief Accesses the 'missingValue' member for a field given the field name + !> \author Doug Jacobsen, Michael Duda + !> \date 28 March 2016 + !> \details + !> This routine returns the value of the 'missingValue' member from the field type + !> for the specified field. The named field must exist in the specified pool; + !> if it does not, an error message will be printed. + ! + !----------------------------------------------------------------------- + subroutine mpas_field_access_msgval_real(fieldPool, fieldName, missingValue) + + implicit none + + type (mpas_pool_type), intent(in) :: fieldPool + character(len=*), intent(in) :: fieldName + real(kind=RKIND), intent(out) :: missingValue + + type (mpas_pool_field_info_type) :: fieldInfo + type (field0DReal), pointer :: r0 => null() + type (field1DReal), pointer :: r1 => null() + type (field2DReal), pointer :: r2 => null() + type (field3DReal), pointer :: r3 => null() + type (field4DReal), pointer :: r4 => null() + type (field5DReal), pointer :: r5 => null() + + + ! Initialize fieldType so we can detect whether returned info is valid + fieldInfo % fieldType = MPAS_POOL_REAL - 1 + call mpas_pool_get_field_info(fieldPool, trim(fieldName), fieldInfo) + + if (fieldInfo % fieldType /= MPAS_POOL_REAL) then + ACCESSOR_ERROR_WRITE('Cannot return missingValue for field '//trim(fieldName)) + ACCESSOR_ERROR_WRITE('Either '//trim(fieldName)//' was not found in the specified pool or is not a real-type field') + return + end if + + ! At this point, we know that the field exists in the pool and is a real-valued field, + ! so we should not need extensive error checking below... 
+ + select case(fieldInfo % nDims) + case (0) + call mpas_pool_get_field(fieldPool, trim(fieldName), r0) + missingValue = r0 % missingValue + case (1) + call mpas_pool_get_field(fieldPool, trim(fieldName), r1) + missingValue = r1 % missingValue + case (2) + call mpas_pool_get_field(fieldPool, trim(fieldName), r2) + missingValue = r2 % missingValue + case (3) + call mpas_pool_get_field(fieldPool, trim(fieldName), r3) + missingValue = r3 % missingValue + case (4) + call mpas_pool_get_field(fieldPool, trim(fieldName), r4) + missingValue = r4 % missingValue + case (5) + call mpas_pool_get_field(fieldPool, trim(fieldName), r5) + missingValue = r5 % missingValue + case default + ACCESSOR_ERROR_WRITE('Unhandled dimensionality (6-d or more) in mpas_field_access_msgval_real') + end select + + end subroutine mpas_field_access_msgval_real + + + !----------------------------------------------------------------------- + ! subroutine mpas_field_access_missing_value + ! + !> \brief Accesses the 'missingValue' member for a field given the field name + !> \author Doug Jacobsen, Michael Duda + !> \date 28 March 2016 + !> \details + !> This routine returns the value of the 'missingValue' member from the field type + !> for the specified field. The named field must exist in the specified pool; + !> if it does not, an error message will be printed. + ! + !----------------------------------------------------------------------- + subroutine mpas_field_access_msgval_int(fieldPool, fieldName, missingValue) + + implicit none + + type (mpas_pool_type), intent(in) :: fieldPool + character(len=*), intent(in) :: fieldName + integer, intent(out) :: missingValue + + type (mpas_pool_field_info_type) :: fieldInfo + type (field0DInteger), pointer :: i0 => null() + type (field1DInteger), pointer :: i1 => null() + type (field2DInteger), pointer :: i2 => null() + type (field3DInteger), pointer :: i3 => null() + + + ! 
Initialize fieldType so we can detect whether returned info is valid + fieldInfo % fieldType = MPAS_POOL_INTEGER - 1 + call mpas_pool_get_field_info(fieldPool, trim(fieldName), fieldInfo) + + if (fieldInfo % fieldType /= MPAS_POOL_INTEGER) then + ACCESSOR_ERROR_WRITE('Cannot return missingValue for field '//trim(fieldName)) + ACCESSOR_ERROR_WRITE('Either '//trim(fieldName)//' was not found in the specified pool or is not an integer-type field') + return + end if + + ! At this point, we know that the field exists in the pool and is an integer-valued field, + ! so we should not need extensive error checking below... + + select case(fieldInfo % nDims) + case (0) + call mpas_pool_get_field(fieldPool, trim(fieldName), i0) + missingValue = i0 % missingValue + case (1) + call mpas_pool_get_field(fieldPool, trim(fieldName), i1) + missingValue = i1 % missingValue + case (2) + call mpas_pool_get_field(fieldPool, trim(fieldName), i2) + missingValue = i2 % missingValue + case (3) + call mpas_pool_get_field(fieldPool, trim(fieldName), i3) + missingValue = i3 % missingValue + case default + ACCESSOR_ERROR_WRITE('Unhandled dimensionality (4-d or more) in mpas_field_access_msgval_int') + end select + + end subroutine mpas_field_access_msgval_int + + + !----------------------------------------------------------------------- + ! subroutine mpas_field_access_missing_value + ! + !> \brief Accesses the 'missingValue' member for a field given the field name + !> \author Doug Jacobsen, Michael Duda + !> \date 28 March 2016 + !> \details + !> This routine returns the value of the 'missingValue' member from the field type + !> for the specified field. The named field must exist in the specified pool; + !> if it does not, an error message will be printed. + ! 
+ !----------------------------------------------------------------------- + subroutine mpas_field_access_msgval_char(fieldPool, fieldName, missingValue) + + implicit none + + type (mpas_pool_type), intent(in) :: fieldPool + character(len=*), intent(in) :: fieldName + character(len=*), intent(out) :: missingValue + + type (mpas_pool_field_info_type) :: fieldInfo + type (field0DChar), pointer :: c0 => null() + type (field1DChar), pointer :: c1 => null() + + + ! Initialize fieldType so we can detect whether returned info is valid + fieldInfo % fieldType = MPAS_POOL_CHARACTER - 1 + call mpas_pool_get_field_info(fieldPool, trim(fieldName), fieldInfo) + + if (fieldInfo % fieldType /= MPAS_POOL_CHARACTER) then + ACCESSOR_ERROR_WRITE('Cannot return missingValue for field '//trim(fieldName)) + ACCESSOR_ERROR_WRITE('Either '//trim(fieldName)//' was not found in the specified pool or is not a char-type field') + return + end if + + ! At this point, we know that the field exists in the pool and is a character-valued field, + ! so we should not need extensive error checking below... 
+ + select case(fieldInfo % nDims) + case (0) + call mpas_pool_get_field(fieldPool, trim(fieldName), c0) + if (len(missingValue) < len_trim(c0 % missingValue)) then + ACCESSOR_ERROR_WRITE('Truncating missingValue for field '//trim(fieldName)) + ACCESSOR_ERROR_WRITE('Actual argument for missingValue is too short') + missingValue = c0 % missingValue(1:len(missingValue)) + else + missingValue = trim(c0 % missingValue) + end if + case (1) + call mpas_pool_get_field(fieldPool, trim(fieldName), c1) + if (len(missingValue) < len_trim(c1 % missingValue)) then + ACCESSOR_ERROR_WRITE('Truncating missingValue for field '//trim(fieldName)) + ACCESSOR_ERROR_WRITE('Actual argument for missingValue is too short') + missingValue = c1 % missingValue(1:len(missingValue)) + else + missingValue = trim(c1 % missingValue) + end if + case default + ACCESSOR_ERROR_WRITE('Unhandled dimensionality (2-d or more) in mpas_field_access_msgval_char') + end select + + end subroutine mpas_field_access_msgval_char + + + !----------------------------------------------------------------------- + ! subroutine mpas_field_access_missing_value + ! + !> \brief Accesses the 'missingValue' member for a field given the field name + !> \author Doug Jacobsen, Michael Duda + !> \date 28 March 2016 + !> \details + !> This routine returns the value of the 'missingValue' member from the field type + !> for the specified field. The named field must exist in the specified pool; + !> if it does not, an error message will be printed. + ! + !----------------------------------------------------------------------- + subroutine mpas_field_access_msgval_logical(fieldPool, fieldName, missingValue) + + implicit none + + type (mpas_pool_type), intent(in) :: fieldPool + character(len=*), intent(in) :: fieldName + logical, intent(out) :: missingValue + + type (mpas_pool_field_info_type) :: fieldInfo + type (field0DLogical), pointer :: l0 => null() + + +#ifdef POOL_LOGICAL_FIELD_SUPPORT + ! 
Initialize fieldType so we can detect whether returned info is valid + fieldInfo % fieldType = MPAS_POOL_LOGICAL - 1 + call mpas_pool_get_field_info(fieldPool, trim(fieldName), fieldInfo) + + if (fieldInfo % fieldType /= MPAS_POOL_LOGICAL) then + ACCESSOR_ERROR_WRITE('Cannot return missingValue for field '//trim(fieldName)) + ACCESSOR_ERROR_WRITE('Either '//trim(fieldName)//' was not found in the specified pool or is not a logical-type field') + return + end if + + ! At this point, we know that the field exists in the pool and is a logical-valued field, + ! so we should not need extensive error checking below... + + select case(fieldInfo % nDims) + case (0) + call mpas_pool_get_field(fieldPool, trim(fieldName), l0) + missingValue = l0 % missingValue + case default + ACCESSOR_ERROR_WRITE('Unhandled dimensionality (1-d or more) in mpas_field_access_msgval_logical') + end select +#else + ACCESSOR_ERROR_WRITE('Support for accessing missingValue for logical fields is not implemented') +#endif + + end subroutine mpas_field_access_msgval_logical + + +end module mpas_field_accessor diff --git a/src/framework/mpas_field_routines.F b/src/framework/mpas_field_routines.F index 0e854fbd57..9429e8522e 100644 --- a/src/framework/mpas_field_routines.F +++ b/src/framework/mpas_field_routines.F @@ -14,12 +14,15 @@ !> \date 03/10/2015 !> \details !> This module defines routines related to MPAS field types (defined in mpas_data_types). +!> All routines defined in this module are non-blocking for threads. ! !----------------------------------------------------------------------- module mpas_field_routines use mpas_kind_types use mpas_derived_types + use mpas_threading + use mpas_attlist interface mpas_allocate_mold module procedure mpas_allocate_mold_1dreal @@ -118,11 +121,13 @@ module mpas_field_routines !> This routine allocates a 1D scratch integer field. ! 
!----------------------------------------------------------------------- - subroutine mpas_allocate_scratch_field1d_integer(f, single_block_in)!{{{ + subroutine mpas_allocate_scratch_field1d_integer(f, single_block_in, init_array_in)!{{{ type (field1dInteger), pointer :: f !< Input: Field to allocate logical, intent(in), optional :: single_block_in !< Input: Logical flag that determines if a single block should be allocated or all blocks. - logical :: single_block + logical, intent(in), optional :: init_array_in !< Input: Logical flag that determines if allocated arrays are initialized + logical :: single_block, init_array type (field1dInteger), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -134,17 +139,33 @@ subroutine mpas_allocate_scratch_field1d_integer(f, single_block_in)!{{{ single_block = .false. end if - if(.not. single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(.not.associated(f_cursor % array)) then - allocate(f_cursor % array(f_cursor % dimSizes(1))) - end if - f_cursor => f_cursor % next - end do + if (present(init_array_in)) then + init_array = init_array_in else - if(.not.associated(f % array)) then - allocate(f % array(f % dimSizes(1))) + init_array = .true. + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not. single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(.not.associated(f_cursor % array)) then + allocate(f_cursor % array(f_cursor % dimSizes(1))) + if ( init_array ) then + f_cursor % array(:) = f_cursor % defaultValue + end if + end if + f_cursor => f_cursor % next + end do + else + if(.not.associated(f % array)) then + allocate(f % array(f % dimSizes(1))) + if ( init_array ) then + f % array(:) = f % defaultValue + end if + end if end if end if @@ -162,11 +183,13 @@ end subroutine mpas_allocate_scratch_field1d_integer!}}} !> This routine allocates a 2D scratch integer field. ! 
!----------------------------------------------------------------------- - subroutine mpas_allocate_scratch_field2d_integer(f, single_block_in)!{{{ + subroutine mpas_allocate_scratch_field2d_integer(f, single_block_in, init_array_in)!{{{ type (field2dInteger), pointer :: f !< Input: Field to allocate logical, intent(in), optional :: single_block_in !< Input: Logical flag that determines if a single block should be allocated, or all blocks. - logical :: single_block + logical, intent(in), optional :: init_array_in !< Input: Logical flag that determines if allocated arrays are initialized + logical :: single_block, init_array type (field2dInteger), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -178,17 +201,33 @@ subroutine mpas_allocate_scratch_field2d_integer(f, single_block_in)!{{{ single_block = .false. end if - if(.not. single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(.not.associated(f_cursor % array)) then - allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2))) - end if - f_cursor => f_cursor % next - end do + if (present(init_array_in)) then + init_array = init_array_in else - if(.not.associated(f % array)) then - allocate(f % array(f % dimSizes(1), f % dimSizes(2))) + init_array = .true. + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not. 
single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(.not.associated(f_cursor % array)) then + allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2))) + if ( init_array ) then + f_cursor % array(:, :) = f_cursor % defaultValue + end if + end if + f_cursor => f_cursor % next + end do + else + if(.not.associated(f % array)) then + allocate(f % array(f % dimSizes(1), f % dimSizes(2))) + if ( init_array ) then + f % array(:, :) = f % defaultValue + end if + end if end if end if @@ -206,11 +245,13 @@ end subroutine mpas_allocate_scratch_field2d_integer!}}} !> This routine allocates a 3D scratch integer field. ! !----------------------------------------------------------------------- - subroutine mpas_allocate_scratch_field3d_integer(f, single_block_in)!{{{ + subroutine mpas_allocate_scratch_field3d_integer(f, single_block_in, init_array_in)!{{{ type (field3dInteger), pointer :: f !< Input: Field to allocate logical, intent(in), optional :: single_block_in !< Input: Logical flag that determines if a single block should be allocated, or all blocks. - logical :: single_block + logical, intent(in), optional :: init_array_in !< Input: Logical flag that determines if allocated arrays are initialized + logical :: single_block, init_array type (field3dInteger), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -222,17 +263,33 @@ subroutine mpas_allocate_scratch_field3d_integer(f, single_block_in)!{{{ single_block = .false. end if - if(.not. single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(.not.associated(f_cursor % array)) then - allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3))) - end if - f_cursor => f_cursor % next - end do + if (present(init_array_in)) then + init_array = init_array_in else - if(.not.associated(f % array)) then - allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3))) + init_array = .true. 
+ end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not. single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(.not.associated(f_cursor % array)) then + allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3))) + if ( init_array ) then + f_cursor % array(:, :, :) = f_cursor % defaultValue + end if + end if + f_cursor => f_cursor % next + end do + else + if(.not.associated(f % array)) then + allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3))) + if ( init_array ) then + f % array(:, :, :) = f % defaultValue + end if + end if end if end if @@ -250,11 +307,13 @@ end subroutine mpas_allocate_scratch_field3d_integer!}}} !> This routine allocates a 1D scratch real field. ! !----------------------------------------------------------------------- - subroutine mpas_allocate_scratch_field1d_real(f, single_block_in)!{{{ + subroutine mpas_allocate_scratch_field1d_real(f, single_block_in, init_array_in)!{{{ type (field1dReal), pointer :: f !< Input: Field to allocate logical, intent(in), optional :: single_block_in !< Input: Logical flag that determines if a single block should be allocated, or all blocks. - logical :: single_block + logical, intent(in), optional :: init_array_in !< Input: Logical flag that determines if allocated arrays are initialized + logical :: single_block, init_array type (field1dReal), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -266,17 +325,33 @@ subroutine mpas_allocate_scratch_field1d_real(f, single_block_in)!{{{ single_block = .false. end if - if(.not. 
single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(.not.associated(f_cursor % array)) then - allocate(f_cursor % array(f_cursor % dimSizes(1))) - end if - f_cursor => f_cursor % next - end do + if (present(init_array_in)) then + init_array = init_array_in else - if(.not.associated(f % array)) then - allocate(f % array(f % dimSizes(1))) + init_array = .true. + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not. single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(.not.associated(f_cursor % array)) then + allocate(f_cursor % array(f_cursor % dimSizes(1))) + if ( init_array ) then + f_cursor % array(:) = f_cursor % defaultValue + end if + end if + f_cursor => f_cursor % next + end do + else + if(.not.associated(f % array)) then + allocate(f % array(f % dimSizes(1))) + if ( init_array ) then + f % array(:) = f % defaultValue + end if + end if end if end if @@ -294,11 +369,13 @@ end subroutine mpas_allocate_scratch_field1d_real!}}} !> This routine allocates a 2D scratch real field. ! !----------------------------------------------------------------------- - subroutine mpas_allocate_scratch_field2d_real(f, single_block_in)!{{{ + subroutine mpas_allocate_scratch_field2d_real(f, single_block_in, init_array_in)!{{{ type (field2dReal), pointer :: f !< Input: Field to allocate logical, intent(in), optional :: single_block_in !< Input: Logical flag that determines if a single block should be allocated, or all blocks. - logical :: single_block + logical, intent(in), optional :: init_array_in !< Input: Logical flag that determines if allocated arrays are initialized + logical :: single_block, init_array type (field2dReal), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -310,17 +387,33 @@ subroutine mpas_allocate_scratch_field2d_real(f, single_block_in)!{{{ single_block = .false. end if - if(.not. 
single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(.not.associated(f_cursor % array)) then - allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2))) - end if - f_cursor => f_cursor % next - end do + if (present(init_array_in)) then + init_array = init_array_in else - if(.not.associated(f % array)) then - allocate(f % array(f % dimSizes(1), f % dimSizes(2))) + init_array = .true. + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not. single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(.not.associated(f_cursor % array)) then + allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2))) + if ( init_array ) then + f_cursor % array(:, :) = f_cursor % defaultValue + end if + end if + f_cursor => f_cursor % next + end do + else + if(.not.associated(f % array)) then + allocate(f % array(f % dimSizes(1), f % dimSizes(2))) + if ( init_array ) then + f % array(:, :) = f % defaultValue + end if + end if end if end if @@ -338,11 +431,13 @@ end subroutine mpas_allocate_scratch_field2d_real!}}} !> This routine allocates a 3D scratch real field. ! !----------------------------------------------------------------------- - subroutine mpas_allocate_scratch_field3d_real(f, single_block_in)!{{{ + subroutine mpas_allocate_scratch_field3d_real(f, single_block_in, init_array_in)!{{{ type (field3dReal), pointer :: f !< Input: Field to allocate logical, intent(in), optional :: single_block_in !< Input: Logical flag that determines if a single block should be allocated, or all blocks. 
- logical :: single_block + logical, intent(in), optional :: init_array_in !< Input: Logical flag that determines if allocated arrays are initialized + logical :: single_block, init_array type (field3dReal), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -354,17 +449,33 @@ subroutine mpas_allocate_scratch_field3d_real(f, single_block_in)!{{{ single_block = .false. end if - if(.not. single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(.not.associated(f_cursor % array)) then - allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3))) - end if - f_cursor => f_cursor % next - end do + if (present(init_array_in)) then + init_array = init_array_in else - if(.not.associated(f % array)) then - allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3))) + init_array = .true. + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not. single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(.not.associated(f_cursor % array)) then + allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3))) + if ( init_array ) then + f_cursor % array(:, :, :) = f_cursor % defaultValue + end if + end if + f_cursor => f_cursor % next + end do + else + if(.not.associated(f % array)) then + allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3))) + if ( init_array ) then + f % array(:, :, :) = f % defaultValue + end if + end if end if end if @@ -382,11 +493,13 @@ end subroutine mpas_allocate_scratch_field3d_real!}}} !> This routine allocates a 4D scratch real field. ! 
!----------------------------------------------------------------------- - subroutine mpas_allocate_scratch_field4d_real(f, single_block_in)!{{{ + subroutine mpas_allocate_scratch_field4d_real(f, single_block_in, init_array_in)!{{{ type (field4dReal), pointer :: f !< Input: Field to allocate logical, intent(in), optional :: single_block_in !< Input: Logical flag that determines if a single block should be allocated, or all blocks. - logical :: single_block + logical, intent(in), optional :: init_array_in !< Input: Logical flag that determines if allocated arrays are initialized + logical :: single_block, init_array type (field4dReal), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -398,17 +511,33 @@ subroutine mpas_allocate_scratch_field4d_real(f, single_block_in)!{{{ single_block = .false. end if - if(.not. single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(.not.associated(f_cursor % array)) then - allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3), f_cursor % dimSizes(4))) - end if - f_cursor => f_cursor % next - end do + if (present(init_array_in)) then + init_array = init_array_in else - if(.not.associated(f % array)) then - allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3), f % dimSizes(4))) + init_array = .true. + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not. 
single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(.not.associated(f_cursor % array)) then + allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3), f_cursor % dimSizes(4))) + if ( init_array ) then + f_cursor % array(:, :, :, :) = f_cursor % defaultValue + end if + end if + f_cursor => f_cursor % next + end do + else + if(.not.associated(f % array)) then + allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3), f % dimSizes(4))) + if ( init_array ) then + f % array(:, :, :, :) = f % defaultValue + end if + end if end if end if @@ -426,11 +555,13 @@ end subroutine mpas_allocate_scratch_field4d_real!}}} !> This routine allocates a 5D scratch real field. ! !----------------------------------------------------------------------- - subroutine mpas_allocate_scratch_field5d_real(f, single_block_in)!{{{ + subroutine mpas_allocate_scratch_field5d_real(f, single_block_in, init_array_in)!{{{ type (field5dReal), pointer :: f !< Input: Field to allocate logical, intent(in), optional :: single_block_in !< Input: Logical flag that determines if a single block should be allocated, or all blocks. - logical :: single_block + logical, intent(in), optional :: init_array_in !< Input: Logical flag that determines if allocated arrays are initialized + logical :: single_block, init_array type (field5dReal), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -442,17 +573,33 @@ subroutine mpas_allocate_scratch_field5d_real(f, single_block_in)!{{{ single_block = .false. end if - if(.not. 
single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(.not.associated(f_cursor % array)) then - allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3), f_cursor % dimSizes(4), f_cursor % dimSizes(5))) - end if - f_cursor => f_cursor % next - end do + if (present(init_array_in)) then + init_array = init_array_in else - if(.not.associated(f % array)) then - allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3), f % dimSizes(4), f % dimSizes(5))) + init_array = .true. + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not. single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(.not.associated(f_cursor % array)) then + allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3), f_cursor % dimSizes(4), f_cursor % dimSizes(5))) + if ( init_array ) then + f_cursor % array(:, :, :, :, :) = f_cursor % defaultValue + end if + end if + f_cursor => f_cursor % next + end do + else + if(.not.associated(f % array)) then + allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3), f % dimSizes(4), f % dimSizes(5))) + if ( init_array ) then + f % array(:, :, :, :, :) = f % defaultValue + end if + end if end if end if @@ -470,11 +617,13 @@ end subroutine mpas_allocate_scratch_field5d_real!}}} !> This routine allocates a 1D scratch character field. ! !----------------------------------------------------------------------- - subroutine mpas_allocate_scratch_field1d_char(f, single_block_in)!{{{ + subroutine mpas_allocate_scratch_field1d_char(f, single_block_in, init_array_in)!{{{ type (field1dChar), pointer :: f !< Input: Field to allocate logical, intent(in), optional :: single_block_in !< Input: Logical flag that determines if a single block should be allocated, or all blocks. 
- logical :: single_block + logical, intent(in), optional :: init_array_in !< Input: Logical flag that determines if allocated arrays are initialized + logical :: single_block, init_array type (field1dChar), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -486,17 +635,33 @@ subroutine mpas_allocate_scratch_field1d_char(f, single_block_in)!{{{ single_block = .false. end if - if(.not. single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(.not.associated(f_cursor % array)) then - allocate(f_cursor % array(f_cursor % dimSizes(1))) - end if - f_cursor => f_cursor % next - end do + if (present(init_array_in)) then + init_array = init_array_in else - if(.not.associated(f % array)) then - allocate(f % array(f % dimSizes(1))) + init_array = .true. + end if + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not. single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(.not.associated(f_cursor % array)) then + allocate(f_cursor % array(f_cursor % dimSizes(1))) + if ( init_array ) then + f_cursor % array(:) = f_cursor % defaultValue + end if + end if + f_cursor => f_cursor % next + end do + else + if(.not.associated(f % array)) then + allocate(f % array(f % dimSizes(1))) + if ( init_array ) then + f % array(:) = f % defaultValue + end if + end if end if end if @@ -519,6 +684,7 @@ subroutine mpas_deallocate_scratch_field1d_integer(f, single_block_in)!{{{ logical, intent(in), optional :: single_block_in !< Input: Logical that determines if a single block should be deallocated, or all blocks. logical :: single_block type (field1dInteger), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -530,18 +696,22 @@ subroutine mpas_deallocate_scratch_field1d_integer(f, single_block_in)!{{{ single_block = .false. 
end if - if(.not.single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not.single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - f_cursor => f_cursor % next - end do - else - if(associated(f % array)) then - deallocate(f % array) + f_cursor => f_cursor % next + end do + else + if(associated(f % array)) then + deallocate(f % array) + end if end if end if @@ -564,6 +734,7 @@ subroutine mpas_deallocate_scratch_field2d_integer(f, single_block_in)!{{{ logical, intent(in), optional :: single_block_in !< Input: Logical that determines if a single block should be deallocated, or all blocks. logical :: single_block type (field2dInteger), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -575,18 +746,22 @@ subroutine mpas_deallocate_scratch_field2d_integer(f, single_block_in)!{{{ single_block = .false. end if - if(.not.single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not.single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - f_cursor => f_cursor % next - end do - else - if(associated(f % array)) then - deallocate(f % array) + f_cursor => f_cursor % next + end do + else + if(associated(f % array)) then + deallocate(f % array) + end if end if end if @@ -609,6 +784,7 @@ subroutine mpas_deallocate_scratch_field3d_integer(f, single_block_in)!{{{ logical, intent(in), optional :: single_block_in !< Input: Logical that determines if a single block should be deallocated, or all blocks. 
logical :: single_block type (field3dInteger), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -620,18 +796,22 @@ subroutine mpas_deallocate_scratch_field3d_integer(f, single_block_in)!{{{ single_block = .false. end if - if(.not.single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not.single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - f_cursor => f_cursor % next - end do - else - if(associated(f % array)) then - deallocate(f % array) + f_cursor => f_cursor % next + end do + else + if(associated(f % array)) then + deallocate(f % array) + end if end if end if @@ -654,6 +834,7 @@ subroutine mpas_deallocate_scratch_field1d_real(f, single_block_in)!{{{ logical, intent(in), optional :: single_block_in !< Input: Logical that determines if a single block should be deallocated, or all blocks. logical :: single_block type (field1dReal), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -665,18 +846,22 @@ subroutine mpas_deallocate_scratch_field1d_real(f, single_block_in)!{{{ single_block = .false. 
end if - if(.not.single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not.single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - f_cursor => f_cursor % next - end do - else - if(associated(f % array)) then - deallocate(f % array) + f_cursor => f_cursor % next + end do + else + if(associated(f % array)) then + deallocate(f % array) + end if end if end if @@ -699,6 +884,7 @@ subroutine mpas_deallocate_scratch_field2d_real(f, single_block_in)!{{{ logical, intent(in), optional :: single_block_in !< Input: Logical that determines if a single block should be deallocated, or all blocks. logical :: single_block type (field2dReal), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -710,18 +896,22 @@ subroutine mpas_deallocate_scratch_field2d_real(f, single_block_in)!{{{ single_block = .false. end if - if(.not.single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not.single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - f_cursor => f_cursor % next - end do - else - if(associated(f % array)) then - deallocate(f % array) + f_cursor => f_cursor % next + end do + else + if(associated(f % array)) then + deallocate(f % array) + end if end if end if @@ -744,6 +934,7 @@ subroutine mpas_deallocate_scratch_field3d_real(f, single_block_in)!{{{ logical, intent(in), optional :: single_block_in !< Input: Logical that determines if a single block should be deallocated, or all blocks. 
logical :: single_block type (field3dReal), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -755,18 +946,22 @@ subroutine mpas_deallocate_scratch_field3d_real(f, single_block_in)!{{{ single_block = .false. end if - if(.not.single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not.single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - f_cursor => f_cursor % next - end do - else - if(associated(f % array)) then - deallocate(f % array) + f_cursor => f_cursor % next + end do + else + if(associated(f % array)) then + deallocate(f % array) + end if end if end if @@ -789,6 +984,7 @@ subroutine mpas_deallocate_scratch_field4d_real(f, single_block_in)!{{{ logical, intent(in), optional :: single_block_in !< Input: Logical that determines if a single block should be deallocated, or all blocks. logical :: single_block type (field4dReal), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -800,18 +996,22 @@ subroutine mpas_deallocate_scratch_field4d_real(f, single_block_in)!{{{ single_block = .false. 
end if - if(.not.single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not.single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - f_cursor => f_cursor % next - end do - else - if(associated(f % array)) then - deallocate(f % array) + f_cursor => f_cursor % next + end do + else + if(associated(f % array)) then + deallocate(f % array) + end if end if end if @@ -834,6 +1034,7 @@ subroutine mpas_deallocate_scratch_field5d_real(f, single_block_in)!{{{ logical, intent(in), optional :: single_block_in !< Input: Logical that determines if a single block should be deallocated, or all blocks. logical :: single_block type (field5dReal), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -845,18 +1046,22 @@ subroutine mpas_deallocate_scratch_field5d_real(f, single_block_in)!{{{ single_block = .false. end if - if(.not.single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not.single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - f_cursor => f_cursor % next - end do - else - if(associated(f % array)) then - deallocate(f % array) + f_cursor => f_cursor % next + end do + else + if(associated(f % array)) then + deallocate(f % array) + end if end if end if @@ -879,6 +1084,7 @@ subroutine mpas_deallocate_scratch_field1d_char(f, single_block_in)!{{{ logical, intent(in), optional :: single_block_in !< Input: Logical that determines if a single block should be deallocated, or all blocks. 
logical :: single_block type (field1dChar), pointer :: f_cursor + integer :: threadNum if(f % isPersistent) then return @@ -890,18 +1096,22 @@ subroutine mpas_deallocate_scratch_field1d_char(f, single_block_in)!{{{ single_block = .false. end if - if(.not.single_block) then - f_cursor => f - do while(associated(f_cursor)) - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if(.not.single_block) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - f_cursor => f_cursor % next - end do - else - if(associated(f % array)) then - deallocate(f % array) + f_cursor => f_cursor % next + end do + else + if(associated(f % array)) then + deallocate(f % array) + end if end if end if @@ -922,19 +1132,32 @@ end subroutine mpas_deallocate_scratch_field1d_char!}}} subroutine mpas_deallocate_field0d_logical(f)!{{{ type (field0dLogical), pointer :: f !< Input: Field to deallocate type (field0dLogical), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f - - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + f_cursor => f + + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - deallocate(f_cursor) - f_cursor => f - end do + deallocate(f_cursor) + f_cursor => f + end do + end if end subroutine mpas_deallocate_field0d_logical!}}} @@ -953,19 +1176,32 @@ end subroutine mpas_deallocate_field0d_logical!}}} subroutine mpas_deallocate_field0d_integer(f)!{{{ type 
(field0dInteger), pointer :: f !< Input: Field to deallocate type (field0dInteger), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f - - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + f_cursor => f + + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - deallocate(f_cursor) - f_cursor => f - end do + deallocate(f_cursor) + f_cursor => f + end do + end if end subroutine mpas_deallocate_field0d_integer!}}} @@ -984,23 +1220,36 @@ end subroutine mpas_deallocate_field0d_integer!}}} subroutine mpas_deallocate_field1d_integer(f)!{{{ type (field1dInteger), pointer :: f !< Input: Field to deallocate type (field1dInteger), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if - - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() - deallocate(f_cursor) + if ( threadNum == 0 ) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if + + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - f_cursor => f - end do + deallocate(f_cursor) + + f_cursor => f + end do + end if end subroutine mpas_deallocate_field1d_integer!}}} @@ 
-1019,23 +1268,36 @@ end subroutine mpas_deallocate_field1d_integer!}}} subroutine mpas_deallocate_field2d_integer(f)!{{{ type (field2dInteger), pointer :: f !< Input: Field to deallocate type (field2dInteger), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if - - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() - deallocate(f_cursor) + if ( threadNum == 0 ) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if + + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - f_cursor => f - end do + deallocate(f_cursor) + + f_cursor => f + end do + end if end subroutine mpas_deallocate_field2d_integer!}}} @@ -1054,23 +1316,36 @@ end subroutine mpas_deallocate_field2d_integer!}}} subroutine mpas_deallocate_field3d_integer(f)!{{{ type (field3dInteger), pointer :: f !< Input: Field to deallocate type (field3dInteger), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if - - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + threadNum = mpas_threading_get_thread_num() - deallocate(f_cursor) + if ( threadNum == 0 ) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if + + if ( associated(f_cursor % attLists) ) then + do i = 1, 
size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - f_cursor => f - end do + deallocate(f_cursor) + + f_cursor => f + end do + end if end subroutine mpas_deallocate_field3d_integer!}}} @@ -1089,20 +1364,33 @@ end subroutine mpas_deallocate_field3d_integer!}}} subroutine mpas_deallocate_field0d_real(f)!{{{ type (field0dReal), pointer :: f !< Input: Field to deallocate type (field0dReal), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f + threadNum = mpas_threading_get_thread_num() - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if + f_cursor => f - deallocate(f_cursor) + if ( threadNum == 0 ) then + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - f_cursor => f - end do + deallocate(f_cursor) + + f_cursor => f + end do + end if end subroutine mpas_deallocate_field0d_real!}}} @@ -1121,23 +1409,36 @@ end subroutine mpas_deallocate_field0d_real!}}} subroutine mpas_deallocate_field1d_real(f)!{{{ type (field1dReal), pointer :: f !< Input: Field to deallocate type (field1dReal), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - if(associated(f_cursor % array)) then - deallocate(f_cursor % 
array) - end if + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - deallocate(f_cursor) + deallocate(f_cursor) - f_cursor => f - end do + f_cursor => f + end do + end if end subroutine mpas_deallocate_field1d_real!}}} @@ -1152,27 +1453,40 @@ end subroutine mpas_deallocate_field1d_real!}}} !> \details !> This routine deallocates a 2D real field. ! - +!----------------------------------------------------------------------- subroutine mpas_deallocate_field2d_real(f)!{{{ type (field2dReal), pointer :: f !< Input: Field to deallocate type (field2dReal), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - deallocate(f_cursor) + deallocate(f_cursor) - f_cursor => f - end do + f_cursor => f + end do + end if end subroutine mpas_deallocate_field2d_real!}}} @@ -1191,23 +1505,36 @@ end subroutine mpas_deallocate_field2d_real!}}} subroutine mpas_deallocate_field3d_real(f)!{{{ type (field3dReal), pointer :: f !< Input: Field to deallocate type (field3dReal), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % 
next - else - nullify(f) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - deallocate(f_cursor) + deallocate(f_cursor) - f_cursor => f - end do + f_cursor => f + end do + end if end subroutine mpas_deallocate_field3d_real!}}} @@ -1226,23 +1553,36 @@ end subroutine mpas_deallocate_field3d_real!}}} subroutine mpas_deallocate_field4d_real(f)!{{{ type (field4dReal), pointer :: f !< Input: Field to deallocate type (field4dReal), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if + + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - deallocate(f_cursor) + deallocate(f_cursor) - f_cursor => f - end do + f_cursor => f + end do + end if end subroutine mpas_deallocate_field4d_real!}}} @@ -1261,23 +1601,36 @@ end subroutine mpas_deallocate_field4d_real!}}} subroutine 
mpas_deallocate_field5d_real(f)!{{{ type (field5dReal), pointer :: f !< Input: Field to deallocate type (field5dReal), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if + threadNum = mpas_threading_get_thread_num() - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + if ( threadNum == 0 ) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if - deallocate(f_cursor) + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - f_cursor => f - end do + deallocate(f_cursor) + + f_cursor => f + end do + end if end subroutine mpas_deallocate_field5d_real!}}} @@ -1296,19 +1649,32 @@ end subroutine mpas_deallocate_field5d_real!}}} subroutine mpas_deallocate_field0d_char(f)!{{{ type (field0dChar), pointer :: f !< Input: Field to deallocate type (field0dChar), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f + threadNum = mpas_threading_get_thread_num() - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if + if ( threadNum == 0 ) then + f_cursor => f + + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - deallocate(f_cursor) - f_cursor => f - end do + deallocate(f_cursor) + f_cursor => f + end do + end if end subroutine 
mpas_deallocate_field0d_char!}}} @@ -1327,23 +1693,36 @@ end subroutine mpas_deallocate_field0d_char!}}} subroutine mpas_deallocate_field1d_char(f)!{{{ type (field1dChar), pointer :: f !< Input: Field to deallocate type (field1dChar), pointer :: f_cursor + integer :: threadNum + integer :: i, iErr - f_cursor => f - do while(associated(f_cursor)) - if(associated(f % next)) then - f => f % next - else - nullify(f) - end if + threadNum = mpas_threading_get_thread_num() - if(associated(f_cursor % array)) then - deallocate(f_cursor % array) - end if + if ( threadNum == 0 ) then + f_cursor => f + do while(associated(f_cursor)) + if(associated(f % next)) then + f => f % next + else + nullify(f) + end if + + if(associated(f_cursor % array)) then + deallocate(f_cursor % array) + end if + + if ( associated(f_cursor % attLists) ) then + do i = 1, size(f_cursor % attLists, dim=1) + call mpas_deallocate_attlist(f_cursor % attLists(i) % attList, iErr) + end do + deallocate(f_cursor % attLists) + end if - deallocate(f_cursor) + deallocate(f_cursor) - f_cursor => f - end do + f_cursor => f + end do + end if end subroutine mpas_deallocate_field1d_char!}}} @@ -1368,8 +1747,13 @@ subroutine mpas_allocate_mold_1dreal(dst, src)!{{{ real(kind=RKIND), dimension(:), pointer :: dst real(kind=RKIND), dimension(:), intent(in) :: src + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() - allocate(dst(size(src))) + if ( threadNum == 0 ) then + allocate(dst(size(src))) + end if end subroutine mpas_allocate_mold_1dreal!}}} @@ -1396,10 +1780,15 @@ subroutine mpas_allocate_mold_2dreal(dst, src)!{{{ real(kind=RKIND), dimension(:,:), intent(in) :: src integer, dimension(2) :: dims + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() dims = shape(src) - allocate(dst(dims(1),dims(2))) + if ( threadNum == 0 ) then + allocate(dst(dims(1),dims(2))) + end if end subroutine mpas_allocate_mold_2dreal!}}} @@ -1426,10 +1815,15 @@ subroutine mpas_allocate_mold_3dreal(dst, 
src)!{{{ real(kind=RKIND), dimension(:,:,:), intent(in) :: src integer, dimension(3) :: dims + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() dims = shape(src) - allocate(dst(dims(1),dims(2),dims(3))) + if ( threadNum == 0 ) then + allocate(dst(dims(1),dims(2),dims(3))) + end if end subroutine mpas_allocate_mold_3dreal!}}} @@ -1456,10 +1850,15 @@ subroutine mpas_allocate_mold_4dreal(dst, src)!{{{ real(kind=RKIND), dimension(:,:,:,:), intent(in) :: src integer, dimension(4) :: dims + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() dims = shape(src) - allocate(dst(dims(1),dims(2),dims(3),dims(4))) + if ( threadNum == 0 ) then + allocate(dst(dims(1),dims(2),dims(3),dims(4))) + end if end subroutine mpas_allocate_mold_4dreal!}}} @@ -1486,10 +1885,15 @@ subroutine mpas_allocate_mold_5dreal(dst, src)!{{{ real(kind=RKIND), dimension(:,:,:,:,:), intent(in) :: src integer, dimension(5) :: dims + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() dims = shape(src) - allocate(dst(dims(1),dims(2),dims(3),dims(4),dims(5))) + if ( threadNum == 0 ) then + allocate(dst(dims(1),dims(2),dims(3),dims(4),dims(5))) + end if end subroutine mpas_allocate_mold_5dreal!}}} @@ -1514,8 +1918,13 @@ subroutine mpas_allocate_mold_1dinteger(dst, src)!{{{ integer, dimension(:), pointer :: dst integer, dimension(:), intent(in) :: src + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() - allocate(dst(size(src))) + if ( threadNum == 0 ) then + allocate(dst(size(src))) + end if end subroutine mpas_allocate_mold_1dinteger!}}} @@ -1542,10 +1951,15 @@ subroutine mpas_allocate_mold_2dinteger(dst, src)!{{{ integer, dimension(:,:), intent(in) :: src integer, dimension(2) :: dims + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() dims = shape(src) - allocate(dst(dims(1),dims(2))) + if ( threadNum == 0 ) then + allocate(dst(dims(1),dims(2))) + end if end subroutine mpas_allocate_mold_2dinteger!}}} @@ -1572,10 
+1986,15 @@ subroutine mpas_allocate_mold_3dinteger(dst, src)!{{{ integer, dimension(:,:,:), intent(in) :: src integer, dimension(3) :: dims + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() dims = shape(src) - allocate(dst(dims(1),dims(2),dims(3))) + if ( threadNum == 0 ) then + allocate(dst(dims(1),dims(2),dims(3))) + end if end subroutine mpas_allocate_mold_3dinteger!}}} @@ -1600,11 +2019,15 @@ subroutine mpas_allocate_mold_1dchar(dst, src)!{{{ character(len=StrKIND), dimension(:), pointer :: dst character(len=StrKIND), dimension(:), intent(in) :: src + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() - allocate(dst(size(src))) + if ( threadNum == 0 ) then + allocate(dst(size(src))) + end if - end subroutine mpas_allocate_mold_1dchar -!}}} + end subroutine mpas_allocate_mold_1dchar!}}} !*********************************************************************** ! @@ -1627,6 +2050,7 @@ subroutine mpas_duplicate_field0d_real(src, dst, copy_array_only) !{{{ type (field0DReal), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_scalar.inc" @@ -1654,6 +2078,7 @@ subroutine mpas_duplicate_field1d_real(src, dst, copy_array_only) !{{{ type (field1DReal), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_array.inc" @@ -1681,6 +2106,7 @@ subroutine mpas_duplicate_field2d_real(src, dst, copy_array_only) !{{{ type (field2DReal), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_array.inc" @@ -1708,6 +2134,7 @@ subroutine mpas_duplicate_field3d_real(src, dst, copy_array_only) !{{{ type (field3DReal), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_array.inc" @@ -1735,6 +2162,7 @@ subroutine mpas_duplicate_field4d_real(src, dst, copy_array_only) 
!{{{ type (field4DReal), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_array.inc" @@ -1762,6 +2190,7 @@ subroutine mpas_duplicate_field5d_real(src, dst, copy_array_only) !{{{ type (field5DReal), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_array.inc" @@ -1789,6 +2218,7 @@ subroutine mpas_duplicate_field0d_integer(src, dst, copy_array_only) !{{{ type (field0DInteger), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_scalar.inc" @@ -1816,6 +2246,7 @@ subroutine mpas_duplicate_field1d_integer(src, dst, copy_array_only) !{{{ type (field1DInteger), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_array.inc" @@ -1843,6 +2274,7 @@ subroutine mpas_duplicate_field2d_integer(src, dst, copy_array_only) !{{{ type (field2DInteger), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_array.inc" @@ -1870,6 +2302,7 @@ subroutine mpas_duplicate_field3d_integer(src, dst, copy_array_only) !{{{ type (field3DInteger), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_array.inc" @@ -1897,6 +2330,7 @@ subroutine mpas_duplicate_field0d_char(src, dst, copy_array_only) !{{{ type (field0DChar), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_scalar.inc" @@ -1924,6 +2358,7 @@ subroutine mpas_duplicate_field1d_char(src, dst, copy_array_only) !{{{ type (field1DChar), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_array.inc" @@ -1951,6 +2386,7 @@ subroutine mpas_duplicate_field0d_logical(src, dst, 
copy_array_only) !{{{ type (field0DLogical), pointer :: src_cursor, dst_cursor logical :: local_copy_only + integer :: threadNum, iConstituent #include "duplicate_field_scalar.inc" @@ -1977,7 +2413,7 @@ subroutine mpas_shift_time_levs_0dreal(fldarr)!{{{ type (field0DReal), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field0DReal), dimension(:), pointer :: fldarr_ptr real(kind=RKIND) :: scalar @@ -2006,7 +2442,7 @@ subroutine mpas_shift_time_levs_1dreal(fldarr)!{{{ type (field1DReal), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field1DReal), dimension(:), pointer :: fldarr_ptr real(kind=RKIND), dimension(:), pointer :: arr_ptr @@ -2035,7 +2471,7 @@ subroutine mpas_shift_time_levs_2dreal(fldarr)!{{{ type (field2DReal), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field2DReal), dimension(:), pointer :: fldarr_ptr real(kind=RKIND), dimension(:,:), pointer :: arr_ptr @@ -2064,7 +2500,7 @@ subroutine mpas_shift_time_levs_3dreal(fldarr)!{{{ type (field3DReal), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field3DReal), dimension(:), pointer :: fldarr_ptr real(kind=RKIND), dimension(:,:,:), pointer :: arr_ptr @@ -2093,7 +2529,7 @@ subroutine mpas_shift_time_levs_4dreal(fldarr)!{{{ type (field4DReal), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field4DReal), dimension(:), pointer :: fldarr_ptr real(kind=RKIND), dimension(:,:,:,:), pointer :: arr_ptr @@ -2122,7 +2558,7 @@ subroutine mpas_shift_time_levs_5dreal(fldarr)!{{{ type (field5DReal), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field5DReal), dimension(:), pointer :: fldarr_ptr real(kind=RKIND), dimension(:,:,:,:,:), pointer :: arr_ptr @@ -2151,7 +2587,7 @@ subroutine mpas_shift_time_levs_0dinteger(fldarr)!{{{ type (field0DInteger), 
dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field0DInteger), dimension(:), pointer :: fldarr_ptr integer :: scalar @@ -2180,7 +2616,7 @@ subroutine mpas_shift_time_levs_1dinteger(fldarr)!{{{ type (field1DInteger), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field1DInteger), dimension(:), pointer :: fldarr_ptr integer, dimension(:), pointer :: arr_ptr @@ -2209,7 +2645,7 @@ subroutine mpas_shift_time_levs_2dinteger(fldarr)!{{{ type (field2DInteger), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field2DInteger), dimension(:), pointer :: fldarr_ptr integer, dimension(:,:), pointer :: arr_ptr @@ -2238,7 +2674,7 @@ subroutine mpas_shift_time_levs_3dinteger(fldarr)!{{{ type (field3DInteger), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field3DInteger), dimension(:), pointer :: fldarr_ptr integer, dimension(:,:,:), pointer :: arr_ptr @@ -2267,7 +2703,7 @@ subroutine mpas_shift_time_levs_0dchar(fldarr)!{{{ type (field0DChar), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field0DChar), dimension(:), pointer :: fldarr_ptr character (len=StrKIND) :: scalar @@ -2296,7 +2732,7 @@ subroutine mpas_shift_time_levs_1dchar(fldarr)!{{{ type (field1DChar), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field1DChar), dimension(:), pointer :: fldarr_ptr character (len=StrKIND), dimension(:), pointer :: arr_ptr @@ -2325,7 +2761,7 @@ subroutine mpas_shift_time_levs_0dlogical(fldarr)!{{{ type (field0DLogical), dimension(:), pointer :: fldarr - integer :: i, nlevs + integer :: i, nlevs, threadNum type (field0DLogical), dimension(:), pointer :: fldarr_ptr logical :: scalar diff --git a/src/framework/mpas_field_types.inc b/src/framework/mpas_field_types.inc index c8b333de90..7940728041 100644 --- 
a/src/framework/mpas_field_types.inc +++ b/src/framework/mpas_field_types.inc @@ -21,12 +21,13 @@ character (len=StrKIND), dimension(5) :: dimNames integer, dimension(5) :: dimSizes real (kind=RKIND) :: defaultValue + real (kind=RKIND) :: missingValue logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray logical :: isPersistent - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! Pointers to the prev and next blocks for this field on this task type (field5DReal), pointer :: prev, next @@ -53,12 +54,13 @@ character (len=StrKIND), dimension(4) :: dimNames integer, dimension(4) :: dimSizes real (kind=RKIND) :: defaultValue + real (kind=RKIND) :: missingValue logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray logical :: isPersistent - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! Pointers to the prev and next blocks for this field on this task type (field4DReal), pointer :: prev, next @@ -86,12 +88,13 @@ character (len=StrKIND), dimension(3) :: dimNames integer, dimension(3) :: dimSizes real (kind=RKIND) :: defaultValue + real (kind=RKIND) :: missingValue logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray logical :: isPersistent - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! 
Pointers to the prev and next blocks for this field on this task type (field3DReal), pointer :: prev, next @@ -118,12 +121,13 @@ character (len=StrKIND), dimension(2) :: dimNames integer, dimension(2) :: dimSizes real (kind=RKIND) :: defaultValue + real (kind=RKIND) :: missingValue logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray logical :: isPersistent - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! Pointers to the prev and next blocks for this field on this task type (field2DReal), pointer :: prev, next @@ -150,12 +154,13 @@ character (len=StrKIND), dimension(1) :: dimNames integer, dimension(1) :: dimSizes real (kind=RKIND) :: defaultValue + real (kind=RKIND) :: missingValue logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray logical :: isPersistent - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! Pointers to the prev and next blocks for this field on this task type (field1DReal), pointer :: prev, next @@ -180,11 +185,12 @@ character (len=StrKIND) :: fieldName character (len=StrKIND), dimension(:), pointer :: constituentNames => null() real (kind=RKIND) :: defaultValue + real (kind=RKIND) :: missingValue logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! 
Pointers to the prev and next blocks for this field on this task type (field0DReal), pointer :: prev, next @@ -210,13 +216,14 @@ character (len=StrKIND), dimension(:), pointer :: constituentNames => null() character (len=StrKIND), dimension(3) :: dimNames integer :: defaultValue + integer :: missingValue integer, dimension(3) :: dimSizes logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray logical :: isPersistent - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! Pointers to the prev and next blocks for this field on this task type (field3DInteger), pointer :: prev, next @@ -242,13 +249,14 @@ character (len=StrKIND), dimension(:), pointer :: constituentNames => null() character (len=StrKIND), dimension(2) :: dimNames integer :: defaultValue + integer :: missingValue integer, dimension(2) :: dimSizes logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray logical :: isPersistent - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! Pointers to the prev and next blocks for this field on this task type (field2DInteger), pointer :: prev, next @@ -274,13 +282,14 @@ character (len=StrKIND), dimension(:), pointer :: constituentNames => null() character (len=StrKIND), dimension(1) :: dimNames integer :: defaultValue + integer :: missingValue integer, dimension(1) :: dimSizes logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray logical :: isPersistent - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! 
Pointers to the prev and next blocks for this field on this task type (field1DInteger), pointer :: prev, next @@ -305,11 +314,12 @@ character (len=StrKIND) :: fieldName character (len=StrKIND), dimension(:), pointer :: constituentNames => null() integer :: defaultValue + integer :: missingValue logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! Pointers to the prev and next blocks for this field on this task type (field0DInteger), pointer :: prev, next @@ -336,12 +346,13 @@ character (len=StrKIND), dimension(1) :: dimNames integer, dimension(1) :: dimSizes character (len=StrKIND) :: defaultValue + character (len=StrKIND) :: missingValue logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray logical :: isPersistent - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! Pointers to the prev and next blocks for this field on this task type (field1DChar), pointer :: prev, next @@ -366,11 +377,12 @@ character (len=StrKIND) :: fieldName character (len=StrKIND), dimension(:), pointer :: constituentNames => null() character (len=StrKIND) :: defaultValue + character (len=StrKIND) :: missingValue logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! 
Pointers to the prev and next blocks for this field on this task type (field0DChar), pointer :: prev, next @@ -395,11 +407,12 @@ character (len=StrKIND) :: fieldName character (len=StrKIND), dimension(:), pointer :: constituentNames => null() logical :: defaultValue + logical :: missingValue logical :: isDecomposed logical :: hasTimeDimension logical :: isActive logical :: isVarArray - type (att_list_type), pointer :: attList => null() + type (att_lists_type), dimension(:), pointer :: attLists => null() ! Pointers to the prev and next blocks for this field on this task type (field0DLogical), pointer :: prev, next diff --git a/src/framework/mpas_forcing.F b/src/framework/mpas_forcing.F index 5d182ef51d..9861d93701 100644 --- a/src/framework/mpas_forcing.F +++ b/src/framework/mpas_forcing.F @@ -13,6 +13,7 @@ module mpas_forcing #define COMMA , +#define FORCING_DEBUG_DO(M) !M #define FORCING_DEBUG_WRITE(M) !write(stderrUnit,*) M #define FORCING_WARNING_WRITE(M) write(stderrUnit,*) 'WARNING: '//M #define FORCING_ERROR_WRITE(M) write(stderrUnit,*) 'ERROR: '//M @@ -23,6 +24,7 @@ module mpas_forcing use mpas_timekeeping use mpas_io_streams use mpas_stream_manager + use mpas_abort, only : mpas_dmpar_global_abort implicit none @@ -1000,8 +1002,11 @@ subroutine get_initial_forcing_data(&!{{{ integer :: & iTime, & ! index of the forcing time stops + iTime2, & ierr + FORCING_DEBUG_WRITE('-------------------------------------------') + ! loop over the forcing time slots do iTime = 1, forcingStream % nTimeStencil @@ -1057,13 +1062,41 @@ subroutine get_initial_forcing_data(&!{{{ endif ! other initialization types - if (trim(forcingStream % forcingInitializationType) == "next" .and. & - iTime <= forcingStream % nTimeStencilLower) then - forcingTimeCycle = forcingStream % forcingTimes(forcingStream % nTimeStencilLower+1) + if (trim(forcingStream % forcingInitializationType) == "next") then + + if (forcingGroup % forcingCycleStartInclusive) then + + if (forcingTimeCycle .lt. 
forcingGroup % forcingCycleStart) then + + do iTime2 = 1, forcingStream % nTimeStencil + if (forcingStream % forcingTimes(iTime2) .ge. forcingGroup % forcingCycleStart) then + forcingTimeCycle = forcingStream % forcingTimes(iTime2) + exit + endif + enddo ! iTime2 + + endif + + else + + if (forcingTimeCycle .le. forcingGroup % forcingCycleStart) then + + do iTime2 = 1, forcingStream % nTimeStencil + if (forcingStream % forcingTimes(iTime2) .gt. forcingGroup % forcingCycleStart) then + forcingTimeCycle = forcingStream % forcingTimes(iTime2) + exit + endif + enddo ! iTime2 + + endif + + endif + endif ! load the data into the slot call mpas_get_time(forcingTimeCycle, dateTimeString=forcingTimeStr) + FORCING_DEBUG_WRITE('-- Forcing: get_initial_forcing_data: READ ATTEMPT: ' COMMA trim(forcingTimeStr)) call MPAS_stream_mgr_read(& streamManager, & forcingStream % forcingStreamID, & @@ -1078,10 +1111,12 @@ subroutine get_initial_forcing_data(&!{{{ call MPAS_dmpar_global_abort('Forcing: Error: MPAS_stream_mgr_read') endif - FORCING_DEBUG_WRITE('-- Forcing: get_initial_forcing_data: ' COMMA iTime COMMA trim(forcingStream % forcingStreamID) COMMA " " COMMA trim(forcingTimeStr) COMMA ierr) + FORCING_DEBUG_WRITE('-- Forcing: get_initial_forcing_data: ' COMMA iTime COMMA trim(forcingStream % forcingStreamID) COMMA " " COMMA trim(forcingTimeStr)) enddo ! iTime + FORCING_DEBUG_WRITE('-------------------------------------------') + end subroutine get_initial_forcing_data!}}} !||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| @@ -1336,7 +1371,8 @@ subroutine get_forcing(&!{{{ forcingTimeNew ! the next forcing time character(len=strKIND) :: & - forcingTimeCycleStr ! the cycled forcing time + forcingTimeCycleStr, & ! the cycled forcing time + forcingTimeStr ! time string of forcing integer :: & iTime, & ! 
forcing times index @@ -1397,10 +1433,13 @@ subroutine get_forcing(&!{{{ call MPAS_dmpar_global_abort('Forcing: Error: MPAS_stream_mgr_read') endif - FORCING_DEBUG_WRITE('-- Forcing: mpas_forcing_get_forcing: READ: '//trim(forcingTimeCycleStr)//' ' COMMA ierr) + FORCING_DEBUG_WRITE('-- Forcing: mpas_forcing_get_forcing: READ: ' COMMA trim(forcingStream % forcingStreamID) COMMA " " COMMA trim(forcingTimeCycleStr)) endif ! forcingAlarmID ringing + FORCING_DEBUG_DO(call mpas_get_time(currentTime, dateTimeString=forcingTimeStr)) + FORCING_DEBUG_WRITE('-- Forcing: mpas_forcing_get_forcing: FORCING: ' COMMA trim(forcingStream % forcingStreamID) COMMA " " COMMA trim(forcingTimeStr) COMMA " " COMMA trim(forcingTimeStrAll)) + ! interpolate data call forcing_data_interpolation(& forcingGroup % domain_ptr, & @@ -1542,7 +1581,7 @@ subroutine get_interpolants_linear(interpolants, forcingStream, currentTime)!{{{ call mpas_get_timeInterval(diff2, currentTime, dt=diffr2) interpolants(1) = diffr2 / diffr - interpolants(2) = diffr1 / diffr + interpolants(2) = 1.0_RKIND - interpolants(1) !diffr1 / diffr end subroutine get_interpolants_linear!}}} @@ -2464,25 +2503,54 @@ subroutine mpas_forcing_write_restart_times(forcingGroupHead, forcingTimeRestart character(len=strKIND) :: forcingClockTimeStr + integer :: restartTimestampUnit + ! open restart time file - open(22,file=trim(forcingTimeRestartFilename), form='formatted', status='replace') + forcingGroup => forcingGroupHead + do while (associated(forcingGroup)) + + if (forcingGroup % domain_ptr % dminfo % my_proc_id == IO_NODE) then + + call mpas_new_unit(restartTimestampUnit) + open(restartTimestampUnit,file=trim(forcingTimeRestartFilename), form='formatted', status='replace') + exit + + endif + + forcingGroup => forcingGroup % next + enddo ! loop over forcing groups forcingGroup => forcingGroupHead do while (associated(forcingGroup)) - ! 
get the forcing clock time - forcingClockTime = MPAS_get_clock_time(forcingGroup % forcingClock, MPAS_NOW) + if (forcingGroup % domain_ptr % dminfo % my_proc_id == IO_NODE) then - call MPAS_get_time(forcingClockTime, dateTimeString=forcingClockTimeStr) + ! get the forcing clock time + forcingClockTime = MPAS_get_clock_time(forcingGroup % forcingClock, MPAS_NOW) - ! write the forcing time to the restart file - write(22,*) trim(forcingGroup % forcingGroupName), " ", trim(forcingClockTimeStr) + call MPAS_get_time(forcingClockTime, dateTimeString=forcingClockTimeStr) + + ! write the forcing time to the restart file + write(restartTimestampUnit,*) trim(forcingGroup % forcingGroupName), " ", trim(forcingClockTimeStr) + + endif forcingGroup => forcingGroup % next end do - close(22) + ! close the file + forcingGroup => forcingGroupHead + do while (associated(forcingGroup)) + + if (forcingGroup % domain_ptr % dminfo % my_proc_id == IO_NODE) then + close(restartTimestampUnit) + call mpas_release_unit(restartTimestampUnit) + exit + endif + + forcingGroup => forcingGroup % next + end do end subroutine mpas_forcing_write_restart_times!}}} @@ -2494,7 +2562,7 @@ end subroutine mpas_forcing_write_restart_times!}}} !> \author Adrian K. Turner, LANL !> \date 9th December 2014 !> \details -!> read in the forcing group restart times from an external file and +!> read in the forcing group restart times from an external file and !> set the correct forcing group clock to this time ! !----------------------------------------------------------------------- @@ -2541,7 +2609,7 @@ subroutine read_restart_times(&!{{{ if (trim(forcingGroup % forcingGroupName) == trim(forcingGroupName)) then ! 
set the forcing group time - FORCING_DEBUG_WRITE('-- Forcing: read_restart_times: set time') + FORCING_DEBUG_WRITE('-- Forcing: read_restart_times: set time' COMMA " " COMMA trim(forcingClockTimeStr)) call MPAS_set_time(forcingClockTime, dateTimeString=trim(forcingClockTimeStr)) FORCING_DEBUG_WRITE('-- Forcing: read_restart_times: create clock') call mpas_create_clock(forcingGroup % forcingClock, startTime=forcingClockTime, timeStep=timeStep, stopTime=stopTime) diff --git a/src/framework/mpas_framework.F b/src/framework/mpas_framework.F index 57240d2991..5a0fdf055e 100644 --- a/src/framework/mpas_framework.F +++ b/src/framework/mpas_framework.F @@ -25,7 +25,7 @@ module mpas_framework use mpas_timekeeping use mpas_io use mpas_io_units - use mpas_configure + use mpas_block_decomp contains @@ -82,9 +82,10 @@ subroutine mpas_framework_init_phase2(domain, io_system, calendar)!{{{ integer :: pio_num_iotasks integer :: pio_stride + call mpas_timer_init(domain) + #ifdef MPAS_DEBUG call mpas_pool_set_error_level(MPAS_POOL_WARN) - call mpas_pool_set_error_level(MPAS_POOL_FATAL) #endif call mpas_pool_get_config(domain % configs, 'config_calendar_type', config_calendar_type) @@ -102,7 +103,8 @@ subroutine mpas_framework_init_phase2(domain, io_system, calendar)!{{{ if (pio_num_iotasks == 0) then pio_num_iotasks = domain % dminfo % nprocs end if - call MPAS_io_init(domain % dminfo, pio_num_iotasks, pio_stride, io_system) + domain % ioContext % dminfo => domain % dminfo + call MPAS_io_init(domain % ioContext, pio_num_iotasks, pio_stride, io_system) end subroutine mpas_framework_init_phase2!}}} @@ -125,12 +127,14 @@ subroutine mpas_framework_finalize(dminfo, domain, io_system)!{{{ type (domain_type), pointer :: domain type (iosystem_desc_t), optional, pointer :: io_system - call MPAS_io_finalize(io_system) + call MPAS_io_finalize(domain % ioContext, .false.) 
call mpas_deallocate_domain(domain) call mpas_dmpar_finalize(dminfo) + call mpas_finish_block_proc_list(dminfo) + call mpas_timekeeping_finalize() end subroutine mpas_framework_finalize!}}} diff --git a/src/framework/mpas_io.F b/src/framework/mpas_io.F index bc8090d41c..499f412c6b 100644 --- a/src/framework/mpas_io.F +++ b/src/framework/mpas_io.F @@ -11,6 +11,7 @@ module mpas_io use mpas_attlist use mpas_dmpar use mpas_io_units + use mpas_abort, only : mpas_dmpar_global_abort use pio use piolib_mod @@ -22,6 +23,14 @@ module mpas_io #else integer, parameter :: PIO_REALKIND = PIO_DOUBLE #endif + + integer, parameter :: MPAS_INT_FILLVAL = NF_FILL_INT + character, parameter :: MPAS_CHAR_FILLVAL = achar(NF_FILL_CHAR) +#ifdef SINGLE_PRECISION + real (kind=RKIND), parameter :: MPAS_REAL_FILLVAL = NF_FILL_FLOAT +#else + real (kind=RKIND), parameter :: MPAS_REAL_FILLVAL = NF_FILL_DOUBLE +#endif interface MPAS_io_get_var module procedure MPAS_io_get_var_int0d @@ -36,6 +45,7 @@ module mpas_io module procedure MPAS_io_get_var_real4d module procedure MPAS_io_get_var_real5d module procedure MPAS_io_get_var_char0d + module procedure MPAS_io_get_var_char1d end interface MPAS_io_get_var interface MPAS_io_put_var @@ -51,6 +61,7 @@ module mpas_io module procedure MPAS_io_put_var_real4d module procedure MPAS_io_put_var_real5d module procedure MPAS_io_put_var_char0d + module procedure MPAS_io_put_var_char1d end interface MPAS_io_put_var interface MPAS_io_get_att @@ -69,46 +80,64 @@ module mpas_io module procedure MPAS_io_put_att_text end interface MPAS_io_put_att - type (iosystem_desc_t), pointer, private, save :: pio_iosystem - type (decomplist_type), pointer, private :: decomp_list => null() - type (dm_info), private :: local_dminfo - integer, private:: master_pio_iotype = -999 - - contains - subroutine MPAS_io_init(dminfo, io_task_count, io_task_stride, io_system, ierr) + subroutine MPAS_io_init(ioContext, io_task_count, io_task_stride, io_system, ierr) implicit none - type 
(dm_info), intent(in) :: dminfo + type (mpas_io_context_type), intent(inout) :: ioContext integer, intent(in) :: io_task_count integer, intent(in) :: io_task_stride type (iosystem_desc_t), optional, pointer :: io_system integer, intent(out), optional :: ierr + integer :: local_ierr + + local_ierr = 0 + ! write(stderrUnit,*) 'Called MPAS_io_init()' if (present(ierr)) ierr = MPAS_IO_NOERR - local_dminfo = dminfo - if (present(io_system)) then - pio_iosystem => io_system + ioContext % pio_iosystem => io_system else !write(stderrUnit,*) 'MGD PIO_init' - allocate(pio_iosystem) - call PIO_init(local_dminfo % my_proc_id, & ! comp_rank - local_dminfo % comm, & ! comp_comm + if ( io_task_count < 0 .or. io_task_count > ioContext % dminfo % nprocs ) then + write(stderrUnit, *) ' ERROR: PIO io_task_count has a value of ', io_task_count + write(stderrUnit, *) ' It must be between 1 and ', ioContext % dminfo % nprocs + local_ierr = 1 + end if + + if ( io_task_stride <= 0 .or. io_task_stride > ioContext % dminfo % nprocs ) then + write(stderrUnit, *) ' ERROR: PIO io_task_stride has a value of ', io_task_stride + write(stderrUnit, *) ' It must be between 1 and ', ioContext % dminfo % nprocs + local_ierr = 1 + end if + + if ( io_task_stride * io_task_count > ioContext % dminfo % nprocs ) then + write(stderrUnit, *) ' ERROR: PIO io_task_stride * io_task_count has a value of ', io_task_stride * io_task_count + write(stderrUnit, *) ' It must be between 1 and ', ioContext % dminfo % nprocs + local_ierr = 1 + end if + + if ( local_ierr /= 0 ) then + call mpas_dmpar_global_abort('ERROR: Invalid PIO configuration.') + end if + + allocate(ioContext % pio_iosystem) + call PIO_init(ioContext % dminfo % my_proc_id, & ! comp_rank + ioContext % dminfo % comm, & ! comp_comm io_task_count, & ! num_iotasks 0, & ! num_aggregator io_task_stride, & ! stride PIO_rearr_box, & ! rearr - pio_iosystem) ! iosystem + ioContext % pio_iosystem) ! 
iosystem end if - call pio_seterrorhandling(pio_iosystem, PIO_BCAST_ERROR) + call pio_seterrorhandling(ioContext % pio_iosystem, PIO_BCAST_ERROR) end subroutine MPAS_io_init @@ -124,10 +153,11 @@ end subroutine MPAS_io_init !> This routine sets the master io type for use with PIO. ! !----------------------------------------------------------------------- - subroutine MPAS_io_set_iotype(io_type_in, ierr) + subroutine MPAS_io_set_iotype(ioContext, io_type_in, ierr) implicit none + type (mpas_io_context_type), intent(inout) :: ioContext integer, intent(in) :: io_type_in integer, intent(out), optional :: ierr @@ -135,7 +165,7 @@ subroutine MPAS_io_set_iotype(io_type_in, ierr) ierr = MPAS_IO_NOERR end if - master_pio_iotype = io_type_in + ioContext % master_pio_iotype = io_type_in end subroutine MPAS_io_set_iotype @@ -152,28 +182,30 @@ end subroutine MPAS_io_set_iotype !> "unset" value. ! !----------------------------------------------------------------------- - subroutine MPAS_io_unset_iotype(ierr) + subroutine MPAS_io_unset_iotype(ioContext, ierr) implicit none + type (mpas_io_context_type), intent(inout) :: ioContext integer, intent(out), optional :: ierr if (present(ierr)) then ierr = MPAS_IO_NOERR end if - master_pio_iotype = -999 + ioContext % master_pio_iotype = -999 end subroutine MPAS_io_unset_iotype - type (MPAS_IO_Handle_type) function MPAS_io_open(filename, mode, ioformat, clobber_file, truncate_file, ierr) + type (MPAS_IO_Handle_type) function MPAS_io_open(filename, mode, ioformat, ioContext, clobber_file, truncate_file, ierr) implicit none character (len=*), intent(in) :: filename integer, intent(in) :: mode integer, intent(in) :: ioformat + type (mpas_io_context_type), pointer :: ioContext logical, intent(in), optional :: clobber_file logical, intent(in), optional :: truncate_file integer, intent(out), optional :: ierr @@ -218,9 +250,10 @@ type (MPAS_IO_Handle_type) function MPAS_io_open(filename, mode, ioformat, clobb MPAS_io_open % filename = filename 
MPAS_io_open % iomode = mode MPAS_io_open % ioformat = ioformat + MPAS_io_open % ioContext => ioContext - if (master_pio_iotype /= -999) then - pio_iotype = master_pio_iotype + if (ioContext % master_pio_iotype /= -999) then + pio_iotype = ioContext % master_pio_iotype pio_mode = PIO_64BIT_OFFSET else if (ioformat == MPAS_IO_PNETCDF) then @@ -240,10 +273,10 @@ type (MPAS_IO_Handle_type) function MPAS_io_open(filename, mode, ioformat, clobb if (mode == MPAS_IO_WRITE) then !write(stderrUnit,*) 'MGD PIO_createfile' - if (local_dminfo % my_proc_id == 0) then + if (ioContext % dminfo % my_proc_id == 0) then inquire(file=trim(filename), exist=exists) end if - call mpas_dmpar_bcast_logical(local_dminfo, exists) + call mpas_dmpar_bcast_logical(ioContext % dminfo, exists) ! If the file exists and we are not allowed to clobber it, return an ! appropriate error code @@ -253,10 +286,10 @@ type (MPAS_IO_Handle_type) function MPAS_io_open(filename, mode, ioformat, clobb end if if (exists .and. (.not. local_truncate)) then - pio_ierr = PIO_openfile(pio_iosystem, MPAS_io_open % pio_file, pio_iotype, trim(filename), PIO_write) + pio_ierr = PIO_openfile(ioContext % pio_iosystem, MPAS_io_open % pio_file, pio_iotype, trim(filename), PIO_write) MPAS_io_open % preexisting_file = .true. 
else - pio_ierr = PIO_createfile(pio_iosystem, MPAS_io_open % pio_file, pio_iotype, trim(filename), pio_mode) + pio_ierr = PIO_createfile(ioContext % pio_iosystem, MPAS_io_open % pio_file, pio_iotype, trim(filename), pio_mode) if (exists) then write(stderrUnit,'(a)') 'MPAS I/O: Truncating existing data in output file '//trim(filename) end if @@ -269,7 +302,7 @@ type (MPAS_IO_Handle_type) function MPAS_io_open(filename, mode, ioformat, clobb return end if !write(stderrUnit,*) 'MGD PIO_openfile' - pio_ierr = PIO_openfile(pio_iosystem, MPAS_io_open % pio_file, pio_iotype, trim(filename), PIO_nowrite) + pio_ierr = PIO_openfile(ioContext % pio_iosystem, MPAS_io_open % pio_file, pio_iotype, trim(filename), PIO_nowrite) endif if (pio_ierr /= PIO_noerr) then if (present(ierr)) ierr = MPAS_IO_ERR_PIO @@ -286,10 +319,10 @@ type (MPAS_IO_Handle_type) function MPAS_io_open(filename, mode, ioformat, clobb end if ! Here we're depending on the undocumented behavior of PIO to return a - ! -1 dimension ID when an unlimited dimension is not found. This + ! negative dimension ID when an unlimited dimension is not found. This ! might change in the future, causing this code to break, though it ! shouldn't break for files with unlimited dimensions. - if ( MPAS_io_open % pio_unlimited_dimid /= -1 ) then + if ( MPAS_io_open % pio_unlimited_dimid >= 0 ) then pio_ierr = PIO_inq_dimlen(MPAS_io_open % pio_file, MPAS_io_open % pio_unlimited_dimid, MPAS_io_open % preexisting_records) if (pio_ierr /= PIO_noerr) then if (present(ierr)) ierr = MPAS_IO_ERR_PIO @@ -848,7 +881,7 @@ subroutine MPAS_io_def_var(handle, fieldname, fieldtype, dimnames, precision, ie call MPAS_io_inq_var(handle, fieldname, inq_fieldtype, inq_ndims, inq_dimnames, ierr=pio_ierr) if (pio_ierr /= MPAS_IO_ERR_PIO) then - ! Verify that the type and dimensions matche... + ! Verify that the type and dimensions match... 
if (fieldtype == MPAS_IO_DOUBLE) then if (local_precision == MPAS_IO_SINGLE_PRECISION) then pio_type = MPAS_IO_REAL @@ -933,8 +966,12 @@ subroutine MPAS_io_def_var(handle, fieldname, fieldtype, dimnames, precision, ie pio_type = PIO_double end if else if (new_fieldlist_node % fieldhandle % field_type == MPAS_IO_REAL) then - !TODO: handle up-conversion from single-precision to double-precision in file? - pio_type = PIO_real + if (local_precision == MPAS_IO_DOUBLE_PRECISION) then + pio_type = PIO_double + new_fieldlist_node % fieldhandle % field_type = MPAS_IO_DOUBLE + else + pio_type = PIO_real + end if else if (new_fieldlist_node % fieldhandle % field_type == MPAS_IO_INT) then pio_type = PIO_int else if (new_fieldlist_node % fieldhandle % field_type == MPAS_IO_CHAR) then @@ -1030,7 +1067,7 @@ subroutine MPAS_io_set_var_indices(handle, fieldname, indices, ierr) implicit none - type (MPAS_IO_Handle_type), intent(in) :: handle + type (MPAS_IO_Handle_type), intent(inout) :: handle character (len=*), intent(in) :: fieldname integer, dimension(:), intent(in) :: indices integer, intent(out), optional :: ierr @@ -1038,12 +1075,12 @@ subroutine MPAS_io_set_var_indices(handle, fieldname, indices, ierr) type (fieldlist_type), pointer :: field_cursor integer :: pio_type integer :: ndims - integer (kind=PIO_OFFSET) :: pd, indx + integer (kind=MPAS_IO_OFFSET_KIND) :: pd, indx integer :: i integer :: early_return, early_return_global - integer (kind=PIO_OFFSET) :: i1, i2, i3, i4, i5 + integer (kind=MPAS_IO_OFFSET_KIND) :: i1, i2, i3, i4, i5 integer, dimension(:), pointer :: dimlist - integer (kind=PIO_OFFSET), dimension(:), pointer :: compdof + integer (kind=MPAS_IO_OFFSET_KIND), dimension(:), pointer :: compdof type (decomplist_type), pointer :: decomp_cursor, new_decomp ! write(stderrUnit,*) 'Called MPAS_io_set_var_indices()' @@ -1086,7 +1123,7 @@ subroutine MPAS_io_set_var_indices(handle, fieldname, indices, ierr) ! ! Check whether a suitable decomposition already exists ! 
- decomp_cursor => decomp_list + decomp_cursor => handle % ioContext % decomp_list !if (.not. associated(decomp_cursor)) write(stderrUnit,*) 'No existing decompositions to check...' early_return = 0 DECOMP_LOOP: do while (associated(decomp_cursor)) @@ -1161,7 +1198,7 @@ subroutine MPAS_io_set_var_indices(handle, fieldname, indices, ierr) ! ! If all tasks have set early_return to 1, then we have a usable decomposition and can return ! - call mpas_dmpar_min_int(local_dminfo, early_return, early_return_global) + call mpas_dmpar_min_int(handle % ioContext % dminfo, early_return, early_return_global) if (early_return_global == 1) then return end if @@ -1204,11 +1241,11 @@ subroutine MPAS_io_set_var_indices(handle, fieldname, indices, ierr) do i=1,ndims-1 dimlist(i) = field_cursor % fieldhandle % dims(i) % dimsize new_decomp % decomphandle % dims(i) = dimlist(i) - pd = pd * int(dimlist(i),PIO_OFFSET) + pd = pd * int(dimlist(i),MPAS_IO_OFFSET_KIND) end do new_decomp % decomphandle % dims(ndims) = field_cursor % fieldhandle % dims(ndims) % dimsize dimlist(ndims) = size(indices) - pd = pd * int(dimlist(ndims),PIO_OFFSET) + pd = pd * int(dimlist(ndims),MPAS_IO_OFFSET_KIND) allocate(compdof(pd)) @@ -1220,10 +1257,10 @@ subroutine MPAS_io_set_var_indices(handle, fieldname, indices, ierr) do i2=1,dimlist(2) do i1=1,dimlist(1) compdof(indx) = i1 & - + (i2-1)*int(dimlist(1),PIO_OFFSET) & - + (i3-1)*int(dimlist(2),PIO_OFFSET)*int(dimlist(1),PIO_OFFSET) & - + (i4-1)*int(dimlist(3),PIO_OFFSET)*int(dimlist(2),PIO_OFFSET)*int(dimlist(1),PIO_OFFSET) & - + int(indices(i5)-1,PIO_OFFSET)*int(dimlist(4),PIO_OFFSET)*int(dimlist(3),PIO_OFFSET)*int(dimlist(2),PIO_OFFSET)*int(dimlist(1),PIO_OFFSET) + + (i2-1)*int(dimlist(1),MPAS_IO_OFFSET_KIND) & + + (i3-1)*int(dimlist(2),MPAS_IO_OFFSET_KIND)*int(dimlist(1),MPAS_IO_OFFSET_KIND) & + + (i4-1)*int(dimlist(3),MPAS_IO_OFFSET_KIND)*int(dimlist(2),MPAS_IO_OFFSET_KIND)*int(dimlist(1),MPAS_IO_OFFSET_KIND) & + + 
int(indices(i5)-1,MPAS_IO_OFFSET_KIND)*int(dimlist(4),MPAS_IO_OFFSET_KIND)*int(dimlist(3),MPAS_IO_OFFSET_KIND)*int(dimlist(2),MPAS_IO_OFFSET_KIND)*int(dimlist(1),MPAS_IO_OFFSET_KIND) indx = indx + 1 end do end do @@ -1236,9 +1273,9 @@ subroutine MPAS_io_set_var_indices(handle, fieldname, indices, ierr) do i2=1,dimlist(2) do i1=1,dimlist(1) compdof(indx) = i1 & - + (i2-1)*int(dimlist(1),PIO_OFFSET) & - + (i3-1)*int(dimlist(2),PIO_OFFSET)*int(dimlist(1),PIO_OFFSET) & - + int(indices(i4)-1,PIO_OFFSET)*int(dimlist(3),PIO_OFFSET)*int(dimlist(2),PIO_OFFSET)*int(dimlist(1),PIO_OFFSET) + + (i2-1)*int(dimlist(1),MPAS_IO_OFFSET_KIND) & + + (i3-1)*int(dimlist(2),MPAS_IO_OFFSET_KIND)*int(dimlist(1),MPAS_IO_OFFSET_KIND) & + + int(indices(i4)-1,MPAS_IO_OFFSET_KIND)*int(dimlist(3),MPAS_IO_OFFSET_KIND)*int(dimlist(2),MPAS_IO_OFFSET_KIND)*int(dimlist(1),MPAS_IO_OFFSET_KIND) indx = indx + 1 end do end do @@ -1248,7 +1285,7 @@ subroutine MPAS_io_set_var_indices(handle, fieldname, indices, ierr) do i3=1,dimlist(3) do i2=1,dimlist(2) do i1=1,dimlist(1) - compdof(indx) = i1 + (i2-1)*int(dimlist(1),PIO_OFFSET) + int(indices(i3)-1,PIO_OFFSET)*int(dimlist(2),PIO_OFFSET)*int(dimlist(1),PIO_OFFSET) + compdof(indx) = i1 + (i2-1)*int(dimlist(1),MPAS_IO_OFFSET_KIND) + int(indices(i3)-1,MPAS_IO_OFFSET_KIND)*int(dimlist(2),MPAS_IO_OFFSET_KIND)*int(dimlist(1),MPAS_IO_OFFSET_KIND) indx = indx + 1 end do end do @@ -1256,27 +1293,27 @@ subroutine MPAS_io_set_var_indices(handle, fieldname, indices, ierr) else if (ndims == 2) then do i2=1,dimlist(2) do i1=1,dimlist(1) - compdof(indx) = i1 + int(indices(i2)-1,PIO_OFFSET)*int(dimlist(1),PIO_OFFSET) + compdof(indx) = i1 + int(indices(i2)-1,MPAS_IO_OFFSET_KIND)*int(dimlist(1),MPAS_IO_OFFSET_KIND) indx = indx + 1 end do end do else if (ndims == 1) then do i1=1,dimlist(1) - compdof(indx) = int(indices(i1),PIO_OFFSET) + compdof(indx) = int(indices(i1),MPAS_IO_OFFSET_KIND) indx = indx + 1 end do end if dimlist(ndims) = field_cursor % fieldhandle % dims(ndims) 
% dimsize - call PIO_initdecomp(pio_iosystem, pio_type, dimlist, compdof, new_decomp % decomphandle % pio_iodesc) + call PIO_initdecomp(handle % ioContext % pio_iosystem, pio_type, dimlist, compdof, new_decomp % decomphandle % pio_iodesc) ! Add new decomposition to the list - if (.not. associated(decomp_list)) then - decomp_list => new_decomp + if (.not. associated(handle % ioContext % decomp_list)) then + handle % ioContext % decomp_list => new_decomp !write(stderrUnit,*) 'Adding first item to the decomp_list' else - new_decomp % next => decomp_list - decomp_list => new_decomp + new_decomp % next => handle % ioContext % decomp_list + handle % ioContext % decomp_list => new_decomp !write(stderrUnit,*) 'Adding new decomp to the head of the list' end if @@ -1292,7 +1329,7 @@ end subroutine MPAS_io_set_var_indices subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArray2d, intArray3d, intArray4d, & realVal, realArray1d, realArray2d, realArray3d, realArray4d, realArray5d, & - charVal, ierr) + charVal, charArray1d, ierr) implicit none @@ -1310,6 +1347,7 @@ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArr real (kind=RKIND), dimension(:,:,:,:), intent(out), optional :: realArray4d real (kind=RKIND), dimension(:,:,:,:,:), intent(out), optional :: realArray5d character (len=*), intent(out), optional :: charVal + character (len=*), dimension(:), intent(out), optional :: charArray1d integer, intent(out), optional :: ierr integer :: pio_ierr @@ -1328,6 +1366,8 @@ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArr character (len=StrKIND), dimension(1) :: tempchar type (fieldlist_type), pointer :: field_cursor + integer i, j + real (kind=R4KIND) :: singleVal real (kind=R4KIND), dimension(:), allocatable :: singleArray1d real (kind=R4KIND), dimension(:,:), allocatable :: singleArray2d @@ -1335,6 +1375,13 @@ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArr real 
(kind=R4KIND), dimension(:,:,:,:), allocatable :: singleArray4d real (kind=R4KIND), dimension(:,:,:,:,:), allocatable :: singleArray5d + real (kind=R8KIND) :: doubleVal + real (kind=R8KIND), dimension(:), allocatable :: doubleArray1d + real (kind=R8KIND), dimension(:,:), allocatable :: doubleArray2d + real (kind=R8KIND), dimension(:,:,:), allocatable :: doubleArray3d + real (kind=R8KIND), dimension(:,:,:,:), allocatable :: doubleArray4d + real (kind=R8KIND), dimension(:,:,:,:,:), allocatable :: doubleArray5d + ! Sanity checks if (.not. handle % initialized) then if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE @@ -1363,7 +1410,11 @@ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArr ! write(stderrUnit,*) 'Checking for unlimited dim' if (field_cursor % fieldhandle % has_unlimited_dim) then +#ifdef USE_PIO2 + call PIO_setframe(handle % pio_file, field_cursor % fieldhandle % field_desc, handle % frame_number) +#else call PIO_setframe(field_cursor % fieldhandle % field_desc, handle % frame_number) +#endif start1(1) = handle % frame_number count1(1) = 1 @@ -1383,6 +1434,14 @@ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArr pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % field_desc, singleVal) end if realVal = real(singleVal,RKIND) + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. 
& + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + if (field_cursor % fieldhandle % has_unlimited_dim) then + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start1, doubleVal) + else + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % field_desc, doubleVal) + end if + realVal = real(doubleVal,RKIND) else if (field_cursor % fieldhandle % has_unlimited_dim) then pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start1, realVal) @@ -1409,6 +1468,51 @@ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArr pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start1, count1, tempchar) charVal(1:count1(1)) = tempchar(1)(1:count1(1)) end if + else if (present(charArray1d)) then +! write (0,*) ' value is char1' + if (field_cursor % fieldhandle % has_unlimited_dim) then + ! Can only read one string at a time, since the sizes differ so much (i.e. StrLen != StrKIND) + do i = 1, field_cursor % fieldhandle % dims(2) % dimsize + start3(1) = 1 + start3(2) = i + start3(3) = handle % frame_number + count3(1) = field_cursor % fieldhandle % dims(1) % dimsize ! Should be StrLen + count3(2) = 1 + count3(3) = 1 + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start3, count3, tempchar) + + ! Copy all characters up to the first C null char or the end of + ! the string, whichever comes first + charArray1d(i)(:) = ' ' + do j = 1, len(tempchar(1)) + if ( tempchar(1)(j:j) /= CHAR(0)) then + charArray1d(i)(j:j) = tempchar(1)(j:j) + else + exit + end if + end do + end do + else + ! Can only read one string at a time, since the sizes differ so much (i.e. StrLen != StrKIND) + do i = 1, field_cursor % fieldhandle % dims(2) % dimsize + start2(1) = 1 + start2(2) = i + count2(1) = field_cursor % fieldhandle % dims(1) % dimsize ! 
Should be StrLen + count2(2) = 1 + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start2, count2, tempchar) + + ! Copy all characters up to the first C null char or the end of + ! the string, whichever comes first + charArray1d(i)(:) = ' ' + do j = 1, len(tempchar(1)) + if ( tempchar(1)(j:j) /= CHAR(0)) then + charArray1d(i)(j:j) = tempchar(1)(j:j) + else + exit + end if + end do + end do + end if else if (present(realArray1d)) then ! write (0,*) ' value is real1' if ((field_cursor % fieldhandle % precision == MPAS_IO_SINGLE_PRECISION) .and. & @@ -1432,6 +1536,27 @@ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArr end if realArray1d(:) = real(singleArray1d(:),RKIND) deallocate(singleArray1d) + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. & + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + allocate(doubleArray1d(size(realArray1d,1))) + if (associated(field_cursor % fieldhandle % decomp)) then + call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & + doubleArray1d, pio_ierr) + else + if (field_cursor % fieldhandle % has_unlimited_dim) then + start2(1) = 1 + start2(2) = handle % frame_number + count2(1) = field_cursor % fieldhandle % dims(1) % dimsize + count2(2) = 1 + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start2, count2, doubleArray1d) + else + start1(:) = 1 + count1(1) = field_cursor % fieldhandle % dims(1) % dimsize + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start1, count1, doubleArray1d) + end if + end if + realArray1d(:) = real(doubleArray1d(:),RKIND) + deallocate(doubleArray1d) else if (associated(field_cursor % fieldhandle % decomp)) then call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & @@ -1475,6 
+1600,29 @@ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArr end if realArray2d(:,:) = real(singleArray2d(:,:),RKIND) deallocate(singleArray2d) + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. & + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + allocate(doubleArray2d(size(realArray2d,1), size(realArray2d,2))) + if (associated(field_cursor % fieldhandle % decomp)) then + call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & + doubleArray2d, pio_ierr) + else + if (field_cursor % fieldhandle % has_unlimited_dim) then + start3(:) = 1 + start3(3) = handle % frame_number + count3(1) = field_cursor % fieldhandle % dims(1) % dimsize + count3(2) = field_cursor % fieldhandle % dims(2) % dimsize + count3(3) = 1 + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start3, count3, doubleArray2d) + else + start2(:) = 1 + count2(1) = field_cursor % fieldhandle % dims(1) % dimsize + count2(2) = field_cursor % fieldhandle % dims(2) % dimsize + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start2, count2, doubleArray2d) + end if + end if + realArray2d(:,:) = real(doubleArray2d(:,:),RKIND) + deallocate(doubleArray2d) else if (associated(field_cursor % fieldhandle % decomp)) then call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & @@ -1522,6 +1670,31 @@ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArr end if realArray3d(:,:,:) = real(singleArray3d(:,:,:),RKIND) deallocate(singleArray3d) + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. 
& + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + allocate(doubleArray3d(size(realArray3d,1),size(realArray3d,2),size(realArray3d,3))) + if (associated(field_cursor % fieldhandle % decomp)) then + call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & + doubleArray3d, pio_ierr) + else + if (field_cursor % fieldhandle % has_unlimited_dim) then + start4(:) = 1 + start4(4) = handle % frame_number + count4(1) = field_cursor % fieldhandle % dims(1) % dimsize + count4(2) = field_cursor % fieldhandle % dims(2) % dimsize + count4(3) = field_cursor % fieldhandle % dims(3) % dimsize + count4(4) = 1 + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start4, count4, doubleArray3d) + else + start3(:) = 1 + count3(1) = field_cursor % fieldhandle % dims(1) % dimsize + count3(2) = field_cursor % fieldhandle % dims(2) % dimsize + count3(3) = field_cursor % fieldhandle % dims(3) % dimsize + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start3, count3, doubleArray3d) + end if + end if + realArray3d(:,:,:) = real(doubleArray3d(:,:,:),RKIND) + deallocate(doubleArray3d) else if (associated(field_cursor % fieldhandle % decomp)) then call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & @@ -1573,6 +1746,33 @@ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArr end if realArray4d(:,:,:,:) = real(singleArray4d(:,:,:,:),RKIND) deallocate(singleArray4d) + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. 
& + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + allocate(doubleArray4d(size(realArray4d,1),size(realArray4d,2),size(realArray4d,3),size(realArray4d,4))) + if (associated(field_cursor % fieldhandle % decomp)) then + call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & + doubleArray4d, pio_ierr) + else + if (field_cursor % fieldhandle % has_unlimited_dim) then + start5(:) = 1 + start5(5) = handle % frame_number + count5(1) = field_cursor % fieldhandle % dims(1) % dimsize + count5(2) = field_cursor % fieldhandle % dims(2) % dimsize + count5(3) = field_cursor % fieldhandle % dims(3) % dimsize + count5(4) = field_cursor % fieldhandle % dims(4) % dimsize + count5(5) = 1 + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start5, count5, doubleArray4d) + else + start4(:) = 1 + count4(1) = field_cursor % fieldhandle % dims(1) % dimsize + count4(2) = field_cursor % fieldhandle % dims(2) % dimsize + count4(3) = field_cursor % fieldhandle % dims(3) % dimsize + count4(4) = field_cursor % fieldhandle % dims(4) % dimsize + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start4, count4, doubleArray4d) + end if + end if + realArray4d(:,:,:,:) = real(doubleArray4d(:,:,:,:),RKIND) + deallocate(doubleArray4d) else if (associated(field_cursor % fieldhandle % decomp)) then call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & @@ -1628,6 +1828,35 @@ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArr end if realArray5d(:,:,:,:,:) = real(singleArray5d(:,:,:,:,:),RKIND) deallocate(singleArray5d) + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. 
& + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + allocate(doubleArray5d(size(realArray5d,1),size(realArray5d,2),size(realArray5d,3),size(realArray5d,4),size(realArray5d,5))) + if (associated(field_cursor % fieldhandle % decomp)) then + call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & + doubleArray5d, pio_ierr) + else + if (field_cursor % fieldhandle % has_unlimited_dim) then + start6(:) = 1 + start6(6) = handle % frame_number + count6(1) = field_cursor % fieldhandle % dims(1) % dimsize + count6(2) = field_cursor % fieldhandle % dims(2) % dimsize + count6(3) = field_cursor % fieldhandle % dims(3) % dimsize + count6(4) = field_cursor % fieldhandle % dims(4) % dimsize + count6(5) = field_cursor % fieldhandle % dims(5) % dimsize + count6(6) = 1 + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start6, count6, doubleArray5d) + else + start5(:) = 1 + count5(1) = field_cursor % fieldhandle % dims(1) % dimsize + count5(2) = field_cursor % fieldhandle % dims(2) % dimsize + count5(3) = field_cursor % fieldhandle % dims(3) % dimsize + count5(4) = field_cursor % fieldhandle % dims(4) % dimsize + count5(5) = field_cursor % fieldhandle % dims(5) % dimsize + pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start5, count5, doubleArray5d) + end if + end if + realArray5d(:,:,:,:,:) = real(doubleArray5d(:,:,:,:,:),RKIND) + deallocate(doubleArray5d) else if (associated(field_cursor % fieldhandle % decomp)) then call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & @@ -1953,6 +2182,23 @@ subroutine MPAS_io_get_var_char0d(handle, fieldname, val, ierr) end subroutine MPAS_io_get_var_char0d + subroutine MPAS_io_get_var_char1d(handle, fieldname, val, ierr) + + implicit none + + type (MPAS_IO_Handle_type), intent(inout) :: handle + character (len=*), 
intent(in) :: fieldname + character (len=*), dimension(:), intent(out) :: val + integer, intent(out), optional :: ierr + +! write(stderrUnit,*) 'Called MPAS_io_get_var_char1d()' + if (present(ierr)) ierr = MPAS_IO_NOERR + + call MPAS_io_get_var_generic(handle, fieldname, charArray1d=val, ierr=ierr) + + end subroutine MPAS_io_get_var_char1d + + logical function MPAS_io_would_clobber_records(handle, ierr) implicit none @@ -1981,7 +2227,7 @@ end function MPAS_io_would_clobber_records subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArray2d, intArray3d, intArray4d, & realVal, realArray1d, realArray2d, realArray3d, realArray4d, realArray5d, & - charVal, ierr) + charVal, charArray1d, ierr) implicit none @@ -1999,6 +2245,7 @@ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArr real (kind=RKIND), dimension(:,:,:,:), intent(in), optional :: realArray4d real (kind=RKIND), dimension(:,:,:,:,:), intent(in), optional :: realArray5d character (len=*), intent(in), optional :: charVal + character (len=*), dimension(:), intent(in), optional :: charArray1d integer, intent(out), optional :: ierr integer :: pio_ierr @@ -2016,6 +2263,8 @@ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArr integer, dimension(6) :: count6 type (fieldlist_type), pointer :: field_cursor + integer :: i + real (kind=R4KIND) :: singleVal real (kind=R4KIND), dimension(:), allocatable :: singleArray1d real (kind=R4KIND), dimension(:,:), allocatable :: singleArray2d @@ -2023,6 +2272,13 @@ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArr real (kind=R4KIND), dimension(:,:,:,:), allocatable :: singleArray4d real (kind=R4KIND), dimension(:,:,:,:,:), allocatable :: singleArray5d + real (kind=R8KIND) :: doubleVal + real (kind=R8KIND), dimension(:), allocatable :: doubleArray1d + real (kind=R8KIND), dimension(:,:), allocatable :: doubleArray2d + real (kind=R8KIND), dimension(:,:,:), allocatable :: 
doubleArray3d + real (kind=R8KIND), dimension(:,:,:,:), allocatable :: doubleArray4d + real (kind=R8KIND), dimension(:,:,:,:,:), allocatable :: doubleArray5d + ! Sanity checks if (.not. handle % initialized) then if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE @@ -2062,7 +2318,11 @@ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArr if (field_cursor % fieldhandle % has_unlimited_dim) then +#ifdef USE_PIO2 + call PIO_setframe(handle % pio_file, field_cursor % fieldhandle % field_desc, handle % frame_number) +#else call PIO_setframe(field_cursor % fieldhandle % field_desc, handle % frame_number) +#endif start1(1) = handle % frame_number count1(1) = 1 @@ -2083,6 +2343,14 @@ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArr else pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, singleVal) end if + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. & + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + doubleVal = real(realVal,R8KIND) + if (field_cursor % fieldhandle % has_unlimited_dim) then + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start1, doubleVal) + else + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, doubleVal) + end if else if (field_cursor % fieldhandle % has_unlimited_dim) then pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start1, realVal) @@ -2105,6 +2373,28 @@ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArr count1(1) = field_cursor % fieldhandle % dims(1) % dimsize pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start1, charVal(1:count1(1))) end if + else if (present(charArray1d)) then + if (field_cursor % fieldhandle % has_unlimited_dim) then + ! Write one string at a time because the sizes differ so much (i.e. 
StrLen != StrKIND) + do i = 1, field_cursor % fieldhandle % dims(2) % dimsize + start3(1) = 1 + start3(2) = i + start3(3) = handle % frame_number + count3(1) = field_cursor % fieldhandle % dims(1) % dimsize ! Should be StrLen + count3(2) = 1 + count3(3) = 1 + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start3, charArray1d(i)(1:count3(1))) + end do + else + ! Write one string at a time because the sizes differ so much (i.e. StrLen != StrKIND) + do i = 1, field_cursor % fieldhandle % dims(2) % dimsize + start2(1) = 1 + start2(2) = i + count2(1) = field_cursor % fieldhandle % dims(1) % dimsize ! Should be StrLen + count2(2) = 1 + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start2, charArray1d(i)(1:count2(1))) + end do + end if else if (present(realArray1d)) then if ((field_cursor % fieldhandle % precision == MPAS_IO_SINGLE_PRECISION) .and. & (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_SINGLE_PRECISION)) then @@ -2127,6 +2417,27 @@ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArr end if end if deallocate(singleArray1d) + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. 
& + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + allocate(doubleArray1d(size(realArray1d))) + doubleArray1d(:) = real(realArray1d(:),R8KIND) + if (associated(field_cursor % fieldhandle % decomp)) then + call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & + doubleArray1d, pio_ierr) + else + if (field_cursor % fieldhandle % has_unlimited_dim) then + start2(1) = 1 + start2(2) = handle % frame_number + count2(1) = field_cursor % fieldhandle % dims(1) % dimsize + count2(2) = 1 + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start2, count2, doubleArray1d) + else + start1(1) = 1 + count1(1) = field_cursor % fieldhandle % dims(1) % dimsize + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start1, count1, doubleArray1d) + end if + end if + deallocate(doubleArray1d) else if (associated(field_cursor % fieldhandle % decomp)) then call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & @@ -2170,6 +2481,30 @@ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArr end if end if deallocate(singleArray2d) + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. 
& + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + allocate(doubleArray2d(size(realArray2d,1), size(realArray2d,2))) + doubleArray2d(:,:) = real(realArray2d(:,:),R8KIND) + if (associated(field_cursor % fieldhandle % decomp)) then + call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & + doubleArray2d, pio_ierr) + else + if (field_cursor % fieldhandle % has_unlimited_dim) then + start3(1) = 1 + start3(2) = 1 + start3(3) = handle % frame_number + count3(1) = field_cursor % fieldhandle % dims(1) % dimsize + count3(2) = field_cursor % fieldhandle % dims(2) % dimsize + count3(3) = 1 + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start3, count3, doubleArray2d) + else + start2(:) = 1 + count2(1) = field_cursor % fieldhandle % dims(1) % dimsize + count2(2) = field_cursor % fieldhandle % dims(2) % dimsize + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start2, count2, doubleArray2d) + end if + end if + deallocate(doubleArray2d) else if (associated(field_cursor % fieldhandle % decomp)) then call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & @@ -2219,6 +2554,33 @@ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArr end if end if deallocate(singleArray3d) + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. 
& + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + allocate(doubleArray3d(size(realArray3d,1), size(realArray3d,2), size(realArray3d,3))) + doubleArray3d(:,:,:) = real(realArray3d(:,:,:),R8KIND) + if (associated(field_cursor % fieldhandle % decomp)) then + call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & + doubleArray3d, pio_ierr) + else + if (field_cursor % fieldhandle % has_unlimited_dim) then + start4(1) = 1 + start4(2) = 1 + start4(3) = 1 + start4(4) = handle % frame_number + count4(1) = field_cursor % fieldhandle % dims(1) % dimsize + count4(2) = field_cursor % fieldhandle % dims(2) % dimsize + count4(3) = field_cursor % fieldhandle % dims(3) % dimsize + count4(4) = 1 + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start4, count4, doubleArray3d) + else + start3(:) = 1 + count3(1) = field_cursor % fieldhandle % dims(1) % dimsize + count3(2) = field_cursor % fieldhandle % dims(2) % dimsize + count3(3) = field_cursor % fieldhandle % dims(3) % dimsize + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start3, count3, doubleArray3d) + end if + end if + deallocate(doubleArray3d) else if (associated(field_cursor % fieldhandle % decomp)) then call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & @@ -2274,6 +2636,36 @@ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArr end if end if deallocate(singleArray4d) + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. 
& + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + allocate(doubleArray4d(size(realArray4d,1), size(realArray4d,2), size(realArray4d,3), size(realArray4d,4))) + doubleArray4d(:,:,:,:) = real(realArray4d(:,:,:,:),R8KIND) + if (associated(field_cursor % fieldhandle % decomp)) then + call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & + doubleArray4d, pio_ierr) + else + if (field_cursor % fieldhandle % has_unlimited_dim) then + start5(1) = 1 + start5(2) = 1 + start5(3) = 1 + start5(4) = 1 + start5(5) = handle % frame_number + count5(1) = field_cursor % fieldhandle % dims(1) % dimsize + count5(2) = field_cursor % fieldhandle % dims(2) % dimsize + count5(3) = field_cursor % fieldhandle % dims(3) % dimsize + count5(4) = field_cursor % fieldhandle % dims(4) % dimsize + count5(5) = 1 + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start5, count5, doubleArray4d) + else + start4(:) = 1 + count4(1) = field_cursor % fieldhandle % dims(1) % dimsize + count4(2) = field_cursor % fieldhandle % dims(2) % dimsize + count4(3) = field_cursor % fieldhandle % dims(3) % dimsize + count4(4) = field_cursor % fieldhandle % dims(4) % dimsize + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start4, count4, doubleArray4d) + end if + end if + deallocate(doubleArray4d) else if (associated(field_cursor % fieldhandle % decomp)) then call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & @@ -2335,6 +2727,39 @@ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArr end if end if deallocate(singleArray5d) + else if ((field_cursor % fieldhandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. 
& + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + allocate(doubleArray5d(size(realArray5d,1), size(realArray5d,2), size(realArray5d,3), size(realArray5d,4), size(realArray5d,5))) + doubleArray5d(:,:,:,:,:) = real(realArray5d(:,:,:,:,:),R8KIND) + if (associated(field_cursor % fieldhandle % decomp)) then + call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & + doubleArray5d, pio_ierr) + else + if (field_cursor % fieldhandle % has_unlimited_dim) then + start6(1) = 1 + start6(2) = 1 + start6(3) = 1 + start6(4) = 1 + start6(5) = 1 + start6(6) = handle % frame_number + count6(1) = field_cursor % fieldhandle % dims(1) % dimsize + count6(2) = field_cursor % fieldhandle % dims(2) % dimsize + count6(3) = field_cursor % fieldhandle % dims(3) % dimsize + count6(4) = field_cursor % fieldhandle % dims(4) % dimsize + count6(5) = field_cursor % fieldhandle % dims(5) % dimsize + count6(6) = 1 + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start6, count6, doubleArray5d) + else + start5(:) = 1 + count5(1) = field_cursor % fieldhandle % dims(1) % dimsize + count5(2) = field_cursor % fieldhandle % dims(2) % dimsize + count5(3) = field_cursor % fieldhandle % dims(3) % dimsize + count5(4) = field_cursor % fieldhandle % dims(4) % dimsize + count5(5) = field_cursor % fieldhandle % dims(5) % dimsize + pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start5, count5, doubleArray5d) + end if + end if + deallocate(doubleArray5d) else if (associated(field_cursor % fieldhandle % decomp)) then call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, & @@ -2664,6 +3089,23 @@ subroutine MPAS_io_put_var_char0d(handle, fieldname, val, ierr) end subroutine MPAS_io_put_var_char0d + subroutine MPAS_io_put_var_char1d(handle, fieldname, val, ierr) + + implicit none 
+ + type (MPAS_IO_Handle_type), intent(inout) :: handle + character (len=*), intent(in) :: fieldname + character (len=*), dimension(:), intent(in) :: val + integer, intent(out), optional :: ierr + +! write(stderrUnit,*) 'Called MPAS_io_put_var_char1d()' + if (present(ierr)) ierr = MPAS_IO_NOERR + + call MPAS_io_put_var_generic(handle, fieldname, charArray1d=val, ierr=ierr) + + end subroutine MPAS_io_put_var_char1d + + subroutine MPAS_io_get_att_int0d(handle, attName, attValue, fieldname, ierr) implicit none @@ -2676,7 +3118,12 @@ subroutine MPAS_io_get_att_int0d(handle, attName, attValue, fieldname, ierr) integer :: pio_ierr integer :: varid - integer :: xtype, len + integer :: xtype +#ifdef USE_PIO2 + integer (kind=MPAS_IO_OFFSET_KIND) :: len +#else + integer :: len +#endif type (fieldlist_type), pointer :: field_cursor type (attlist_type), pointer :: att_cursor, new_att_node @@ -2810,7 +3257,12 @@ subroutine MPAS_io_get_att_int1d(handle, attName, attValue, fieldname, ierr) integer :: pio_ierr integer :: varid - integer :: xtype, len, attlen + integer :: xtype +#ifdef USE_PIO2 + integer (kind=MPAS_IO_OFFSET_KIND) :: len, attlen +#else + integer :: len, attlen +#endif type (fieldlist_type), pointer :: field_cursor type (attlist_type), pointer :: att_cursor, new_att_node @@ -2943,7 +3395,7 @@ subroutine MPAS_io_get_att_int1d(handle, attName, attValue, fieldname, ierr) end subroutine MPAS_io_get_att_int1d - subroutine MPAS_io_get_att_real0d(handle, attName, attValue, fieldname, ierr) + subroutine MPAS_io_get_att_real0d(handle, attName, attValue, fieldname, precision, ierr) implicit none @@ -2951,11 +3403,20 @@ subroutine MPAS_io_get_att_real0d(handle, attName, attValue, fieldname, ierr) character (len=*), intent(in) :: attName real (kind=RKIND), intent(out) :: attValue character (len=*), intent(in), optional :: fieldname + integer, intent(in), optional :: precision integer, intent(out), optional :: ierr integer :: pio_ierr integer :: varid - integer :: xtype, len + 
integer :: local_precision + real (kind=R4KIND) :: singleVal + real (kind=R8KIND) :: doubleVal + integer :: xtype +#ifdef USE_PIO2 + integer (kind=MPAS_IO_OFFSET_KIND) :: len +#else + integer :: len +#endif type (fieldlist_type), pointer :: field_cursor type (attlist_type), pointer :: att_cursor, new_att_node @@ -3021,18 +3482,48 @@ subroutine MPAS_io_get_att_real0d(handle, attName, attValue, fieldname, ierr) varid = PIO_global end if + if (present(precision)) then + local_precision = precision + else + local_precision = MPAS_IO_NATIVE_PRECISION + end if + ! Query attribute value pio_ierr = PIO_inq_att(handle % pio_file, varid, attName, xtype, len) if (pio_ierr /= PIO_noerr) then if (present(ierr)) ierr = MPAS_IO_ERR_PIO return end if - if (xtype /= PIO_REALKIND) then - if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE - return - end if - pio_ierr = PIO_get_att(handle % pio_file, varid, attName, attValue) + if ((local_precision == MPAS_IO_SINGLE_PRECISION) .and. & + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_SINGLE_PRECISION)) then + + if (xtype /= PIO_REAL) then + if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE + return + end if + pio_ierr = PIO_get_att(handle % pio_file, varid, attName, singleVal) + attValue = real(singleVal,RKIND) + + else if ((local_precision == MPAS_IO_DOUBLE_PRECISION) .and. & + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + + if (xtype /= PIO_DOUBLE) then + if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE + return + end if + pio_ierr = PIO_get_att(handle % pio_file, varid, attName, doubleVal) + attValue = real(doubleVal,RKIND) + + else + + if (xtype /= PIO_DOUBLE .and. 
xtype /= PIO_REAL) then + if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE + return + end if + pio_ierr = PIO_get_att(handle % pio_file, varid, attName, attValue) + + end if if (pio_ierr /= PIO_noerr) then if (present(ierr)) ierr = MPAS_IO_ERR_PIO return @@ -3045,6 +3536,7 @@ subroutine MPAS_io_get_att_real0d(handle, attName, attValue, fieldname, ierr) new_att_node % atthandle % attName = attName new_att_node % atthandle % attType = MPAS_ATT_REAL new_att_node % atthandle % attValueReal = attValue + new_att_node % atthandle % precision = local_precision if (present(fieldname)) then if (.not. associated(field_cursor % fieldhandle % attlist_head)) then @@ -3077,7 +3569,7 @@ subroutine MPAS_io_get_att_real0d(handle, attName, attValue, fieldname, ierr) end subroutine MPAS_io_get_att_real0d - subroutine MPAS_io_get_att_real1d(handle, attName, attValue, fieldname, ierr) + subroutine MPAS_io_get_att_real1d(handle, attName, attValue, fieldname, precision, ierr) implicit none @@ -3085,11 +3577,20 @@ subroutine MPAS_io_get_att_real1d(handle, attName, attValue, fieldname, ierr) character (len=*), intent(in) :: attName real (kind=RKIND), dimension(:), pointer :: attValue character (len=*), intent(in), optional :: fieldname + integer, intent(in), optional :: precision integer, intent(out), optional :: ierr integer :: pio_ierr integer :: varid - integer :: xtype, len, attlen + integer :: local_precision + real (kind=R4KIND), dimension(:), allocatable :: singleVal + real (kind=R8KIND), dimension(:), allocatable :: doubleVal + integer :: xtype +#ifdef USE_PIO2 + integer (kind=MPAS_IO_OFFSET_KIND) :: len, attlen +#else + integer :: len, attlen +#endif type (fieldlist_type), pointer :: field_cursor type (attlist_type), pointer :: att_cursor, new_att_node @@ -3157,6 +3658,12 @@ subroutine MPAS_io_get_att_real1d(handle, attName, attValue, fieldname, ierr) varid = PIO_global end if + if (present(precision)) then + local_precision = precision + else + local_precision = 
MPAS_IO_NATIVE_PRECISION + end if + ! Query attribute value pio_ierr = PIO_inq_att(handle % pio_file, varid, attName, xtype, len) if (pio_ierr /= PIO_noerr) then @@ -3164,19 +3671,48 @@ subroutine MPAS_io_get_att_real1d(handle, attName, attValue, fieldname, ierr) return end if - if (xtype /= PIO_REALKIND) then - if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE - return - end if - pio_ierr = PIO_inq_attlen(handle % pio_file, varid, attName, attlen) if (pio_ierr /= PIO_noerr) then if (present(ierr)) ierr = MPAS_IO_ERR_PIO return end if - allocate(attValue(attlen)) - pio_ierr = PIO_get_att(handle % pio_file, varid, attName, attValue) + if ((local_precision == MPAS_IO_SINGLE_PRECISION) .and. & + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_SINGLE_PRECISION)) then + + if (xtype /= PIO_REAL) then + if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE + return + end if + allocate(attValue(attlen)) + allocate(singleVal(attlen)) + pio_ierr = PIO_get_att(handle % pio_file, varid, attName, singleVal) + attValue(:) = real(singleVal(:),RKIND) + deallocate(singleVal) + + else if ((local_precision == MPAS_IO_DOUBLE_PRECISION) .and. & + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + + if (xtype /= PIO_DOUBLE) then + if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE + return + end if + allocate(attValue(attlen)) + allocate(doubleVal(attlen)) + pio_ierr = PIO_get_att(handle % pio_file, varid, attName, doubleVal) + attValue(:) = real(doubleVal(:),RKIND) + deallocate(doubleVal) + + else + + if (xtype /= PIO_DOUBLE .and. 
xtype /= PIO_REAL) then + if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE + return + end if + allocate(attValue(attlen)) + pio_ierr = PIO_get_att(handle % pio_file, varid, attName, attValue) + + end if if (pio_ierr /= PIO_noerr) then if (present(ierr)) ierr = MPAS_IO_ERR_PIO return @@ -3190,6 +3726,7 @@ subroutine MPAS_io_get_att_real1d(handle, attName, attValue, fieldname, ierr) new_att_node % atthandle % attType = MPAS_ATT_REALA allocate(new_att_node % atthandle % attValueRealA(attlen)) new_att_node % atthandle % attValueRealA = attValue + new_att_node % atthandle % precision = local_precision if (present(fieldname)) then if (.not. associated(field_cursor % fieldhandle % attlist_head)) then @@ -3234,7 +3771,12 @@ subroutine MPAS_io_get_att_text(handle, attName, attValue, fieldname, ierr) integer :: pio_ierr integer :: varid - integer :: xtype, len + integer :: xtype +#ifdef USE_PIO2 + integer (kind=MPAS_IO_OFFSET_KIND) :: len +#else + integer :: len +#endif type (fieldlist_type), pointer :: field_cursor type (attlist_type), pointer :: att_cursor, new_att_node @@ -3356,7 +3898,7 @@ subroutine MPAS_io_get_att_text(handle, attName, attValue, fieldname, ierr) end subroutine MPAS_io_get_att_text - subroutine MPAS_io_put_att_int0d(handle, attName, attValue, fieldname, ierr) + subroutine MPAS_io_put_att_int0d(handle, attName, attValue, fieldname, syncVal, ierr) implicit none @@ -3364,13 +3906,17 @@ subroutine MPAS_io_put_att_int0d(handle, attName, attValue, fieldname, ierr) character (len=*), intent(in) :: attName integer, intent(in) :: attValue character (len=*), intent(in), optional :: fieldname + logical, intent(in), optional :: syncVal integer, intent(out), optional :: ierr integer :: pio_ierr integer :: varid + integer :: attValueLocal type (fieldlist_type), pointer :: field_cursor type (attlist_type), pointer :: attlist_cursor, new_attlist_node + attValueLocal = attValue + ! 
write(stderrUnit,*) 'Called MPAS_io_put_att_int0d()' if (present(ierr)) ierr = MPAS_IO_NOERR @@ -3482,7 +4028,13 @@ subroutine MPAS_io_put_att_int0d(handle, attName, attValue, fieldname, ierr) end if end if - pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValue) + if ( present(syncVal) ) then + if ( syncVal ) then + call mpas_dmpar_bcast_int(handle % iocontext % dminfo, attValueLocal, IO_NODE) + end if + end if + + pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValueLocal) if (pio_ierr /= PIO_noerr) then if (present(ierr)) ierr = MPAS_IO_ERR_PIO return @@ -3493,7 +4045,7 @@ subroutine MPAS_io_put_att_int0d(handle, attName, attValue, fieldname, ierr) end subroutine MPAS_io_put_att_int0d - subroutine MPAS_io_put_att_int1d(handle, attName, attValue, fieldname, ierr) + subroutine MPAS_io_put_att_int1d(handle, attName, attValue, fieldname, syncVal, ierr) implicit none @@ -3501,13 +4053,18 @@ subroutine MPAS_io_put_att_int1d(handle, attName, attValue, fieldname, ierr) character (len=*), intent(in) :: attName integer, dimension(:), intent(in) :: attValue character (len=*), intent(in), optional :: fieldname + logical, intent(in), optional :: syncVal integer, intent(out), optional :: ierr integer :: pio_ierr integer :: varid + integer, dimension(:), pointer :: attValueLocal type (fieldlist_type), pointer :: field_cursor type (attlist_type), pointer :: attlist_cursor, new_attlist_node + allocate(attValueLocal(size(attValue, dim=1))) + attValueLocal(:) = attValue(:) + ! 
write(stderrUnit,*) 'Called MPAS_io_put_att_int1d()' if (present(ierr)) ierr = MPAS_IO_NOERR @@ -3628,18 +4185,26 @@ subroutine MPAS_io_put_att_int1d(handle, attName, attValue, fieldname, ierr) end if end if - pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValue) + if ( present(syncVal) ) then + if ( syncVal ) then + call mpas_dmpar_bcast_ints(handle % iocontext % dminfo, size(attValueLocal, dim=1), attValueLocal, IO_NODE) + end if + end if + + pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValueLocal) if (pio_ierr /= PIO_noerr) then if (present(ierr)) ierr = MPAS_IO_ERR_PIO return end if + deallocate(attValueLocal) + ! Maybe we should add attribute to list only after a successfull call to PIO? end subroutine MPAS_io_put_att_int1d - subroutine MPAS_io_put_att_real0d(handle, attName, attValue, fieldname, ierr) + subroutine MPAS_io_put_att_real0d(handle, attName, attValue, fieldname, syncVal, precision, ierr) implicit none @@ -3647,13 +4212,20 @@ subroutine MPAS_io_put_att_real0d(handle, attName, attValue, fieldname, ierr) character (len=*), intent(in) :: attName real (kind=RKIND), intent(in) :: attValue character (len=*), intent(in), optional :: fieldname + logical, intent(in), optional :: syncVal + integer, intent(in), optional :: precision integer, intent(out), optional :: ierr integer :: pio_ierr integer :: varid + real (kind=RKIND) :: attValueLocal + real (kind=R4KIND) :: singleVal + real (kind=R8KIND) :: doubleVal type (fieldlist_type), pointer :: field_cursor type (attlist_type), pointer :: attlist_cursor, new_attlist_node + attValueLocal = attValue + ! 
write(stderrUnit,*) 'Called MPAS_io_put_att_real0d()' if (present(ierr)) ierr = MPAS_IO_NOERR @@ -3679,6 +4251,11 @@ subroutine MPAS_io_put_att_real0d(handle, attName, attValue, fieldname, ierr) new_attlist_node % attHandle % attType = MPAS_ATT_REAL new_attlist_node % attHandle % attValueReal = attValue + if (present(precision)) then + new_attlist_node % attHandle % precision = precision + else + new_attlist_node % attHandle % precision = MPAS_IO_NATIVE_PRECISION + end if ! ! For variable attributes, find the structure for fieldname @@ -3765,7 +4342,24 @@ subroutine MPAS_io_put_att_real0d(handle, attName, attValue, fieldname, ierr) end if end if - pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValue) + if ( present(syncVal) ) then + if ( syncVal ) then + call mpas_dmpar_bcast_real(handle % iocontext % dminfo, attValueLocal, IO_NODE) + end if + end if + + if ((new_attlist_node % attHandle % precision == MPAS_IO_SINGLE_PRECISION) .and. & + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_SINGLE_PRECISION)) then + singleVal = real(attValueLocal,R4KIND) + pio_ierr = PIO_put_att(handle % pio_file, varid, attName, singleVal) + else if ((new_attlist_node % attHandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. 
& + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + doubleVal = real(attValueLocal,R8KIND) + pio_ierr = PIO_put_att(handle % pio_file, varid, attName, doubleVal) + else + pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValueLocal) + end if + if (pio_ierr /= PIO_noerr) then if (present(ierr)) ierr = MPAS_IO_ERR_PIO return @@ -3776,7 +4370,7 @@ subroutine MPAS_io_put_att_real0d(handle, attName, attValue, fieldname, ierr) end subroutine MPAS_io_put_att_real0d - subroutine MPAS_io_put_att_real1d(handle, attName, attValue, fieldname, ierr) + subroutine MPAS_io_put_att_real1d(handle, attName, attValue, fieldname, syncVal, precision, ierr) implicit none @@ -3784,13 +4378,22 @@ subroutine MPAS_io_put_att_real1d(handle, attName, attValue, fieldname, ierr) character (len=*), intent(in) :: attName real (kind=RKIND), dimension(:), intent(in) :: attValue character (len=*), intent(in), optional :: fieldname + logical, intent(in), optional :: syncVal + integer, intent(in), optional :: precision integer, intent(out), optional :: ierr integer :: pio_ierr integer :: varid + real (kind=RKIND), dimension(:), allocatable :: attValueLocal + real (kind=R4KIND), dimension(:), allocatable :: singleVal + real (kind=R8KIND), dimension(:), allocatable :: doubleVal type (fieldlist_type), pointer :: field_cursor type (attlist_type), pointer :: attlist_cursor, new_attlist_node + allocate(attValueLocal( size(attValue, dim=1) ) ) + + attValueLocal(:) = attValue(:) + ! write(stderrUnit,*) 'Called MPAS_io_put_att_real1d()' if (present(ierr)) ierr = MPAS_IO_NOERR @@ -3817,6 +4420,11 @@ subroutine MPAS_io_put_att_real1d(handle, attName, attValue, fieldname, ierr) allocate(new_attlist_node % attHandle % attValueRealA(size(attValue))) new_attlist_node % attHandle % attValueRealA = attValue + if (present(precision)) then + new_attlist_node % attHandle % precision = precision + else + new_attlist_node % attHandle % precision = MPAS_IO_NATIVE_PRECISION + end if ! ! 
For variable attributes, find the structure for fieldname @@ -3911,18 +4519,40 @@ subroutine MPAS_io_put_att_real1d(handle, attName, attValue, fieldname, ierr) end if end if - pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValue) + if ( present(syncVal) ) then + if ( syncVal ) then + call mpas_dmpar_bcast_reals(handle % iocontext % dminfo, size(attValueLocal, dim=1), attValueLocal, IO_NODE) + end if + end if + + if ((new_attlist_node % attHandle % precision == MPAS_IO_SINGLE_PRECISION) .and. & + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_SINGLE_PRECISION)) then + allocate(singleVal(size(attValueLocal))) + singleVal(:) = real(attValueLocal(:),R4KIND) + pio_ierr = PIO_put_att(handle % pio_file, varid, attName, singleVal) + deallocate(singleVal) + else if ((new_attlist_node % attHandle % precision == MPAS_IO_DOUBLE_PRECISION) .and. & + (MPAS_IO_NATIVE_PRECISION /= MPAS_IO_DOUBLE_PRECISION)) then + allocate(doubleVal(size(attValueLocal))) + doubleVal(:) = real(attValueLocal(:),R8KIND) + pio_ierr = PIO_put_att(handle % pio_file, varid, attName, doubleVal) + deallocate(doubleVal) + else + pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValueLocal) + end if if (pio_ierr /= PIO_noerr) then if (present(ierr)) ierr = MPAS_IO_ERR_PIO return end if + deallocate(attValueLocal) + ! Maybe we should add attribute to list only after a successfull call to PIO? 
end subroutine MPAS_io_put_att_real1d - subroutine MPAS_io_put_att_text(handle, attName, attValue, fieldname, ierr) + subroutine MPAS_io_put_att_text(handle, attName, attValue, fieldname, syncVal, ierr) implicit none @@ -3930,13 +4560,26 @@ subroutine MPAS_io_put_att_text(handle, attName, attValue, fieldname, ierr) character (len=*), intent(in) :: attName character (len=*), intent(in) :: attValue character (len=*), intent(in), optional :: fieldname + logical, intent(in), optional :: syncVal integer, intent(out), optional :: ierr integer :: pio_ierr integer :: varid + integer :: valLen + character (len=StrKind) :: attValueLocal, trimmedVal type (fieldlist_type), pointer :: field_cursor type (attlist_type), pointer :: attlist_cursor, new_attlist_node + valLen = len_trim(attValue) + trimmedVal = trim(attValue) + + if ( valLen > StrKind ) then + write(stderrUnit, *) ' WARNING: Attribute ''' // trim(attName) // ''' has a value longer than StrKIND. It will be cut to a length of ', StrKIND + attValueLocal = trimmedVal(1:StrKIND) + else + attValueLocal = trimmedVal + end if + ! 
write(stderrUnit,*) 'Called MPAS_io_put_att_text()' if (present(ierr)) ierr = MPAS_IO_NOERR @@ -4048,7 +4691,13 @@ subroutine MPAS_io_put_att_text(handle, attName, attValue, fieldname, ierr) end if end if - pio_ierr = PIO_put_att(handle % pio_file, varid, attName, trim(attValue)) + if ( present(syncVal) ) then + if ( syncVal ) then + call mpas_dmpar_bcast_char(handle % iocontext % dminfo, attValueLocal, IO_NODE) + end if + end if + + pio_ierr = PIO_put_att(handle % pio_file, varid, attName, trim(attValueLocal)) if (pio_ierr /= PIO_noerr) then if (present(ierr)) ierr = MPAS_IO_ERR_PIO @@ -4065,7 +4714,7 @@ subroutine MPAS_io_put_att_text(handle, attName, attValue, fieldname, ierr) return end if - pio_ierr = PIO_put_att(handle % pio_file, varid, attName, trim(attValue)) + pio_ierr = PIO_put_att(handle % pio_file, varid, attName, trim(attValueLocal)) if (pio_ierr /= PIO_noerr) then return end if @@ -4211,11 +4860,12 @@ subroutine MPAS_io_close(handle, ierr) end subroutine MPAS_io_close - subroutine MPAS_io_finalize(io_system, ierr) + subroutine MPAS_io_finalize(ioContext, finalize_iosystem, ierr) implicit none - type (iosystem_desc_t), optional, pointer :: io_system + type (mpas_io_context_type), pointer :: ioContext + logical, optional :: finalize_iosystem integer, intent(out), optional :: ierr integer :: pio_ierr @@ -4224,7 +4874,7 @@ subroutine MPAS_io_finalize(io_system, ierr) ! write(stderrUnit,*) 'Called MPAS_io_finalize()' if (present(ierr)) ierr = MPAS_IO_NOERR - decomp_cursor => decomp_list + decomp_cursor => ioContext % decomp_list do while (associated(decomp_cursor)) decomp_del => decomp_cursor decomp_cursor => decomp_cursor % next @@ -4232,24 +4882,37 @@ subroutine MPAS_io_finalize(io_system, ierr) !if (.not. associated(decomp_del % decomphandle)) write(stderrUnit,*) 'OOPS... 
do not have decomphandle' deallocate(decomp_del % decomphandle % dims) deallocate(decomp_del % decomphandle % indices) - call PIO_freedecomp(pio_iosystem, decomp_del % decomphandle % pio_iodesc) + call PIO_freedecomp(ioContext % pio_iosystem, decomp_del % decomphandle % pio_iodesc) deallocate(decomp_del % decomphandle) deallocate(decomp_del) end do !write(stderrUnit,*) 'MGD PIO_finalize' - if (.not.present(io_system)) then - call PIO_finalize(pio_iosystem, pio_ierr) - if (pio_ierr /= PIO_noerr) then - if (present(ierr)) ierr = MPAS_IO_ERR_PIO - return - end if - deallocate(pio_iosystem) + if (present(finalize_iosystem)) then + if ( finalize_iosystem ) then + call PIO_finalize(ioContext % pio_iosystem, pio_ierr) + if (pio_ierr /= PIO_noerr) then + if (present(ierr)) ierr = MPAS_IO_ERR_PIO + return + end if + deallocate(ioContext % pio_iosystem) + end if end if end subroutine MPAS_io_finalize + type (dm_info) function MPAS_io_handle_dminfo(handle) + + implicit none + + type (MPAS_IO_Handle_type), intent(in) :: handle + + MPAS_io_handle_dminfo = handle % ioContext % dminfo + + end function MPAS_io_handle_dminfo + + subroutine MPAS_io_err_mesg(ierr, fatal) implicit none @@ -4304,7 +4967,7 @@ subroutine MPAS_io_err_mesg(ierr, fatal) write(stderrUnit,*) 'MPAS IO Error: Unrecognized error code...' end select - if (fatal .and. (ierr /= MPAS_IO_NOERR)) call mpas_dmpar_abort(local_dminfo) + if (fatal .and. 
(ierr /= MPAS_IO_NOERR)) call mpas_dmpar_global_abort('ERROR In MPAS_IO') end subroutine MPAS_io_err_mesg diff --git a/src/framework/mpas_io_streams.F b/src/framework/mpas_io_streams.F index d8f64a21a7..1634fbc26b 100644 --- a/src/framework/mpas_io_streams.F +++ b/src/framework/mpas_io_streams.F @@ -36,6 +36,7 @@ module mpas_io_streams module procedure MPAS_streamAddField_4dReal module procedure MPAS_streamAddField_5dReal module procedure MPAS_streamAddField_0dChar + module procedure MPAS_streamAddField_1dChar end interface MPAS_streamAddField interface MPAS_streamUpdateField @@ -50,6 +51,7 @@ module mpas_io_streams module procedure MPAS_streamUpdateField_4dReal module procedure MPAS_streamUpdateField_5dReal module procedure MPAS_streamUpdateField_0dChar + module procedure MPAS_streamUpdateField_1dChar end interface MPAS_streamUpdateField interface MPAS_readStreamAtt @@ -75,12 +77,13 @@ module mpas_io_streams contains - subroutine MPAS_createStream(stream, fileName, ioFormat, ioDirection, precision, & + subroutine MPAS_createStream(stream, ioContext, fileName, ioFormat, ioDirection, precision, & clobberRecords, clobberFiles, truncateFiles, ierr) implicit none type (MPAS_Stream_type), intent(out) :: stream + type (mpas_io_context_type), pointer :: ioContext character (len=*), intent(in) :: fileName integer, intent(in) :: ioFormat integer, intent(in) :: ioDirection @@ -95,7 +98,7 @@ subroutine MPAS_createStream(stream, fileName, ioFormat, ioDirection, precision, if (present(ierr)) ierr = MPAS_STREAM_NOERR - stream % fileHandle = MPAS_io_open(fileName, ioDirection, ioFormat, clobber_file=clobberFiles, truncate_file=truncateFiles, & + stream % fileHandle = MPAS_io_open(fileName, ioDirection, ioFormat, ioContext, clobber_file=clobberFiles, truncate_file=truncateFiles, & ierr=io_err) ! ! 
Catch a few special errors @@ -375,7 +378,7 @@ subroutine MPAS_streamAddField_0dInteger(stream, field, ierr) return end if - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attLists(1) % attList) ! @@ -420,6 +423,8 @@ subroutine MPAS_streamAddField_1dInteger(stream, field, ierr) type (mpas_pool_type), pointer :: meshPool integer, dimension(:), pointer :: indexArray integer, pointer :: indexDimension + character (len=StrKIND) :: elementName, elementNamePlural + logical :: meshFieldDim if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -478,10 +483,10 @@ subroutine MPAS_streamAddField_1dInteger(stream, field, ierr) if (field % isVarArray) then do i=1,size(field % constituentNames) - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attLists(i) % attList) end do else - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attLists(1) % attList) end if @@ -526,6 +531,8 @@ subroutine MPAS_streamAddField_2dInteger(stream, field, ierr) type (mpas_pool_type), pointer :: meshPool integer, dimension(:), pointer :: indexArray integer, pointer :: indexDimension + character (len=StrKIND) :: elementName, elementNamePlural + logical :: meshFieldDim if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -585,10 +592,10 @@ subroutine MPAS_streamAddField_2dInteger(stream, field, ierr) if (field % isVarArray) then do i=1,size(field % constituentNames) - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList) + call put_get_field_atts(stream % 
fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attLists(i) % attList) end do else - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attLists(1) % attList) end if @@ -633,6 +640,8 @@ subroutine MPAS_streamAddField_3dInteger(stream, field, ierr) type (mpas_pool_type), pointer :: meshPool integer, dimension(:), pointer :: indexArray integer, pointer :: indexDimension + character (len=StrKIND) :: elementName, elementNamePlural + logical :: meshFieldDim if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -691,10 +700,10 @@ subroutine MPAS_streamAddField_3dInteger(stream, field, ierr) if (field % isVarArray) then do i=1,size(field % constituentNames) - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attLists(i) % attList) end do else - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attLists(1) % attList) end if @@ -781,7 +790,7 @@ subroutine MPAS_streamAddField_0dReal(stream, field, precision, ierr) return end if - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attLists(1) % attList) ! 
@@ -828,6 +837,8 @@ subroutine MPAS_streamAddField_1dReal(stream, field, precision, ierr) type (mpas_pool_type), pointer :: meshPool integer, dimension(:), pointer :: indexArray integer, pointer :: indexDimension + character (len=StrKIND) :: elementName, elementNamePlural + logical :: meshFieldDim if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -894,10 +905,10 @@ subroutine MPAS_streamAddField_1dReal(stream, field, precision, ierr) if (field % isVarArray) then do i=1,size(field % constituentNames) - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attLists(i) % attList) end do else - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attLists(1) % attList) end if @@ -944,6 +955,8 @@ subroutine MPAS_streamAddField_2dReal(stream, field, precision, ierr) type (mpas_pool_type), pointer :: meshPool integer, dimension(:), pointer :: indexArray integer, pointer :: indexDimension + character (len=StrKIND) :: elementName, elementNamePlural + logical :: meshFieldDim if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -1010,10 +1023,10 @@ subroutine MPAS_streamAddField_2dReal(stream, field, precision, ierr) if (field % isVarArray) then do i=1,size(field % constituentNames) - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attLists(i) % attList) end do else - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % 
attLists(1) % attList) end if @@ -1060,6 +1073,8 @@ subroutine MPAS_streamAddField_3dReal(stream, field, precision, ierr) type (mpas_pool_type), pointer :: meshPool integer, dimension(:), pointer :: indexArray integer, pointer :: indexDimension + character (len=StrKIND) :: elementName, elementNamePlural + logical :: meshFieldDim if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -1127,10 +1142,10 @@ subroutine MPAS_streamAddField_3dReal(stream, field, precision, ierr) if (field % isVarArray) then do i=1,size(field % constituentNames) - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attLists(i) % attList) end do else - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attLists(1) % attList) end if @@ -1178,6 +1193,8 @@ subroutine MPAS_streamAddField_4dReal(stream, field, precision, ierr) type (mpas_pool_type), pointer :: meshPool integer, dimension(:), pointer :: indexArray integer, pointer :: indexDimension + character (len=StrKIND) :: elementName, elementNamePlural + logical :: meshFieldDim if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -1245,10 +1262,10 @@ subroutine MPAS_streamAddField_4dReal(stream, field, precision, ierr) if (field % isVarArray) then do i=1,size(field % constituentNames) - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attLists(i) % attList) end do else - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList) + call put_get_field_atts(stream % fileHandle, stream % 
ioDirection, trim(field % fieldname), field % attLists(1) % attList) end if @@ -1296,6 +1313,8 @@ subroutine MPAS_streamAddField_5dReal(stream, field, precision, ierr) type (mpas_pool_type), pointer :: meshPool integer, dimension(:), pointer :: indexArray integer, pointer :: indexDimension + character (len=StrKIND) :: elementName, elementNamePlural + logical :: meshFieldDim if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -1363,10 +1382,10 @@ subroutine MPAS_streamAddField_5dReal(stream, field, precision, ierr) if (field % isVarArray) then do i=1,size(field % constituentNames) - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attLists(i) % attList) end do else - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attLists(1) % attList) end if @@ -1457,10 +1476,10 @@ subroutine MPAS_streamAddField_0dChar(stream, field, ierr) if (field % isVarArray) then do i=1,size(field % constituentNames) - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attLists(i) % attList) end do else - call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attLists(1) % attList) end if @@ -1479,6 +1498,99 @@ subroutine MPAS_streamAddField_0dChar(stream, field, ierr) end subroutine MPAS_streamAddField_0dChar + subroutine MPAS_streamAddField_1dChar(stream, field, ierr) + + implicit none + + type (MPAS_Stream_type), intent(inout) 
:: stream + type (field1DChar), intent(in), target :: field + integer, intent(out), optional :: ierr + + integer :: io_err + integer :: i + integer :: idim + integer :: totalDimSize, globalDimSize + integer :: ndims + type (field1dChar), pointer :: field_ptr + character (len=StrKIND), dimension(2) :: dimNames + character (len=StrKIND), dimension(:), pointer :: dimNamesInq + integer, dimension(:), pointer :: dimSizes + integer, dimension(:), pointer :: indices + type (field_list_type), pointer :: field_list_cursor + type (field_list_type), pointer :: new_field_list_node + + if (present(ierr)) ierr = MPAS_STREAM_NOERR + + ! + ! Sanity checks + ! + if (.not. stream % isInitialized) then + if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED + return + end if + +!write(stderrUnit,*) '... Adding field '//trim(field % fieldName)//' to stream' + + ndims = 2 + +!write(stderrUnit,*) '... field has ', ndims, ' dimensions' + + ! + ! Determine whether the field is decomposed, the indices that are owned by this task's blocks, + ! and the total number of outer-indices owned by this task + ! 
+ idim = ndims + allocate(indices(1)) + allocate(dimSizes(2)) + dimSizes(1) = 64 + dimNames(1) = 'StrLen' + dimSizes(2) = field % dimSizes(1) + dimNames(2) = field % dimNames(1) + globalDimSize = dimSizes(2) + totalDimSize = dimSizes(2) + + if (field % isVarArray) then + do i=1,size(field % constituentNames) + call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_CHAR, dimNames, & + dimSizes, field % hasTimeDimension, field % isDecomposed, totalDimSize, globalDimSize, & + indices, ierr=io_err) + end do + else + call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_CHAR, dimNames, dimSizes, & + field % hasTimeDimension, field % isDecomposed, totalDimSize, globalDimSize, indices, ierr=io_err) + end if + + deallocate(indices) + deallocate(dimSizes) + if (io_err /= MPAS_STREAM_NOERR) then + if (present(ierr)) ierr = MPAS_IO_ERR + return + end if + + if (field % isVarArray) then + do i=1,size(field % constituentNames) + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attLists(i) % attList) + end do + else + call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attLists(1) % attList) + end if + + + ! + ! Set field pointer and type in fieldList + ! + new_field_list_node => stream % fieldList + do while (associated(new_field_list_node % next)) + new_field_list_node => new_field_list_node % next + end do + new_field_list_node % field_type = FIELD_1D_CHAR + new_field_list_node % char1dField => field + +!write(stderrUnit,*) '... 
done adding field' + + end subroutine MPAS_streamAddField_1dChar + + subroutine MPAS_streamAddField_generic(stream, fieldName, fieldType, dimNames, dimSizes, hasTimeDimension, isDecomposed, & totalDimSize, globalDimSize, indices, precision, ierr) @@ -2206,6 +2318,50 @@ subroutine MPAS_streamUpdateField_0dChar(stream, field, ierr) end subroutine MPAS_streamUpdateField_0dChar + subroutine MPAS_streamUpdateField_1dChar(stream, field, ierr) + + implicit none + + type (MPAS_Stream_type), intent(inout) :: stream + type (field1DChar), intent(in), target :: field + integer, intent(out), optional :: ierr + + type (field_list_type), pointer :: field_cursor + + + if (present(ierr)) ierr = MPAS_STREAM_NOERR + + ! + ! Sanity checks + ! + if (.not. stream % isInitialized) then + if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED + return + end if + + STREAM_DEBUG_WRITE( '... Updating 1d char field '//trim(field % fieldName)//' in stream' ) + + field_cursor => stream % fieldList + do while (associated(field_cursor)) + if (field_cursor % field_type == FIELD_1D_CHAR) then + if (field_cursor % char1dField % fieldname == field % fieldname) then + STREAM_DEBUG_WRITE( '... found 1d char named '//trim(field_cursor % char0dField % fieldname) ) + field_cursor % char1dField => field + exit + end if + end if + field_cursor => field_cursor % next + end do + if (.not. associated(field_cursor)) then + STREAM_DEBUG_WRITE( '... 1d char field '//trim(field % fieldname)//' not found in stream' ) + if (present(ierr)) ierr = MPAS_STREAM_FIELD_NOT_FOUND + end if + + STREAM_DEBUG_WRITE( '... 
done updating field' ) + + end subroutine MPAS_streamUpdateField_1dChar + + subroutine MPAS_readStream(stream, frame, ierr) implicit none @@ -2231,6 +2387,7 @@ subroutine MPAS_readStream(stream, frame, ierr) type (field0dChar), pointer :: field_0dchar_ptr type (field1dChar), pointer :: field_1dchar_ptr type (field_list_type), pointer :: field_cursor + character (len=StrKIND), dimension(:), pointer :: char1d_temp integer :: int0d_temp integer, dimension(:), pointer :: int1d_temp integer, dimension(:,:), pointer :: int2d_temp @@ -2286,7 +2443,7 @@ subroutine MPAS_readStream(stream, frame, ierr) !write(stderrUnit,*) 'Distributing and Copying field to other blocks' - call mpas_dmpar_bcast_int(field_cursor % int0dField % block % domain % dminfo, int0d_temp) + call mpas_dmpar_bcast_int( mpas_io_handle_dminfo(stream % fileHandle), int0d_temp) field_0dint_ptr => field_cursor % int0dField do while (associated(field_0dint_ptr)) field_0dint_ptr % scalar = int0d_temp @@ -2346,14 +2503,14 @@ subroutine MPAS_readStream(stream, frame, ierr) else if (field_cursor % int1dField % isVarArray) then - call mpas_dmpar_bcast_int(field_cursor % int1dField % block % domain % dminfo, int0d_temp) + call mpas_dmpar_bcast_int( mpas_io_handle_dminfo(stream % fileHandle), int0d_temp) field_1dint_ptr => field_cursor % int1dField do while (associated(field_1dint_ptr)) field_1dint_ptr % array(j) = int0d_temp field_1dint_ptr => field_1dint_ptr % next end do else - call mpas_dmpar_bcast_ints(field_cursor % int1dField % block % domain % dminfo, size(int1d_temp), int1d_temp(:)) + call mpas_dmpar_bcast_ints( mpas_io_handle_dminfo(stream % fileHandle), size(int1d_temp), int1d_temp(:)) field_1dint_ptr => field_cursor % int1dField do while (associated(field_1dint_ptr)) field_1dint_ptr % array(:) = int1d_temp(:) @@ -2424,14 +2581,14 @@ subroutine MPAS_readStream(stream, frame, ierr) else if (field_cursor % int2dField % isVarArray) then - call mpas_dmpar_bcast_ints(field_cursor % int2dField % block % domain 
% dminfo, size(int1d_temp), int1d_temp(:)) + call mpas_dmpar_bcast_ints( mpas_io_handle_dminfo(stream % fileHandle), size(int1d_temp), int1d_temp(:)) field_2dint_ptr => field_cursor % int2dField do while (associated(field_2dint_ptr)) field_2dint_ptr % array(j,:) = int1d_temp(:) field_2dint_ptr => field_2dint_ptr % next end do else - call mpas_dmpar_bcast_ints(field_cursor % int2dField % block % domain % dminfo, size(int2d_temp), int2d_temp(:,1)) + call mpas_dmpar_bcast_ints( mpas_io_handle_dminfo(stream % fileHandle), size(int2d_temp), int2d_temp(:,1)) field_2dint_ptr => field_cursor % int2dField do while (associated(field_2dint_ptr)) field_2dint_ptr % array(:,:) = int2d_temp(:,:) @@ -2506,14 +2663,14 @@ subroutine MPAS_readStream(stream, frame, ierr) else if (field_cursor % int3dField % isVarArray) then - call mpas_dmpar_bcast_ints(field_cursor % int3dField % block % domain % dminfo, size(int2d_temp), int2d_temp(:,1)) + call mpas_dmpar_bcast_ints( mpas_io_handle_dminfo(stream % fileHandle), size(int2d_temp), int2d_temp(:,1)) field_3dint_ptr => field_cursor % int3dField do while (associated(field_3dint_ptr)) field_3dint_ptr % array(j,:,:) = int2d_temp(:,:) field_3dint_ptr => field_3dint_ptr % next end do else - call mpas_dmpar_bcast_ints(field_cursor % int3dField % block % domain % dminfo, size(int3d_temp), int3d_temp(:,1,1)) + call mpas_dmpar_bcast_ints( mpas_io_handle_dminfo(stream % fileHandle), size(int3d_temp), int3d_temp(:,1,1)) field_3dint_ptr => field_cursor % int3dField do while (associated(field_3dint_ptr)) field_3dint_ptr % array(:,:,:) = int3d_temp(:,:,:) @@ -2546,7 +2703,7 @@ subroutine MPAS_readStream(stream, frame, ierr) !write(stderrUnit,*) 'Distributing and Copying field to other blocks' - call mpas_dmpar_bcast_real(field_cursor % real0dField % block % domain % dminfo, real0d_temp) + call mpas_dmpar_bcast_real( mpas_io_handle_dminfo(stream % fileHandle), real0d_temp) field_0dreal_ptr => field_cursor % real0dField do while 
(associated(field_0dreal_ptr)) field_0dreal_ptr % scalar = real0d_temp @@ -2607,14 +2764,14 @@ subroutine MPAS_readStream(stream, frame, ierr) else if (field_cursor % real1dField % isVarArray) then - call mpas_dmpar_bcast_real(field_cursor % real1dField % block % domain % dminfo, real0d_temp) + call mpas_dmpar_bcast_real( mpas_io_handle_dminfo(stream % fileHandle), real0d_temp) field_1dreal_ptr => field_cursor % real1dField do while (associated(field_1dreal_ptr)) field_1dreal_ptr % array(j) = real0d_temp field_1dreal_ptr => field_1dreal_ptr % next end do else - call mpas_dmpar_bcast_reals(field_cursor % real1dField % block % domain % dminfo, size(real1d_temp), real1d_temp(:)) + call mpas_dmpar_bcast_reals( mpas_io_handle_dminfo(stream % fileHandle), size(real1d_temp), real1d_temp(:)) field_1dreal_ptr => field_cursor % real1dField do while (associated(field_1dreal_ptr)) field_1dreal_ptr % array(:) = real1d_temp(:) @@ -2685,14 +2842,14 @@ subroutine MPAS_readStream(stream, frame, ierr) else if (field_cursor % real2dField % isVarArray) then - call mpas_dmpar_bcast_reals(field_cursor % real2dField % block % domain % dminfo, size(real1d_temp), real1d_temp(:)) + call mpas_dmpar_bcast_reals( mpas_io_handle_dminfo(stream % fileHandle), size(real1d_temp), real1d_temp(:)) field_2dreal_ptr => field_cursor % real2dField do while (associated(field_2dreal_ptr)) field_2dreal_ptr % array(j,:) = real1d_temp(:) field_2dreal_ptr => field_2dreal_ptr % next end do else - call mpas_dmpar_bcast_reals(field_cursor % real2dField % block % domain % dminfo, size(real2d_temp), real2d_temp(:,1)) + call mpas_dmpar_bcast_reals( mpas_io_handle_dminfo(stream % fileHandle), size(real2d_temp), real2d_temp(:,1)) field_2dreal_ptr => field_cursor % real2dField do while (associated(field_2dreal_ptr)) field_2dreal_ptr % array(:,:) = real2d_temp(:,:) @@ -2771,14 +2928,14 @@ subroutine MPAS_readStream(stream, frame, ierr) else if (field_cursor % real3dField % isVarArray) then - call 
mpas_dmpar_bcast_reals(field_cursor % real3dField % block % domain % dminfo, size(real2d_temp), real2d_temp(:,1)) + call mpas_dmpar_bcast_reals( mpas_io_handle_dminfo(stream % fileHandle), size(real2d_temp), real2d_temp(:,1)) field_3dreal_ptr => field_cursor % real3dField do while (associated(field_3dreal_ptr)) field_3dreal_ptr % array(j,:,:) = real2d_temp(:,:) field_3dreal_ptr => field_3dreal_ptr % next end do else - call mpas_dmpar_bcast_reals(field_cursor % real3dField % block % domain % dminfo, size(real3d_temp), real3d_temp(:,1,1)) + call mpas_dmpar_bcast_reals( mpas_io_handle_dminfo(stream % fileHandle), size(real3d_temp), real3d_temp(:,1,1)) field_3dreal_ptr => field_cursor % real3dField do while (associated(field_3dreal_ptr)) field_3dreal_ptr % array(:,:,:) = real3d_temp(:,:,:) @@ -2858,14 +3015,14 @@ subroutine MPAS_readStream(stream, frame, ierr) else if (field_cursor % real3dField % isVarArray) then - call mpas_dmpar_bcast_reals(field_cursor % real4dField % block % domain % dminfo, size(real3d_temp), real3d_temp(:,1,1)) + call mpas_dmpar_bcast_reals( mpas_io_handle_dminfo(stream % fileHandle), size(real3d_temp), real3d_temp(:,1,1)) field_4dreal_ptr => field_cursor % real4dField do while (associated(field_4dreal_ptr)) field_4dreal_ptr % array(j,:,:,:) = real3d_temp(:,:,:) field_4dreal_ptr => field_4dreal_ptr % next end do else - call mpas_dmpar_bcast_reals(field_cursor % real4dField % block % domain % dminfo, size(real4d_temp), real4d_temp(:,1,1,1)) + call mpas_dmpar_bcast_reals( mpas_io_handle_dminfo(stream % fileHandle), size(real4d_temp), real4d_temp(:,1,1,1)) field_4dreal_ptr => field_cursor % real4dField do while (associated(field_4dreal_ptr)) field_4dreal_ptr % array(:,:,:,:) = real4d_temp(:,:,:,:) @@ -2948,14 +3105,14 @@ subroutine MPAS_readStream(stream, frame, ierr) else if (field_cursor % real5dField % isVarArray) then - call mpas_dmpar_bcast_reals(field_cursor % real5dField % block % domain % dminfo, size(real4d_temp), real4d_temp(:,1,1,1)) + 
call mpas_dmpar_bcast_reals( mpas_io_handle_dminfo(stream % fileHandle), size(real4d_temp), real4d_temp(:,1,1,1)) field_5dreal_ptr => field_cursor % real5dField do while (associated(field_5dreal_ptr)) field_5dreal_ptr % array(j,:,:,:,:) = real4d_temp(:,:,:,:) field_5dreal_ptr => field_5dreal_ptr % next end do else - call mpas_dmpar_bcast_reals(field_cursor % real5dField % block % domain % dminfo, size(real5d_temp), real5d_temp(:,1,1,1,1)) + call mpas_dmpar_bcast_reals( mpas_io_handle_dminfo(stream % fileHandle), size(real5d_temp), real5d_temp(:,1,1,1,1)) field_5dreal_ptr => field_cursor % real5dField do while (associated(field_5dreal_ptr)) field_5dreal_ptr % array(:,:,:,:,:) = real5d_temp(:,:,:,:,:) @@ -2988,7 +3145,7 @@ subroutine MPAS_readStream(stream, frame, ierr) !write(stderrUnit,*) 'Distributing and Copying field to other blocks' - call mpas_dmpar_bcast_char(field_cursor % char0dField % block % domain % dminfo, field_cursor % char0dField % scalar) + call mpas_dmpar_bcast_char( mpas_io_handle_dminfo(stream % fileHandle), field_cursor % char0dField % scalar) field_0dchar_ptr => field_cursor % char0dField do while (associated(field_0dchar_ptr)) field_0dchar_ptr % scalar = field_cursor % char0dField % scalar @@ -2996,6 +3153,31 @@ subroutine MPAS_readStream(stream, frame, ierr) end do else if (field_cursor % field_type == FIELD_1D_CHAR) then +!write(stderrUnit,*) 'Reading in field '//trim(field_cursor % char1dField % fieldName) +!write(stderrUnit,*) ' > is the field decomposed? ', field_cursor % isDecomposed +!write(stderrUnit,*) ' > outer dimension size ', field_cursor % totalDimSize + +!write(stderrUnit,*) 'MGD calling MPAS_io_get_var now...' + allocate(char1d_temp(field_cursor % char1dField % dimSizes(1))) + call MPAS_io_get_var(stream % fileHandle, field_cursor % char1dField % fieldName, char1d_temp, io_err) + call MPAS_io_err_mesg(io_err, .false.) 
+ if (io_err /= MPAS_IO_NOERR) then + if (present(ierr)) ierr = MPAS_IO_ERR + deallocate(char1d_temp) + return + end if + +!write(stderrUnit,*) 'Distributing and Copying field to other blocks' + + call mpas_dmpar_bcast_chars( mpas_io_handle_dminfo(stream % fileHandle), & + field_cursor % char1dField % dimSizes(1), char1d_temp) + field_1dchar_ptr => field_cursor % char1dField + do while (associated(field_1dchar_ptr)) + field_1dchar_ptr % array(:)(:) = char1d_temp(:)(:) + field_1dchar_ptr => field_1dchar_ptr % next + end do + + deallocate(char1d_temp) end if field_cursor => field_cursor % next end do @@ -3012,6 +3194,7 @@ subroutine MPAS_writeStream(stream, frame, ierr) integer, intent(in) :: frame integer, intent(out), optional :: ierr + character (len=StrKIND) :: dimensionSuffix integer :: io_err integer :: i, j integer :: ncons @@ -3073,6 +3256,12 @@ subroutine MPAS_writeStream(stream, frame, ierr) end if end if + if ( stream % blockWrite ) then + dimensionSuffix = '' + else + dimensionSuffix = 'Solve' + end if + ! ! Loop over fields in the stream ! 
@@ -3109,11 +3298,14 @@ subroutine MPAS_writeStream(stream, frame, ierr) i = 1 do while (associated(field_1dint_ptr)) if (trim(field_1dint_ptr % dimNames(1)) == 'nCells') then - call mpas_pool_get_dimension(field_1dint_ptr % block % dimensions, 'nCellsSolve', ownedSize) + call mpas_pool_get_dimension(field_1dint_ptr % block % dimensions, trim(field_1dint_ptr % dimNames(1)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_1dint_ptr % dimNames(1)) == 'nEdges') then - call mpas_pool_get_dimension(field_1dint_ptr % block % dimensions, 'nEdgesSolve', ownedSize) + call mpas_pool_get_dimension(field_1dint_ptr % block % dimensions, trim(field_1dint_ptr % dimNames(1)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_1dint_ptr % dimNames(1)) == 'nVertices') then - call mpas_pool_get_dimension(field_1dint_ptr % block % dimensions, 'nVerticesSolve', ownedSize) + call mpas_pool_get_dimension(field_1dint_ptr % block % dimensions, trim(field_1dint_ptr % dimNames(1)) & + // trim(dimensionSuffix), ownedSize) else call mpas_pool_get_dimension(field_1dint_ptr % block % dimensions, field_1dint_ptr % dimNames(1), ownedSize) end if @@ -3125,7 +3317,11 @@ subroutine MPAS_writeStream(stream, frame, ierr) int1d_temp(i:i+ownedSize-1) = field_1dint_ptr % array(1:ownedSize) end if i = i + ownedSize - field_1dint_ptr => field_1dint_ptr % next + if ( .not. 
stream % blockWrite ) then + field_1dint_ptr => field_1dint_ptr % next + else + nullify(field_1dint_ptr) + end if end do else if (field_cursor % int1dField % isVarArray) then @@ -3166,11 +3362,14 @@ subroutine MPAS_writeStream(stream, frame, ierr) i = 1 do while (associated(field_2dint_ptr)) if (trim(field_2dint_ptr % dimNames(2)) == 'nCells') then - call mpas_pool_get_dimension(field_2dint_ptr % block % dimensions, 'nCellsSolve', ownedSize) + call mpas_pool_get_dimension(field_2dint_ptr % block % dimensions, trim(field_2dint_ptr % dimNames(2)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_2dint_ptr % dimNames(2)) == 'nEdges') then - call mpas_pool_get_dimension(field_2dint_ptr % block % dimensions, 'nEdgesSolve', ownedSize) + call mpas_pool_get_dimension(field_2dint_ptr % block % dimensions, trim(field_2dint_ptr % dimNames(2)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_2dint_ptr % dimNames(2)) == 'nVertices') then - call mpas_pool_get_dimension(field_2dint_ptr % block % dimensions, 'nVerticesSolve', ownedSize) + call mpas_pool_get_dimension(field_2dint_ptr % block % dimensions, trim(field_2dint_ptr % dimNames(2)) & + // trim(dimensionSuffix), ownedSize) else call mpas_pool_get_dimension(field_2dint_ptr % block % dimensions, field_2dint_ptr % dimNames(2), ownedSize) end if @@ -3181,7 +3380,11 @@ subroutine MPAS_writeStream(stream, frame, ierr) int2d_temp(:,i:i+ownedSize-1) = field_2dint_ptr % array(:,1:ownedSize) end if i = i + ownedSize - field_2dint_ptr => field_2dint_ptr % next + if ( .not. 
stream % blockWrite ) then + field_2dint_ptr => field_2dint_ptr % next + else + nullify(field_2dint_ptr) + end if end do else if (field_cursor % int2dField % isVarArray) then @@ -3226,11 +3429,14 @@ subroutine MPAS_writeStream(stream, frame, ierr) i = 1 do while (associated(field_3dint_ptr)) if (trim(field_3dint_ptr % dimNames(3)) == 'nCells') then - call mpas_pool_get_dimension(field_3dint_ptr % block % dimensions, 'nCellsSolve', ownedSize) + call mpas_pool_get_dimension(field_3dint_ptr % block % dimensions, trim(field_3dint_ptr % dimNames(3)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_3dint_ptr % dimNames(3)) == 'nEdges') then - call mpas_pool_get_dimension(field_3dint_ptr % block % dimensions, 'nEdgesSolve', ownedSize) + call mpas_pool_get_dimension(field_3dint_ptr % block % dimensions, trim(field_3dint_ptr % dimNames(3)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_3dint_ptr % dimNames(3)) == 'nVertices') then - call mpas_pool_get_dimension(field_3dint_ptr % block % dimensions, 'nVerticesSolve', ownedSize) + call mpas_pool_get_dimension(field_3dint_ptr % block % dimensions, trim(field_3dint_ptr % dimNames(3)) & + // trim(dimensionSuffix), ownedSize) else call mpas_pool_get_dimension(field_3dint_ptr % block % dimensions, field_3dint_ptr % dimNames(3), ownedSize) end if @@ -3241,7 +3447,11 @@ subroutine MPAS_writeStream(stream, frame, ierr) int3d_temp(:,:,i:i+ownedSize-1) = field_3dint_ptr % array(:,:,1:ownedSize) end if i = i + ownedSize - field_3dint_ptr => field_3dint_ptr % next + if ( .not. 
stream % blockWrite ) then + field_3dint_ptr => field_3dint_ptr % next + else + nullify(field_3dint_ptr) + end if end do else if (field_cursor % int3dField % isVarArray) then @@ -3296,11 +3506,14 @@ subroutine MPAS_writeStream(stream, frame, ierr) i = 1 do while (associated(field_1dreal_ptr)) if (trim(field_1dreal_ptr % dimNames(1)) == 'nCells') then - call mpas_pool_get_dimension(field_1dreal_ptr % block % dimensions, 'nCellsSolve', ownedSize) + call mpas_pool_get_dimension(field_1dreal_ptr % block % dimensions, trim(field_1dreal_ptr % dimNames(1)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_1dreal_ptr % dimNames(1)) == 'nEdges') then - call mpas_pool_get_dimension(field_1dreal_ptr % block % dimensions, 'nEdgesSolve', ownedSize) + call mpas_pool_get_dimension(field_1dreal_ptr % block % dimensions, trim(field_1dreal_ptr % dimNames(1)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_1dreal_ptr % dimNames(1)) == 'nVertices') then - call mpas_pool_get_dimension(field_1dreal_ptr % block % dimensions, 'nVerticesSolve', ownedSize) + call mpas_pool_get_dimension(field_1dreal_ptr % block % dimensions, trim(field_1dreal_ptr % dimNames(1)) & + // trim(dimensionSuffix), ownedSize) else call mpas_pool_get_dimension(field_1dreal_ptr % block % dimensions, field_1dreal_ptr % dimNames(1), ownedSize) end if @@ -3312,7 +3525,11 @@ subroutine MPAS_writeStream(stream, frame, ierr) real1d_temp(i:i+ownedSize-1) = field_1dreal_ptr % array(1:ownedSize) end if i = i + ownedSize - field_1dreal_ptr => field_1dreal_ptr % next + if ( .not. 
stream % blockWrite ) then + field_1dreal_ptr => field_1dreal_ptr % next + else + nullify(field_1dreal_ptr) + end if end do else if (field_cursor % real1dField % isVarArray) then @@ -3353,11 +3570,14 @@ subroutine MPAS_writeStream(stream, frame, ierr) i = 1 do while (associated(field_2dreal_ptr)) if (trim(field_2dreal_ptr % dimNames(2)) == 'nCells') then - call mpas_pool_get_dimension(field_2dreal_ptr % block % dimensions, 'nCellsSolve', ownedSize) + call mpas_pool_get_dimension(field_2dreal_ptr % block % dimensions, trim(field_2dreal_ptr % dimNames(2)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_2dreal_ptr % dimNames(2)) == 'nEdges') then - call mpas_pool_get_dimension(field_2dreal_ptr % block % dimensions, 'nEdgesSolve', ownedSize) + call mpas_pool_get_dimension(field_2dreal_ptr % block % dimensions, trim(field_2dreal_ptr % dimNames(2)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_2dreal_ptr % dimNames(2)) == 'nVertices') then - call mpas_pool_get_dimension(field_2dreal_ptr % block % dimensions, 'nVerticesSolve', ownedSize) + call mpas_pool_get_dimension(field_2dreal_ptr % block % dimensions, trim(field_2dreal_ptr % dimNames(2)) & + // trim(dimensionSuffix), ownedSize) else call mpas_pool_get_dimension(field_2dreal_ptr % block % dimensions, field_2dreal_ptr % dimNames(2), ownedSize) end if @@ -3368,7 +3588,11 @@ subroutine MPAS_writeStream(stream, frame, ierr) real2d_temp(:,i:i+ownedSize-1) = field_2dreal_ptr % array(:,1:ownedSize) end if i = i + ownedSize - field_2dreal_ptr => field_2dreal_ptr % next + if ( .not. 
stream % blockWrite ) then + field_2dreal_ptr => field_2dreal_ptr % next + else + nullify(field_2dreal_ptr) + end if end do else if (field_cursor % real2dField % isVarArray) then @@ -3413,11 +3637,14 @@ subroutine MPAS_writeStream(stream, frame, ierr) i = 1 do while (associated(field_3dreal_ptr)) if (trim(field_3dreal_ptr % dimNames(3)) == 'nCells') then - call mpas_pool_get_dimension(field_3dreal_ptr % block % dimensions, 'nCellsSolve', ownedSize) + call mpas_pool_get_dimension(field_3dreal_ptr % block % dimensions, trim(field_3dreal_ptr % dimNames(3)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_3dreal_ptr % dimNames(3)) == 'nEdges') then - call mpas_pool_get_dimension(field_3dreal_ptr % block % dimensions, 'nEdgesSolve', ownedSize) + call mpas_pool_get_dimension(field_3dreal_ptr % block % dimensions, trim(field_3dreal_ptr % dimNames(3)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_3dreal_ptr % dimNames(3)) == 'nVertices') then - call mpas_pool_get_dimension(field_3dreal_ptr % block % dimensions, 'nVerticesSolve', ownedSize) + call mpas_pool_get_dimension(field_3dreal_ptr % block % dimensions, trim(field_3dreal_ptr % dimNames(3)) & + // trim(dimensionSuffix), ownedSize) else call mpas_pool_get_dimension(field_3dreal_ptr % block % dimensions, field_3dreal_ptr % dimNames(3), ownedSize) end if @@ -3428,7 +3655,11 @@ subroutine MPAS_writeStream(stream, frame, ierr) real3d_temp(:,:,i:i+ownedSize-1) = field_3dreal_ptr % array(:,:,1:ownedSize) end if i = i + ownedSize - field_3dreal_ptr => field_3dreal_ptr % next + if ( .not. 
stream % blockWrite ) then + field_3dreal_ptr => field_3dreal_ptr % next + else + nullify(field_3dreal_ptr) + end if end do else if (field_cursor % real3dField % isVarArray) then @@ -3475,11 +3706,14 @@ subroutine MPAS_writeStream(stream, frame, ierr) i = 1 do while (associated(field_4dreal_ptr)) if (trim(field_4dreal_ptr % dimNames(4)) == 'nCells') then - call mpas_pool_get_dimension(field_4dreal_ptr % block % dimensions, 'nCellsSolve', ownedSize) + call mpas_pool_get_dimension(field_4dreal_ptr % block % dimensions, trim(field_4dreal_ptr % dimNames(4)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_4dreal_ptr % dimNames(4)) == 'nEdges') then - call mpas_pool_get_dimension(field_4dreal_ptr % block % dimensions, 'nEdgesSolve', ownedSize) + call mpas_pool_get_dimension(field_4dreal_ptr % block % dimensions, trim(field_4dreal_ptr % dimNames(4)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_4dreal_ptr % dimNames(4)) == 'nVertices') then - call mpas_pool_get_dimension(field_4dreal_ptr % block % dimensions, 'nVerticesSolve', ownedSize) + call mpas_pool_get_dimension(field_4dreal_ptr % block % dimensions, trim(field_4dreal_ptr % dimNames(4)) & + // trim(dimensionSuffix), ownedSize) else call mpas_pool_get_dimension(field_4dreal_ptr % block % dimensions, field_4dreal_ptr % dimNames(4), ownedSize) end if @@ -3490,7 +3724,11 @@ subroutine MPAS_writeStream(stream, frame, ierr) real4d_temp(:,:,:,i:i+ownedSize-1) = field_4dreal_ptr % array(:,:,:,1:ownedSize) end if i = i + ownedSize - field_4dreal_ptr => field_4dreal_ptr % next + if ( .not. 
stream % blockWrite ) then + field_4dreal_ptr => field_4dreal_ptr % next + else + nullify(field_4dreal_ptr) + end if end do else if (field_cursor % real4dField % isVarArray) then @@ -3539,11 +3777,14 @@ subroutine MPAS_writeStream(stream, frame, ierr) i = 1 do while (associated(field_5dreal_ptr)) if (trim(field_5dreal_ptr % dimNames(5)) == 'nCells') then - call mpas_pool_get_dimension(field_5dreal_ptr % block % dimensions, 'nCellsSolve', ownedSize) + call mpas_pool_get_dimension(field_5dreal_ptr % block % dimensions, trim(field_5dreal_ptr % dimNames(5)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_5dreal_ptr % dimNames(5)) == 'nEdges') then - call mpas_pool_get_dimension(field_5dreal_ptr % block % dimensions, 'nEdgesSolve', ownedSize) + call mpas_pool_get_dimension(field_5dreal_ptr % block % dimensions, trim(field_5dreal_ptr % dimNames(5)) & + // trim(dimensionSuffix), ownedSize) else if (trim(field_5dreal_ptr % dimNames(5)) == 'nVertices') then - call mpas_pool_get_dimension(field_5dreal_ptr % block % dimensions, 'nVerticesSolve', ownedSize) + call mpas_pool_get_dimension(field_5dreal_ptr % block % dimensions, trim(field_5dreal_ptr % dimNames(5)) & + // trim(dimensionSuffix), ownedSize) else call mpas_pool_get_dimension(field_5dreal_ptr % block % dimensions, field_5dreal_ptr % dimNames(5), ownedSize) end if @@ -3554,7 +3795,11 @@ subroutine MPAS_writeStream(stream, frame, ierr) real5d_temp(:,:,:,:,i:i+ownedSize-1) = field_5dreal_ptr % array(:,:,:,:,1:ownedSize) end if i = i + ownedSize - field_5dreal_ptr => field_5dreal_ptr % next + if ( .not. stream % blockWrite ) then + field_5dreal_ptr => field_5dreal_ptr % next + else + nullify(field_5dreal_ptr) + end if end do else if (field_cursor % real5dField % isVarArray) then @@ -3592,6 +3837,18 @@ subroutine MPAS_writeStream(stream, frame, ierr) if (io_err /= MPAS_IO_NOERR .and. 
present(ierr)) ierr = MPAS_IO_ERR else if (field_cursor % field_type == FIELD_1D_CHAR) then + +!write(stderrUnit,*) 'Writing out field '//trim(field_cursor % char1dField % fieldName) +!write(stderrUnit,*) ' > is the field decomposed? ', field_cursor % isDecomposed +!write(stderrUnit,*) ' > outer dimension size ', field_cursor % totalDimSize + +!write(stderrUnit,*) 'Copying field from first block' +!write(stderrUnit,*) 'MGD calling MPAS_io_put_var now...' + call MPAS_io_put_var(stream % fileHandle, field_cursor % char1dField % fieldName, field_cursor % char1dField % array, io_err) + call MPAS_io_err_mesg(io_err, .false.) + if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR + + end if field_cursor => field_cursor % next end do @@ -3661,16 +3918,18 @@ subroutine MPAS_readStreamAtt_1dInteger(stream, attName, attValue, ierr) end subroutine MPAS_readStreamAtt_1dInteger - subroutine MPAS_readStreamAtt_0dReal(stream, attName, attValue, ierr) + subroutine MPAS_readStreamAtt_0dReal(stream, attName, attValue, precision, ierr) implicit none type (MPAS_Stream_type), intent(inout) :: stream character (len=*), intent(in) :: attName real (kind=RKIND), intent(out) :: attValue + integer, intent(in), optional :: precision integer, intent(out), optional :: ierr integer :: io_err + integer :: local_precision if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -3682,23 +3941,31 @@ subroutine MPAS_readStreamAtt_0dReal(stream, attName, attValue, ierr) return end if - call MPAS_io_get_att(stream % fileHandle, attName, attValue, ierr=io_err) + if (present(precision)) then + local_precision = precision + else + local_precision = stream % defaultPrecision + end if + + call MPAS_io_get_att(stream % fileHandle, attName, attValue, precision=local_precision, ierr=io_err) call MPAS_io_err_mesg(io_err, .false.) if (io_err /= MPAS_IO_NOERR .and. 
present(ierr)) ierr = MPAS_IO_ERR end subroutine MPAS_readStreamAtt_0dReal - subroutine MPAS_readStreamAtt_1dReal(stream, attName, attValue, ierr) + subroutine MPAS_readStreamAtt_1dReal(stream, attName, attValue, precision, ierr) implicit none type (MPAS_Stream_type), intent(inout) :: stream character (len=*), intent(in) :: attName real (kind=RKIND), dimension(:), pointer :: attValue + integer, intent(in), optional :: precision integer, intent(out), optional :: ierr integer :: io_err + integer :: local_precision if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -3710,7 +3977,13 @@ subroutine MPAS_readStreamAtt_1dReal(stream, attName, attValue, ierr) return end if - call MPAS_io_get_att(stream % fileHandle, attName, attValue, ierr=io_err) + if (present(precision)) then + local_precision = precision + else + local_precision = stream % defaultPrecision + end if + + call MPAS_io_get_att(stream % fileHandle, attName, attValue, precision=local_precision, ierr=io_err) call MPAS_io_err_mesg(io_err, .false.) if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR @@ -3745,13 +4018,14 @@ subroutine MPAS_readStreamAtt_text(stream, attName, attValue, ierr) end subroutine MPAS_readStreamAtt_text - subroutine MPAS_writeStreamAtt_0dInteger(stream, attName, attValue, ierr) + subroutine MPAS_writeStreamAtt_0dInteger(stream, attName, attValue, syncVal, ierr) implicit none type (MPAS_Stream_type), intent(inout) :: stream character (len=*), intent(in) :: attName integer, intent(in) :: attValue + logical, intent(in), optional :: syncVal integer, intent(out), optional :: ierr integer :: io_err @@ -3766,20 +4040,21 @@ subroutine MPAS_writeStreamAtt_0dInteger(stream, attName, attValue, ierr) return end if - call MPAS_io_put_att(stream % fileHandle, attName, attValue, ierr=io_err) + call MPAS_io_put_att(stream % fileHandle, attName, attValue, syncVal=syncVal, ierr=io_err) call MPAS_io_err_mesg(io_err, .false.) if (io_err /= MPAS_IO_NOERR .and. 
present(ierr)) ierr = MPAS_IO_ERR end subroutine MPAS_writeStreamAtt_0dInteger - subroutine MPAS_writeStreamAtt_1dInteger(stream, attName, attValue, ierr) + subroutine MPAS_writeStreamAtt_1dInteger(stream, attName, attValue, syncVal, ierr) implicit none type (MPAS_Stream_type), intent(inout) :: stream character (len=*), intent(in) :: attName integer, dimension(:), intent(in) :: attValue + logical, intent(in), optional :: syncVal integer, intent(out), optional :: ierr integer :: io_err @@ -3794,23 +4069,26 @@ subroutine MPAS_writeStreamAtt_1dInteger(stream, attName, attValue, ierr) return end if - call MPAS_io_put_att(stream % fileHandle, attName, attValue, ierr=io_err) + call MPAS_io_put_att(stream % fileHandle, attName, attValue, syncVal=syncVal, ierr=io_err) call MPAS_io_err_mesg(io_err, .false.) if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR end subroutine MPAS_writeStreamAtt_1dInteger - subroutine MPAS_writeStreamAtt_0dReal(stream, attName, attValue, ierr) + subroutine MPAS_writeStreamAtt_0dReal(stream, attName, attValue, syncVal, precision, ierr) implicit none type (MPAS_Stream_type), intent(inout) :: stream character (len=*), intent(in) :: attName real (kind=RKIND), intent(in) :: attValue + logical, intent(in), optional :: syncVal + integer, intent(in), optional :: precision integer, intent(out), optional :: ierr integer :: io_err + integer :: local_precision if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -3822,23 +4100,32 @@ subroutine MPAS_writeStreamAtt_0dReal(stream, attName, attValue, ierr) return end if - call MPAS_io_put_att(stream % fileHandle, attName, attValue, ierr=io_err) + if (present(precision)) then + local_precision = precision + else + local_precision = stream % defaultPrecision + end if + + call MPAS_io_put_att(stream % fileHandle, attName, attValue, syncVal=syncVal, precision=local_precision, ierr=io_err) call MPAS_io_err_mesg(io_err, .false.) if (io_err /= MPAS_IO_NOERR .and. 
present(ierr)) ierr = MPAS_IO_ERR end subroutine MPAS_writeStreamAtt_0dReal - subroutine MPAS_writeStreamAtt_1dReal(stream, attName, attValue, ierr) + subroutine MPAS_writeStreamAtt_1dReal(stream, attName, attValue, syncVal, precision, ierr) implicit none type (MPAS_Stream_type), intent(inout) :: stream character (len=*), intent(in) :: attName real (kind=RKIND), dimension(:), intent(in) :: attValue + logical, intent(in), optional :: syncVal + integer, intent(in), optional :: precision integer, intent(out), optional :: ierr integer :: io_err + integer :: local_precision if (present(ierr)) ierr = MPAS_STREAM_NOERR @@ -3850,20 +4137,27 @@ subroutine MPAS_writeStreamAtt_1dReal(stream, attName, attValue, ierr) return end if - call MPAS_io_put_att(stream % fileHandle, attName, attValue, ierr=io_err) + if (present(precision)) then + local_precision = precision + else + local_precision = stream % defaultPrecision + end if + + call MPAS_io_put_att(stream % fileHandle, attName, attValue, syncVal=syncVal, precision=local_precision, ierr=io_err) call MPAS_io_err_mesg(io_err, .false.) if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR end subroutine MPAS_writeStreamAtt_1dReal - subroutine MPAS_writeStreamAtt_text(stream, attName, attValue, ierr) + subroutine MPAS_writeStreamAtt_text(stream, attName, attValue, syncVal, ierr) implicit none type (MPAS_Stream_type), intent(inout) :: stream character (len=*), intent(in) :: attName character (len=*), intent(in) :: attValue + logical, intent(in), optional :: syncVal integer, intent(out), optional :: ierr integer :: io_err @@ -3878,7 +4172,7 @@ subroutine MPAS_writeStreamAtt_text(stream, attName, attValue, ierr) return end if - call MPAS_io_put_att(stream % fileHandle, attName, attValue, ierr=io_err) + call MPAS_io_put_att(stream % fileHandle, attName, attValue, syncVal=syncVal, ierr=io_err) call MPAS_io_err_mesg(io_err, .false.) if (io_err /= MPAS_IO_NOERR .and. 
present(ierr)) ierr = MPAS_IO_ERR diff --git a/src/framework/mpas_io_streams_types.inc b/src/framework/mpas_io_streams_types.inc index f35a7f2159..2c991ffa31 100644 --- a/src/framework/mpas_io_streams_types.inc +++ b/src/framework/mpas_io_streams_types.inc @@ -52,6 +52,9 @@ integer :: ioDirection integer :: defaultPrecision = MPAS_IO_NATIVE_PRECISION logical :: clobberRecords = .false. + ! When blockWrite is true, a stream only writes a single block. + ! This option is really only useful for writing debugging streams out. + logical :: blockWrite = .false. character(len=StrKIND) :: filename type (MPAS_IO_Handle_type) :: fileHandle type (att_list_type), pointer :: attList => null() diff --git a/src/framework/mpas_io_types.inc b/src/framework/mpas_io_types.inc index f1c051db1f..6cb61723e6 100644 --- a/src/framework/mpas_io_types.inc +++ b/src/framework/mpas_io_types.inc @@ -1,3 +1,9 @@ +#ifdef USE_PIO2 + integer, parameter :: MPAS_IO_OFFSET_KIND = PIO_OFFSET_KIND +#else + integer, parameter :: MPAS_IO_OFFSET_KIND = PIO_OFFSET +#endif + ! 
File access modes integer, parameter :: MPAS_IO_READ = 1, & MPAS_IO_WRITE = 2 @@ -61,13 +67,14 @@ integer :: ioformat integer :: pio_unlimited_dimid integer :: preexisting_records = 0 - integer (kind=PIO_offset) :: frame_number = 1 + integer (kind=MPAS_IO_OFFSET_KIND) :: frame_number = 1 type (dimlist_type), pointer :: dimlist_head => null() type (dimlist_type), pointer :: dimlist_tail => null() type (fieldlist_type), pointer :: fieldlist_head => null() type (fieldlist_type), pointer :: fieldlist_tail => null() type (attlist_type), pointer :: attlist_head => null() type (attlist_type), pointer :: attlist_tail => null() + type (mpas_io_context_type), pointer :: ioContext => null() end type MPAS_IO_Handle_type type decomphandle_type @@ -81,6 +88,7 @@ character (len=StrKIND) :: attName integer :: attType integer :: attValueInt + integer :: precision integer, dimension(:), pointer :: attValueIntA => null() real (kind=RKIND) :: attValueReal real (kind=RKIND), dimension(:), pointer :: attValueRealA => null() @@ -128,3 +136,9 @@ type (fieldlist_type), pointer :: next => null() end type fieldlist_type + type mpas_io_context_type + type (decomplist_type), pointer :: decomp_list => null() + type (iosystem_desc_t), pointer :: pio_iosystem => null() + integer :: master_pio_iotype = -999 + type (dm_info), pointer :: dminfo => null() + end type mpas_io_context_type diff --git a/src/framework/mpas_io_units.F b/src/framework/mpas_io_units.F index 005a05267b..f0463bdae1 100644 --- a/src/framework/mpas_io_units.F +++ b/src/framework/mpas_io_units.F @@ -44,11 +44,18 @@ subroutine mpas_new_unit(newUnit)!{{{ integer :: i + logical :: opened + do i = 1, maxUnits - if(.not. unitsInUse(i) ) then - newUnit = i - unitsInUse(newUnit) = .true. - return + if (.not. unitsInUse(i)) then + inquire(i, opened=opened) + if (opened) then + unitsInUse(i) = .true. + else + newUnit = i + unitsInUse(newUnit) = .true. 
+ return + endif end if end do diff --git a/src/framework/mpas_kind_types.F b/src/framework/mpas_kind_types.F index 8bdadf29e6..4cd3abf1e9 100644 --- a/src/framework/mpas_kind_types.F +++ b/src/framework/mpas_kind_types.F @@ -30,7 +30,7 @@ module mpas_kind_types integer, parameter :: I8KIND = selected_int_kind(18) integer, parameter :: StrKIND = 512 - integer, parameter :: ShortStrKIND = 32 + integer, parameter :: ShortStrKIND = 64 contains diff --git a/src/framework/mpas_particle_list_types.inc b/src/framework/mpas_particle_list_types.inc new file mode 100644 index 0000000000..126d0d981c --- /dev/null +++ b/src/framework/mpas_particle_list_types.inc @@ -0,0 +1,24 @@ + ! main particle type (used by particle_framework) + type mpas_particle_type + ! IO block number + !integer :: ioBlock = 0 + ! pool containing particle data needed for communication + ! between computational cells + type (mpas_pool_type), pointer :: haloDataPool => NULL() + ! pool containing particle data not needing to be passed + ! between processors + type (mpas_pool_type), pointer :: nonhaloDataPool => NULL() + end type mpas_particle_type + + ! just a list of particles (main unit that code operates on) + type mpas_particle_list_type + type (mpas_particle_type), pointer :: particle => NULL() + ! next item in the list + type (mpas_particle_list_type), pointer :: next => NULL() + ! 
previous item in the list + type (mpas_particle_list_type), pointer :: prev => NULL() + end type mpas_particle_list_type + + type mpas_list_of_particle_list_type + type (mpas_particle_list_type), pointer :: list => NULL() + end type mpas_list_of_particle_list_type diff --git a/src/framework/mpas_pool_routines.F b/src/framework/mpas_pool_routines.F index 78fa04c095..d2892f0250 100644 --- a/src/framework/mpas_pool_routines.F +++ b/src/framework/mpas_pool_routines.F @@ -22,6 +22,8 @@ module mpas_pool_routines use mpas_derived_types use mpas_io_units use mpas_field_routines + use mpas_threading + use mpas_abort, only : mpas_dmpar_global_abort interface mpas_pool_add_field module procedure mpas_pool_add_field_0d_real @@ -124,8 +126,13 @@ subroutine mpas_pool_set_error_level(newErrorLevel) !{{{ implicit none integer, intent(in) :: newErrorLevel + integer :: threadNum - currentErrorLevel = newErrorLevel + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + currentErrorLevel = newErrorLevel + end if end subroutine mpas_pool_set_error_level !}}} @@ -166,16 +173,20 @@ subroutine mpas_pool_create_pool(newPool, poolSize)!{{{ type (mpas_pool_type), pointer :: newPool integer, intent(in), optional :: poolSize + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newPool) + if ( threadNum == 0 ) then + allocate(newPool) - if (present(poolSize)) then - newPool % size = poolSize - else - newPool % size = MPAS_POOL_TABLE_SIZE + if (present(poolSize)) then + newPool % size = poolSize + else + newPool % size = MPAS_POOL_TABLE_SIZE + end if + allocate(newPool % table(newPool % size)) end if - allocate(newPool % table(newPool % size)) end subroutine mpas_pool_create_pool!}}} @@ -199,192 +210,196 @@ recursive subroutine mpas_pool_destroy_pool(inPool)!{{{ integer :: i, j type (mpas_pool_member_type), pointer :: ptr type (mpas_pool_data_type), pointer :: dptr + integer :: local_err, threadNum + threadNum = mpas_threading_get_thread_num() - 
do i=1,inPool % size - - ptr => inPool % table(i) % head - do while(associated(inPool % table(i) % head)) + if ( threadNum == 0 ) then + do i=1,inPool % size + ptr => inPool % table(i) % head - inPool % table(i) % head => inPool % table(i) % head % next - - if (ptr % contentsType == MPAS_POOL_DIMENSION) then - - if (ptr % data % contentsDims > 0) then - deallocate(ptr % data % simple_int_arr) - else - deallocate(ptr % data % simple_int) - end if - - else if (ptr % contentsType == MPAS_POOL_CONFIG) then - - dptr => ptr % data - - if (dptr % contentsType == MPAS_POOL_REAL) then - deallocate(dptr % simple_real) - else if (dptr % contentsType == MPAS_POOL_INTEGER) then - deallocate(dptr % simple_int) - else if (dptr % contentsType == MPAS_POOL_CHARACTER) then - deallocate(dptr % simple_char) - else if (dptr % contentsType == MPAS_POOL_LOGICAL) then - deallocate(dptr % simple_logical) - end if - - else if (ptr % contentsType == MPAS_POOL_FIELD) then - - dptr => ptr % data - - ! Do this through brute force... 
- if (associated(dptr % r0)) then - deallocate(dptr % r0) - else if (associated(dptr % r1)) then - if (associated(dptr % r1 % array)) then - deallocate(dptr % r1 % array) - end if - - deallocate(dptr % r1) - else if (associated(dptr % r2)) then - if (associated(dptr % r2 % array)) then - deallocate(dptr % r2 % array) - end if - - deallocate(dptr % r2) - else if (associated(dptr % r3)) then - if (associated(dptr % r3 % array)) then - deallocate(dptr % r3 % array) - end if - - deallocate(dptr % r3) - else if (associated(dptr % r4)) then - if (associated(dptr % r4 % array)) then - deallocate(dptr % r4 % array) - end if - - deallocate(dptr % r4) - else if (associated(dptr % r5)) then - if (associated(dptr % r5 % array)) then - deallocate(dptr % r5 % array) - end if - - deallocate(dptr % r5) - else if (associated(dptr % i0)) then - deallocate(dptr % i0) - else if (associated(dptr % i1)) then - if (associated(dptr % i1 % array)) then - deallocate(dptr % i1 % array) - end if - - deallocate(dptr % i1) - else if (associated(dptr % i2)) then - if (associated(dptr % i2 % array)) then - deallocate(dptr % i2 % array) - end if - - deallocate(dptr % i2) - else if (associated(dptr % i3)) then - if (associated(dptr % i3 % array)) then - deallocate(dptr % i3 % array) + do while(associated(inPool % table(i) % head)) + ptr => inPool % table(i) % head + inPool % table(i) % head => inPool % table(i) % head % next + + if (ptr % contentsType == MPAS_POOL_DIMENSION) then + + if (ptr % data % contentsDims > 0) then + deallocate(ptr % data % simple_int_arr, stat=local_err) + else + deallocate(ptr % data % simple_int, stat=local_err) end if - - deallocate(dptr % i3) - else if (associated(dptr % c0)) then - deallocate(dptr % c0) - else if (associated(dptr % c1)) then - if (associated(dptr % c1 % array)) then - deallocate(dptr % c1 % array) + + else if (ptr % contentsType == MPAS_POOL_CONFIG) then + + dptr => ptr % data + + if (dptr % contentsType == MPAS_POOL_REAL) then + deallocate(dptr % 
simple_real, stat=local_err) + else if (dptr % contentsType == MPAS_POOL_INTEGER) then + deallocate(dptr % simple_int, stat=local_err) + else if (dptr % contentsType == MPAS_POOL_CHARACTER) then + deallocate(dptr % simple_char, stat=local_err) + else if (dptr % contentsType == MPAS_POOL_LOGICAL) then + deallocate(dptr % simple_logical, stat=local_err) end if - - deallocate(dptr % c1) - else if (associated(dptr % l0)) then - deallocate(dptr % l0) - else if (associated(dptr % r0a)) then - deallocate(dptr % r0a) - else if (associated(dptr % r1a)) then - do j=1,dptr % contentsTimeLevs - if (associated(dptr % r1a(j) % array)) then - deallocate(dptr % r1a(j) % array) + + else if (ptr % contentsType == MPAS_POOL_FIELD) then + + dptr => ptr % data + + ! Do this through brute force... + if (associated(dptr % r0)) then + deallocate(dptr % r0, stat=local_err) + else if (associated(dptr % r1)) then + if (associated(dptr % r1 % array)) then + deallocate(dptr % r1 % array, stat=local_err) end if - end do - deallocate(dptr % r1a) - else if (associated(dptr % r2a)) then - do j=1,dptr % contentsTimeLevs - if (associated(dptr % r2a(j) % array)) then - deallocate(dptr % r2a(j) % array) + + deallocate(dptr % r1, stat=local_err) + else if (associated(dptr % r2)) then + if (associated(dptr % r2 % array)) then + deallocate(dptr % r2 % array, stat=local_err) end if - end do - deallocate(dptr % r2a) - else if (associated(dptr % r3a)) then - do j=1,dptr % contentsTimeLevs - if (associated(dptr % r3a(j) % array)) then - deallocate(dptr % r3a(j) % array) + + deallocate(dptr % r2, stat=local_err) + else if (associated(dptr % r3)) then + if (associated(dptr % r3 % array)) then + deallocate(dptr % r3 % array, stat=local_err) end if - end do - deallocate(dptr % r3a) - else if (associated(dptr % r4a)) then - do j=1,dptr % contentsTimeLevs - if (associated(dptr % r4a(j) % array)) then - deallocate(dptr % r4a(j) % array) + + deallocate(dptr % r3, stat=local_err) + else if (associated(dptr % r4)) 
then + if (associated(dptr % r4 % array)) then + deallocate(dptr % r4 % array, stat=local_err) end if - end do - deallocate(dptr % r4a) - else if (associated(dptr % r5a)) then - do j=1,dptr % contentsTimeLevs - if (associated(dptr % r5a(j) % array)) then - deallocate(dptr % r5a(j) % array) + + deallocate(dptr % r4, stat=local_err) + else if (associated(dptr % r5)) then + if (associated(dptr % r5 % array)) then + deallocate(dptr % r5 % array, stat=local_err) end if - end do - deallocate(dptr % r5a) - else if (associated(dptr % i0a)) then - deallocate(dptr % i0a) - else if (associated(dptr % i1a)) then - do j=1,dptr % contentsTimeLevs - if (associated(dptr % i1a(j) % array)) then - deallocate(dptr % i1a(j) % array) + + deallocate(dptr % r5, stat=local_err) + else if (associated(dptr % i0)) then + deallocate(dptr % i0, stat=local_err) + else if (associated(dptr % i1)) then + if (associated(dptr % i1 % array)) then + deallocate(dptr % i1 % array, stat=local_err) end if - end do - deallocate(dptr % i1a) - else if (associated(dptr % i2a)) then - do j=1,dptr % contentsTimeLevs - if (associated(dptr % i2a(j) % array)) then - deallocate(dptr % i2a(j) % array) + + deallocate(dptr % i1, stat=local_err) + else if (associated(dptr % i2)) then + if (associated(dptr % i2 % array)) then + deallocate(dptr % i2 % array, stat=local_err) end if - end do - deallocate(dptr % i2a) - else if (associated(dptr % i3a)) then - do j=1,dptr % contentsTimeLevs - if (associated(dptr % i3a(j) % array)) then - deallocate(dptr % i3a(j) % array) + + deallocate(dptr % i2, stat=local_err) + else if (associated(dptr % i3)) then + if (associated(dptr % i3 % array)) then + deallocate(dptr % i3 % array, stat=local_err) end if - end do - deallocate(dptr % i3a) - else if (associated(dptr % c0a)) then - deallocate(dptr % c0a) - else if (associated(dptr % c1a)) then - do j=1,dptr % contentsTimeLevs - if (associated(dptr % c1a(j) % array)) then - deallocate(dptr % c1a(j) % array) + + deallocate(dptr % i3, 
stat=local_err) + else if (associated(dptr % c0)) then + deallocate(dptr % c0, stat=local_err) + else if (associated(dptr % c1)) then + if (associated(dptr % c1 % array)) then + deallocate(dptr % c1 % array, stat=local_err) end if - end do - deallocate(dptr % c1a) - else if (associated(dptr % l0a)) then - deallocate(dptr % l0a) - else - call pool_mesg('While destroying pool, member '//trim(ptr % key)//' has no valid field pointers.') + + deallocate(dptr % c1, stat=local_err) + else if (associated(dptr % l0)) then + deallocate(dptr % l0, stat=local_err) + else if (associated(dptr % r0a)) then + deallocate(dptr % r0a, stat=local_err) + else if (associated(dptr % r1a)) then + do j=1,dptr % contentsTimeLevs + if (associated(dptr % r1a(j) % array)) then + deallocate(dptr % r1a(j) % array, stat=local_err) + end if + end do + deallocate(dptr % r1a, stat=local_err) + else if (associated(dptr % r2a)) then + do j=1,dptr % contentsTimeLevs + if (associated(dptr % r2a(j) % array)) then + deallocate(dptr % r2a(j) % array, stat=local_err) + end if + end do + deallocate(dptr % r2a, stat=local_err) + else if (associated(dptr % r3a)) then + do j=1,dptr % contentsTimeLevs + if (associated(dptr % r3a(j) % array)) then + deallocate(dptr % r3a(j) % array, stat=local_err) + end if + end do + deallocate(dptr % r3a, stat=local_err) + else if (associated(dptr % r4a)) then + do j=1,dptr % contentsTimeLevs + if (associated(dptr % r4a(j) % array)) then + deallocate(dptr % r4a(j) % array, stat=local_err) + end if + end do + deallocate(dptr % r4a, stat=local_err) + else if (associated(dptr % r5a)) then + do j=1,dptr % contentsTimeLevs + if (associated(dptr % r5a(j) % array)) then + deallocate(dptr % r5a(j) % array, stat=local_err) + end if + end do + deallocate(dptr % r5a, stat=local_err) + else if (associated(dptr % i0a)) then + deallocate(dptr % i0a, stat=local_err) + else if (associated(dptr % i1a)) then + do j=1,dptr % contentsTimeLevs + if (associated(dptr % i1a(j) % array)) then + 
deallocate(dptr % i1a(j) % array, stat=local_err) + end if + end do + deallocate(dptr % i1a, stat=local_err) + else if (associated(dptr % i2a)) then + do j=1,dptr % contentsTimeLevs + if (associated(dptr % i2a(j) % array)) then + deallocate(dptr % i2a(j) % array, stat=local_err) + end if + end do + deallocate(dptr % i2a, stat=local_err) + else if (associated(dptr % i3a)) then + do j=1,dptr % contentsTimeLevs + if (associated(dptr % i3a(j) % array)) then + deallocate(dptr % i3a(j) % array, stat=local_err) + end if + end do + deallocate(dptr % i3a, stat=local_err) + else if (associated(dptr % c0a)) then + deallocate(dptr % c0a, stat=local_err) + else if (associated(dptr % c1a)) then + do j=1,dptr % contentsTimeLevs + if (associated(dptr % c1a(j) % array)) then + deallocate(dptr % c1a(j) % array, stat=local_err) + end if + end do + deallocate(dptr % c1a, stat=local_err) + else if (associated(dptr % l0a)) then + deallocate(dptr % l0a, stat=local_err) + else + call pool_mesg('While destroying pool, member '//trim(ptr % key)//' has no valid field pointers.') + end if + + else if (ptr % contentsType == MPAS_POOL_SUBPOOL) then + + call mpas_pool_destroy_pool(ptr % data % p) + end if - - else if (ptr % contentsType == MPAS_POOL_SUBPOOL) then - - call mpas_pool_destroy_pool(ptr % data % p) - - end if - deallocate(ptr % data) - deallocate(ptr) + deallocate(ptr % data, stat=local_err) + deallocate(ptr, stat=local_err) + end do + end do - - end do - - deallocate(inPool % table) - deallocate(inPool) + + deallocate(inPool % table, stat=local_err) + deallocate(inPool, stat=local_err) + end if end subroutine mpas_pool_destroy_pool!}}} @@ -407,42 +422,46 @@ recursive subroutine mpas_pool_empty_pool(inPool)!{{{ integer :: i type (mpas_pool_member_type), pointer :: ptr + integer :: local_err, threadNum + threadNum = mpas_threading_get_thread_num() - do i=1,inPool % size + if ( threadNum == 0 ) then + do i=1,inPool % size - ptr => inPool % table(i) % head - do while(associated(inPool % 
table(i) % head)) ptr => inPool % table(i) % head - inPool % table(i) % head => inPool % table(i) % head % next - if (ptr % contentsType == MPAS_POOL_DIMENSION) then - if (ptr % data % contentsDims > 0) then - deallocate(ptr % data % simple_int_arr) - else - deallocate(ptr % data % simple_int) - end if - else if (ptr % contentsType == MPAS_POOL_CONFIG) then - if (ptr % data % contentsType == MPAS_POOL_REAL) then - deallocate(ptr % data % simple_real) - else if (ptr % data % contentsType == MPAS_POOL_INTEGER) then - deallocate(ptr % data % simple_int) - else if (ptr % data % contentsType == MPAS_POOL_CHARACTER) then - deallocate(ptr % data % simple_char) - else if (ptr % data % contentsType == MPAS_POOL_LOGICAL) then - deallocate(ptr % data % simple_logical) + do while(associated(inPool % table(i) % head)) + ptr => inPool % table(i) % head + inPool % table(i) % head => inPool % table(i) % head % next + if (ptr % contentsType == MPAS_POOL_DIMENSION) then + if (ptr % data % contentsDims > 0) then + deallocate(ptr % data % simple_int_arr, stat=local_err) + else + deallocate(ptr % data % simple_int, stat=local_err) + end if + else if (ptr % contentsType == MPAS_POOL_CONFIG) then + if (ptr % data % contentsType == MPAS_POOL_REAL) then + deallocate(ptr % data % simple_real, stat=local_err) + else if (ptr % data % contentsType == MPAS_POOL_INTEGER) then + deallocate(ptr % data % simple_int, stat=local_err) + else if (ptr % data % contentsType == MPAS_POOL_CHARACTER) then + deallocate(ptr % data % simple_char, stat=local_err) + else if (ptr % data % contentsType == MPAS_POOL_LOGICAL) then + deallocate(ptr % data % simple_logical, stat=local_err) + end if + else if (ptr % contentsType == MPAS_POOL_PACKAGE) then + deallocate(ptr % data % simple_logical, stat=local_err) + else if (ptr % contentsType == MPAS_POOL_SUBPOOL) then + call mpas_pool_empty_pool(ptr % data % p) + deallocate(ptr % data % p, stat=local_err) end if - else if (ptr % contentsType == MPAS_POOL_PACKAGE) then 
- deallocate(ptr % data % simple_logical) - else if (ptr % contentsType == MPAS_POOL_SUBPOOL) then - call mpas_pool_empty_pool(ptr % data % p) - deallocate(ptr % data % p) - end if - deallocate(ptr) - end do + deallocate(ptr, stat=local_err) + end do - end do + end do - nullify(inPool % iterator) + nullify(inPool % iterator) + end if end subroutine mpas_pool_empty_pool!}}} @@ -467,11 +486,12 @@ recursive subroutine mpas_pool_clone_pool(srcPool, destPool, overrideTimeLevels) integer, intent(in), optional :: overrideTimeLevels - integer :: i, j, newTimeLevels, minTimeLevels + integer :: i, j, newTimeLevels, minTimeLevels, threadNum type (mpas_pool_member_type), pointer :: ptr type (mpas_pool_data_type), pointer :: dptr type (mpas_pool_member_type), pointer :: newmem + threadNum = mpas_threading_get_thread_num() newTimeLevels = -1 if (present(overrideTimeLevels)) then @@ -487,508 +507,50 @@ recursive subroutine mpas_pool_clone_pool(srcPool, destPool, overrideTimeLevels) !TODO: should we force destPool to have the same table size as srcPool? 
- ptr => srcPool % iteration_head - do while(associated(ptr)) - - allocate(newmem) - newmem % key = ptr % key - newmem % keyLen = ptr % keyLen - newmem % contentsType = ptr % contentsType - allocate(newmem % data) - newmem % data % contentsType = ptr % data % contentsType - newmem % data % contentsDims = ptr % data % contentsDims - if (newTimeLevels /= -1) then - newmem % data % contentsTimeLevs = newTimeLevels - else - newmem % data % contentsTimeLevs = ptr % data % contentsTimeLevs - end if - - if (ptr % contentsType == MPAS_POOL_DIMENSION) then - - if (ptr % data % contentsDims > 0) then - allocate(newmem % data % simple_int_arr(size(ptr % data % simple_int_arr))) - newmem % data % simple_int_arr(:) = ptr % data % simple_int_arr(:) - else - allocate(newmem % data % simple_int) - newmem % data % simple_int = ptr % data % simple_int - end if - - else if (ptr % contentsType == MPAS_POOL_CONFIG) then - - dptr => ptr % data - - if (dptr % contentsType == MPAS_POOL_REAL) then - allocate(newmem % data % simple_real) - newmem % data % simple_real = dptr % simple_real - else if (dptr % contentsType == MPAS_POOL_INTEGER) then - allocate(newmem % data % simple_int) - newmem % data % simple_int = dptr % simple_int - else if (dptr % contentsType == MPAS_POOL_CHARACTER) then - allocate(newmem % data % simple_char) - newmem % data % simple_char = dptr % simple_char - else if (dptr % contentsType == MPAS_POOL_LOGICAL) then - allocate(newmem % data % simple_logical) - newmem % data % simple_logical = dptr % simple_logical - end if - - else if (ptr % contentsType == MPAS_POOL_FIELD) then - - dptr => ptr % data + !TODO: Allow threading on copy calls below. + if ( threadNum == 0 ) then + ptr => srcPool % iteration_head + do while(associated(ptr)) - ! Do this through brute force... 
- if (associated(dptr % r0)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r0a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r0, newmem % data % r0) - newmem % data % r0a(j) = newmem % data % r0 - deallocate(newmem % data % r0) - end do - else - call mpas_duplicate_field(dptr % r0, newmem % data % r0) - end if - else if (associated(dptr % r1)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r1a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r1, newmem % data % r1) - newmem % data % r1a(j) = newmem % data % r1 - deallocate(newmem % data % r1) - end do - else - call mpas_duplicate_field(dptr % r1, newmem % data % r1) - end if - else if (associated(dptr % r2)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r2a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r2, newmem % data % r2) - newmem % data % r2a(j) = newmem % data % r2 - deallocate(newmem % data % r2) - end do - else - call mpas_duplicate_field(dptr % r2, newmem % data % r2) - end if - else if (associated(dptr % r3)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r3a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r3, newmem % data % r3) - newmem % data % r3a(j) = newmem % data % r3 - deallocate(newmem % data % r3) - end do - else - call mpas_duplicate_field(dptr % r3, newmem % data % r3) - end if - else if (associated(dptr % r4)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r4a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r4, newmem % data % r4) - newmem % data % r4a(j) = newmem % data % r4 - 
deallocate(newmem % data % r4) - end do - else - call mpas_duplicate_field(dptr % r4, newmem % data % r4) - end if - else if (associated(dptr % r5)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r5a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r5, newmem % data % r5) - newmem % data % r5a(j) = newmem % data % r5 - deallocate(newmem % data % r5) - end do - else - call mpas_duplicate_field(dptr % r5, newmem % data % r5) - end if - else if (associated(dptr % i0)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % i0a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % i0, newmem % data % i0) - newmem % data % i0a(j) = newmem % data % i0 - deallocate(newmem % data % i0) - end do - else - call mpas_duplicate_field(dptr % i0, newmem % data % i0) - end if - else if (associated(dptr % i1)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % i1a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % i1, newmem % data % i1) - newmem % data % i1a(j) = newmem % data % i1 - deallocate(newmem % data % i1) - end do - else - call mpas_duplicate_field(dptr % i1, newmem % data % i1) - end if - else if (associated(dptr % i2)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % i2a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % i2, newmem % data % i2) - newmem % data % i2a(j) = newmem % data % i2 - deallocate(newmem % data % i2) - end do - else - call mpas_duplicate_field(dptr % i2, newmem % data % i2) - end if - else if (associated(dptr % i3)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % i3a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - 
call mpas_duplicate_field(dptr % i3, newmem % data % i3) - newmem % data % i3a(j) = newmem % data % i3 - deallocate(newmem % data % i3) - end do - else - call mpas_duplicate_field(dptr % i3, newmem % data % i3) - end if - else if (associated(dptr % c0)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % c0a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % c0, newmem % data % c0) - newmem % data % c0a(j) = newmem % data % c0 - deallocate(newmem % data % c0) - end do - else - call mpas_duplicate_field(dptr % c0, newmem % data % c0) - end if - else if (associated(dptr % c1)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % c1a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % c1, newmem % data % c1) - newmem % data % c1a(j) = newmem % data % c1 - deallocate(newmem % data % c1) - end do - else - call mpas_duplicate_field(dptr % c1, newmem % data % c1) - end if - else if (associated(dptr % l0)) then - if (newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % l0a(newmem % data % contentsTimeLevs)) - do j = 1, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % l0, newmem % data % l0) - newmem % data % l0a(j) = newmem % data % l0 - deallocate(newmem % data % l0) - end do - else - call mpas_duplicate_field(dptr % l0, newmem % data % l0) - end if - else if (associated(dptr % r0a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r0a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % r0a(j), newmem % data % r0) - newmem % data % r0a(j) = newmem % data % r0 - deallocate(newmem % data % r0) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r0a(dptr 
% contentsTimeLevs), newmem % data % r0) - newmem % data % r0a(j) = newmem % data % r0 - deallocate(newmem % data % r0) - end do - else - call mpas_duplicate_field(dptr % r0a(1), newmem % data % r0) - end if - else if (associated(dptr % r1a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r1a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % r1a(j), newmem % data % r1) - newmem % data % r1a(j) = newmem % data % r1 - deallocate(newmem % data % r1) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r1a(dptr % contentsTimeLevs), newmem % data % r1) - newmem % data % r1a(j) = newmem % data % r1 - deallocate(newmem % data % r1) - end do - else - call mpas_duplicate_field(dptr % r1a(1), newmem % data % r1) - end if - else if (associated(dptr % r2a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r2a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % r2a(j), newmem % data % r2) - newmem % data % r2a(j) = newmem % data % r2 - deallocate(newmem % data % r2) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r2a(dptr % contentsTimeLevs), newmem % data % r2) - newmem % data % r2a(j) = newmem % data % r2 - deallocate(newmem % data % r2) - end do - else - call mpas_duplicate_field(dptr % r2a(1), newmem % data % r2) - end if - else if (associated(dptr % r3a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r3a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % r3a(j), newmem % data % r3) - newmem % data 
% r3a(j) = newmem % data % r3 - deallocate(newmem % data % r3) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r3a(dptr % contentsTimeLevs), newmem % data % r3) - newmem % data % r3a(j) = newmem % data % r3 - deallocate(newmem % data % r3) - end do - else - call mpas_duplicate_field(dptr % r3a(1), newmem % data % r3) - end if - else if (associated(dptr % r4a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r4a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % r4a(j), newmem % data % r4) - newmem % data % r4a(j) = newmem % data % r4 - deallocate(newmem % data % r4) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r4a(dptr % contentsTimeLevs), newmem % data % r4) - newmem % data % r4a(j) = newmem % data % r4 - deallocate(newmem % data % r4) - end do - else - call mpas_duplicate_field(dptr % r4a(1), newmem % data % r4) - end if - else if (associated(dptr % r5a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % r5a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % r5a(j), newmem % data % r5) - newmem % data % r5a(j) = newmem % data % r5 - deallocate(newmem % data % r5) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % r5a(dptr % contentsTimeLevs), newmem % data % r5) - newmem % data % r5a(j) = newmem % data % r5 - deallocate(newmem % data % r5) - end do - else - call mpas_duplicate_field(dptr % r5a(1), newmem % data % r5) - end if - else if (associated(dptr % i0a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % i0a(newmem % data % contentsTimeLevs)) - 
minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % i0a(j), newmem % data % i0) - newmem % data % i0a(j) = newmem % data % i0 - deallocate(newmem % data % i0) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % i0a(dptr % contentsTimeLevs), newmem % data % i0) - newmem % data % i0a(j) = newmem % data % i0 - deallocate(newmem % data % i0) - end do - else - call mpas_duplicate_field(dptr % i0a(1), newmem % data % i0) - end if - else if (associated(dptr % i1a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % i1a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % i1a(j), newmem % data % i1) - newmem % data % i1a(j) = newmem % data % i1 - deallocate(newmem % data % i1) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % i1a(dptr % contentsTimeLevs), newmem % data % i1) - newmem % data % i1a(j) = newmem % data % i1 - deallocate(newmem % data % i1) - end do - else - call mpas_duplicate_field(dptr % i1a(1), newmem % data % i1) - end if - else if (associated(dptr % i2a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % i2a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % i2a(j), newmem % data % i2) - newmem % data % i2a(j) = newmem % data % i2 - deallocate(newmem % data % i2) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % i2a(dptr % contentsTimeLevs), newmem % data % i2) - newmem % data % i2a(j) = newmem % data % i2 - deallocate(newmem % data % i2) - end do - else - call mpas_duplicate_field(dptr % i2a(1), newmem % 
data % i2) - end if - else if (associated(dptr % i3a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % i3a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % i3a(j), newmem % data % i3) - newmem % data % i3a(j) = newmem % data % i3 - deallocate(newmem % data % i3) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % i3a(dptr % contentsTimeLevs), newmem % data % i3) - newmem % data % i3a(j) = newmem % data % i3 - deallocate(newmem % data % i3) - end do - else - call mpas_duplicate_field(dptr % i3a(1), newmem % data % i3) - end if - else if (associated(dptr % c0a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % c0a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % c0a(j), newmem % data % c0) - newmem % data % c0a(j) = newmem % data % c0 - deallocate(newmem % data % c0) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % c0a(dptr % contentsTimeLevs), newmem % data % c0) - newmem % data % c0a(j) = newmem % data % c0 - deallocate(newmem % data % c0) - end do - else - call mpas_duplicate_field(dptr % c0a(1), newmem % data % c0) - end if - else if (associated(dptr % c1a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % c1a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % c1a(j), newmem % data % c1) - newmem % data % c1a(j) = newmem % data % c1 - deallocate(newmem % data % c1) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % c1a(dptr % 
contentsTimeLevs), newmem % data % c1) - newmem % data % c1a(j) = newmem % data % c1 - deallocate(newmem % data % c1) - end do - else - call mpas_duplicate_field(dptr % c1a(1), newmem % data % c1) - end if - else if (associated(dptr % l0a)) then - if ( newmem % data % contentsTimeLevs > 1) then - allocate(newmem % data % l0a(newmem % data % contentsTimeLevs)) - minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) - do j = 1, minTimeLevels - call mpas_duplicate_field(dptr % l0a(j), newmem % data % l0) - newmem % data % l0a(j) = newmem % data % l0 - deallocate(newmem % data % l0) - end do - - do j = minTimeLevels, newmem % data % contentsTimeLevs - call mpas_duplicate_field(dptr % l0a(dptr % contentsTimeLevs), newmem % data % l0) - newmem % data % l0a(j) = newmem % data % l0 - deallocate(newmem % data % l0) - end do - else - call mpas_duplicate_field(dptr % l0a(1), newmem % data % l0) - end if + allocate(newmem) + newmem % key = ptr % key + newmem % keyLen = ptr % keyLen + newmem % contentsType = ptr % contentsType + allocate(newmem % data) + newmem % data % contentsType = ptr % data % contentsType + newmem % data % contentsDims = ptr % data % contentsDims + if (newTimeLevels /= -1) then + newmem % data % contentsTimeLevs = newTimeLevels else - call pool_mesg('While cloning pool, member '//trim(ptr % key)//' has no valid field pointers.') + newmem % data % contentsTimeLevs = ptr % data % contentsTimeLevs end if - else if (ptr % contentsType == MPAS_POOL_SUBPOOL) then - - call mpas_pool_create_pool(newmem % data % p, poolSize = ptr % data % p % size) - call mpas_pool_clone_pool(ptr % data % p, newmem % data % p) - - end if - - if (.not. 
pool_add_member(destPool, newmem % key, newmem)) then - call pool_mesg('Error: Had problems adding '//trim(newmem % key)//' to clone of pool.') - end if - - ptr => ptr % iteration_next - end do - - end subroutine mpas_pool_clone_pool!}}} - - -!----------------------------------------------------------------------- -! routine mpas_pool_copy_pool -! -!> \brief MPAS Pool copy routine -!> \author Michael Duda, Doug Jacobsen -!> \date 03/27/2014 -!> \details -!> This routine assumes srcPool and destPool have identical members. It will -!> copy the data from the members of srcPool into the members of destPool. -! -!----------------------------------------------------------------------- - recursive subroutine mpas_pool_copy_pool(srcPool, destPool)!{{{ - - implicit none - - type (mpas_pool_type), pointer :: srcPool - type (mpas_pool_type), pointer :: destPool - - - integer :: i, j - type (mpas_pool_member_type), pointer :: ptr - type (mpas_pool_data_type), pointer :: dptr - type (mpas_pool_data_type), pointer :: mem - - do i=1,srcPool % size - - ptr => srcPool % table(i) % head - do while(associated(ptr)) - if (ptr % contentsType == MPAS_POOL_DIMENSION) then - mem => pool_get_member(destPool, ptr % key, MPAS_POOL_DIMENSION) - if (.not. 
associated(mem)) then - call mpas_pool_set_error_level(MPAS_POOL_FATAL) - call pool_mesg('ERROR: Destination pool does not contain member '//trim(ptr % key)//'.') - end if if (ptr % data % contentsDims > 0) then - mem % simple_int_arr(:) = ptr % data % simple_int_arr(:) + allocate(newmem % data % simple_int_arr(size(ptr % data % simple_int_arr))) + newmem % data % simple_int_arr(:) = ptr % data % simple_int_arr(:) else - mem % simple_int = ptr % data % simple_int + allocate(newmem % data % simple_int) + newmem % data % simple_int = ptr % data % simple_int end if else if (ptr % contentsType == MPAS_POOL_CONFIG) then dptr => ptr % data - mem => pool_get_member(destPool, ptr % key, MPAS_POOL_CONFIG) if (dptr % contentsType == MPAS_POOL_REAL) then - mem % simple_real = dptr % simple_real + allocate(newmem % data % simple_real) + newmem % data % simple_real = dptr % simple_real else if (dptr % contentsType == MPAS_POOL_INTEGER) then - mem % simple_int = dptr % simple_int + allocate(newmem % data % simple_int) + newmem % data % simple_int = dptr % simple_int else if (dptr % contentsType == MPAS_POOL_CHARACTER) then - mem % simple_char = dptr % simple_char + allocate(newmem % data % simple_char) + newmem % data % simple_char = dptr % simple_char else if (dptr % contentsType == MPAS_POOL_LOGICAL) then - mem % simple_logical = dptr % simple_logical + allocate(newmem % data % simple_logical) + newmem % data % simple_logical = dptr % simple_logical end if else if (ptr % contentsType == MPAS_POOL_FIELD) then @@ -996,239 +558,709 @@ recursive subroutine mpas_pool_copy_pool(srcPool, destPool)!{{{ dptr => ptr % data ! Do this through brute force... - mem => pool_get_member(destPool, ptr % key, MPAS_POOL_FIELD) if (associated(dptr % r0)) then - call mpas_duplicate_field(dptr % r0, mem % r0, copy_array_only=.true.) 
+ if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r0a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r0, newmem % data % r0) + newmem % data % r0a(j) = newmem % data % r0 + deallocate(newmem % data % r0) + end do + else + call mpas_duplicate_field(dptr % r0, newmem % data % r0) + end if else if (associated(dptr % r1)) then - call mpas_duplicate_field(dptr % r1, mem % r1, copy_array_only=.true.) + if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r1a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r1, newmem % data % r1) + newmem % data % r1a(j) = newmem % data % r1 + deallocate(newmem % data % r1) + end do + else + call mpas_duplicate_field(dptr % r1, newmem % data % r1) + end if else if (associated(dptr % r2)) then - call mpas_duplicate_field(dptr % r2, mem % r2, copy_array_only=.true.) + if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r2a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r2, newmem % data % r2) + newmem % data % r2a(j) = newmem % data % r2 + deallocate(newmem % data % r2) + end do + else + call mpas_duplicate_field(dptr % r2, newmem % data % r2) + end if else if (associated(dptr % r3)) then - call mpas_duplicate_field(dptr % r3, mem % r3, copy_array_only=.true.) + if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r3a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r3, newmem % data % r3) + newmem % data % r3a(j) = newmem % data % r3 + deallocate(newmem % data % r3) + end do + else + call mpas_duplicate_field(dptr % r3, newmem % data % r3) + end if else if (associated(dptr % r4)) then - call mpas_duplicate_field(dptr % r4, mem % r4, copy_array_only=.true.) 
+ if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r4a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r4, newmem % data % r4) + newmem % data % r4a(j) = newmem % data % r4 + deallocate(newmem % data % r4) + end do + else + call mpas_duplicate_field(dptr % r4, newmem % data % r4) + end if else if (associated(dptr % r5)) then - call mpas_duplicate_field(dptr % r5, mem % r5, copy_array_only=.true.) + if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r5a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r5, newmem % data % r5) + newmem % data % r5a(j) = newmem % data % r5 + deallocate(newmem % data % r5) + end do + else + call mpas_duplicate_field(dptr % r5, newmem % data % r5) + end if else if (associated(dptr % i0)) then - call mpas_duplicate_field(dptr % i0, mem % i0, copy_array_only=.true.) + if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % i0a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % i0, newmem % data % i0) + newmem % data % i0a(j) = newmem % data % i0 + deallocate(newmem % data % i0) + end do + else + call mpas_duplicate_field(dptr % i0, newmem % data % i0) + end if else if (associated(dptr % i1)) then - call mpas_duplicate_field(dptr % i1, mem % i1, copy_array_only=.true.) + if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % i1a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % i1, newmem % data % i1) + newmem % data % i1a(j) = newmem % data % i1 + deallocate(newmem % data % i1) + end do + else + call mpas_duplicate_field(dptr % i1, newmem % data % i1) + end if else if (associated(dptr % i2)) then - call mpas_duplicate_field(dptr % i2, mem % i2, copy_array_only=.true.) 
+ if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % i2a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % i2, newmem % data % i2) + newmem % data % i2a(j) = newmem % data % i2 + deallocate(newmem % data % i2) + end do + else + call mpas_duplicate_field(dptr % i2, newmem % data % i2) + end if else if (associated(dptr % i3)) then - call mpas_duplicate_field(dptr % i3, mem % i3, copy_array_only=.true.) + if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % i3a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % i3, newmem % data % i3) + newmem % data % i3a(j) = newmem % data % i3 + deallocate(newmem % data % i3) + end do + else + call mpas_duplicate_field(dptr % i3, newmem % data % i3) + end if else if (associated(dptr % c0)) then - call mpas_duplicate_field(dptr % c0, mem % c0, copy_array_only=.true.) + if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % c0a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % c0, newmem % data % c0) + newmem % data % c0a(j) = newmem % data % c0 + deallocate(newmem % data % c0) + end do + else + call mpas_duplicate_field(dptr % c0, newmem % data % c0) + end if else if (associated(dptr % c1)) then - call mpas_duplicate_field(dptr % c1, mem % c1, copy_array_only=.true.) + if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % c1a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % c1, newmem % data % c1) + newmem % data % c1a(j) = newmem % data % c1 + deallocate(newmem % data % c1) + end do + else + call mpas_duplicate_field(dptr % c1, newmem % data % c1) + end if else if (associated(dptr % l0)) then - call mpas_duplicate_field(dptr % l0, mem % l0, copy_array_only=.true.) 
+ if (newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % l0a(newmem % data % contentsTimeLevs)) + do j = 1, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % l0, newmem % data % l0) + newmem % data % l0a(j) = newmem % data % l0 + deallocate(newmem % data % l0) + end do + else + call mpas_duplicate_field(dptr % l0, newmem % data % l0) + end if else if (associated(dptr % r0a)) then - do j=1,mem % contentsTimeLevs - mem % r0 => mem % r0a(j) - call mpas_duplicate_field(dptr % r0a(j), mem % r0, copy_array_only=.true.) - nullify(mem % r0) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r0a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % r0a(j), newmem % data % r0) + newmem % data % r0a(j) = newmem % data % r0 + deallocate(newmem % data % r0) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r0a(dptr % contentsTimeLevs), newmem % data % r0) + newmem % data % r0a(j) = newmem % data % r0 + deallocate(newmem % data % r0) + end do + else + call mpas_duplicate_field(dptr % r0a(1), newmem % data % r0) + end if else if (associated(dptr % r1a)) then - do j=1,mem % contentsTimeLevs - mem % r1 => mem % r1a(j) - call mpas_duplicate_field(dptr % r1a(j), mem % r1, copy_array_only=.true.) 
- nullify(mem % r1) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r1a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % r1a(j), newmem % data % r1) + newmem % data % r1a(j) = newmem % data % r1 + deallocate(newmem % data % r1) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r1a(dptr % contentsTimeLevs), newmem % data % r1) + newmem % data % r1a(j) = newmem % data % r1 + deallocate(newmem % data % r1) + end do + else + call mpas_duplicate_field(dptr % r1a(1), newmem % data % r1) + end if else if (associated(dptr % r2a)) then - do j=1,mem % contentsTimeLevs - mem % r2 => mem % r2a(j) - call mpas_duplicate_field(dptr % r2a(j), mem % r2, copy_array_only=.true.) - nullify(mem % r2) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r2a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % r2a(j), newmem % data % r2) + newmem % data % r2a(j) = newmem % data % r2 + deallocate(newmem % data % r2) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r2a(dptr % contentsTimeLevs), newmem % data % r2) + newmem % data % r2a(j) = newmem % data % r2 + deallocate(newmem % data % r2) + end do + else + call mpas_duplicate_field(dptr % r2a(1), newmem % data % r2) + end if else if (associated(dptr % r3a)) then - do j=1,mem % contentsTimeLevs - mem % r3 => mem % r3a(j) - call mpas_duplicate_field(dptr % r3a(j), mem % r3, copy_array_only=.true.) 
- nullify(mem % r3) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r3a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % r3a(j), newmem % data % r3) + newmem % data % r3a(j) = newmem % data % r3 + deallocate(newmem % data % r3) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r3a(dptr % contentsTimeLevs), newmem % data % r3) + newmem % data % r3a(j) = newmem % data % r3 + deallocate(newmem % data % r3) + end do + else + call mpas_duplicate_field(dptr % r3a(1), newmem % data % r3) + end if else if (associated(dptr % r4a)) then - do j=1,mem % contentsTimeLevs - mem % r4 => mem % r4a(j) - call mpas_duplicate_field(dptr % r4a(j), mem % r4, copy_array_only=.true.) - nullify(mem % r4) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r4a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % r4a(j), newmem % data % r4) + newmem % data % r4a(j) = newmem % data % r4 + deallocate(newmem % data % r4) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r4a(dptr % contentsTimeLevs), newmem % data % r4) + newmem % data % r4a(j) = newmem % data % r4 + deallocate(newmem % data % r4) + end do + else + call mpas_duplicate_field(dptr % r4a(1), newmem % data % r4) + end if else if (associated(dptr % r5a)) then - do j=1,mem % contentsTimeLevs - mem % r5 => mem % r5a(j) - call mpas_duplicate_field(dptr % r5a(j), mem % r5, copy_array_only=.true.) 
- nullify(mem % r5) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % r5a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % r5a(j), newmem % data % r5) + newmem % data % r5a(j) = newmem % data % r5 + deallocate(newmem % data % r5) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % r5a(dptr % contentsTimeLevs), newmem % data % r5) + newmem % data % r5a(j) = newmem % data % r5 + deallocate(newmem % data % r5) + end do + else + call mpas_duplicate_field(dptr % r5a(1), newmem % data % r5) + end if else if (associated(dptr % i0a)) then - do j=1,mem % contentsTimeLevs - mem % i0 => mem % i0a(j) - call mpas_duplicate_field(dptr % i0a(j), mem % i0, copy_array_only=.true.) - nullify(mem % i0) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % i0a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % i0a(j), newmem % data % i0) + newmem % data % i0a(j) = newmem % data % i0 + deallocate(newmem % data % i0) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % i0a(dptr % contentsTimeLevs), newmem % data % i0) + newmem % data % i0a(j) = newmem % data % i0 + deallocate(newmem % data % i0) + end do + else + call mpas_duplicate_field(dptr % i0a(1), newmem % data % i0) + end if else if (associated(dptr % i1a)) then - do j=1,mem % contentsTimeLevs - mem % i1 => mem % i1a(j) - call mpas_duplicate_field(dptr % i1a(j), mem % i1, copy_array_only=.true.) 
- nullify(mem % i1) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % i1a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % i1a(j), newmem % data % i1) + newmem % data % i1a(j) = newmem % data % i1 + deallocate(newmem % data % i1) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % i1a(dptr % contentsTimeLevs), newmem % data % i1) + newmem % data % i1a(j) = newmem % data % i1 + deallocate(newmem % data % i1) + end do + else + call mpas_duplicate_field(dptr % i1a(1), newmem % data % i1) + end if else if (associated(dptr % i2a)) then - do j=1,mem % contentsTimeLevs - mem % i2 => mem % i2a(j) - call mpas_duplicate_field(dptr % i2a(j), mem % i2, copy_array_only=.true.) - nullify(mem % i2) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % i2a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % i2a(j), newmem % data % i2) + newmem % data % i2a(j) = newmem % data % i2 + deallocate(newmem % data % i2) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % i2a(dptr % contentsTimeLevs), newmem % data % i2) + newmem % data % i2a(j) = newmem % data % i2 + deallocate(newmem % data % i2) + end do + else + call mpas_duplicate_field(dptr % i2a(1), newmem % data % i2) + end if else if (associated(dptr % i3a)) then - do j=1,mem % contentsTimeLevs - mem % i3 => mem % i3a(j) - call mpas_duplicate_field(dptr % i3a(j), mem % i3, copy_array_only=.true.) 
- nullify(mem % i3) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % i3a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % i3a(j), newmem % data % i3) + newmem % data % i3a(j) = newmem % data % i3 + deallocate(newmem % data % i3) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % i3a(dptr % contentsTimeLevs), newmem % data % i3) + newmem % data % i3a(j) = newmem % data % i3 + deallocate(newmem % data % i3) + end do + else + call mpas_duplicate_field(dptr % i3a(1), newmem % data % i3) + end if else if (associated(dptr % c0a)) then - do j=1,mem % contentsTimeLevs - mem % c0 => mem % c0a(j) - call mpas_duplicate_field(dptr % c0a(j), mem % c0, copy_array_only=.true.) - nullify(mem % c0) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % c0a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % c0a(j), newmem % data % c0) + newmem % data % c0a(j) = newmem % data % c0 + deallocate(newmem % data % c0) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % c0a(dptr % contentsTimeLevs), newmem % data % c0) + newmem % data % c0a(j) = newmem % data % c0 + deallocate(newmem % data % c0) + end do + else + call mpas_duplicate_field(dptr % c0a(1), newmem % data % c0) + end if else if (associated(dptr % c1a)) then - do j=1,mem % contentsTimeLevs - mem % c1 => mem % c1a(j) - call mpas_duplicate_field(dptr % c1a(j), mem % c1, copy_array_only=.true.) 
- nullify(mem % c1) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % c1a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % c1a(j), newmem % data % c1) + newmem % data % c1a(j) = newmem % data % c1 + deallocate(newmem % data % c1) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % c1a(dptr % contentsTimeLevs), newmem % data % c1) + newmem % data % c1a(j) = newmem % data % c1 + deallocate(newmem % data % c1) + end do + else + call mpas_duplicate_field(dptr % c1a(1), newmem % data % c1) + end if else if (associated(dptr % l0a)) then - do j=1,mem % contentsTimeLevs - mem % l0 => mem % l0a(j) - call mpas_duplicate_field(dptr % l0a(j), mem % l0, copy_array_only=.true.) - nullify(mem % l0) - end do + if ( newmem % data % contentsTimeLevs > 1) then + allocate(newmem % data % l0a(newmem % data % contentsTimeLevs)) + minTimeLevels = min(dptr % contentsTimeLevs, newmem % data % contentsTimeLevs) + do j = 1, minTimeLevels + call mpas_duplicate_field(dptr % l0a(j), newmem % data % l0) + newmem % data % l0a(j) = newmem % data % l0 + deallocate(newmem % data % l0) + end do + + do j = minTimeLevels, newmem % data % contentsTimeLevs + call mpas_duplicate_field(dptr % l0a(dptr % contentsTimeLevs), newmem % data % l0) + newmem % data % l0a(j) = newmem % data % l0 + deallocate(newmem % data % l0) + end do + else + call mpas_duplicate_field(dptr % l0a(1), newmem % data % l0) + end if else - call pool_mesg('While copying pool, member '//trim(ptr % key)//' has no valid field pointers.') + call pool_mesg('While cloning pool, member '//trim(ptr % key)//' has no valid field pointers.') end if else if (ptr % contentsType == MPAS_POOL_SUBPOOL) then - mem => pool_get_member(destPool, ptr % key, MPAS_POOL_SUBPOOL) - call mpas_pool_copy_pool(ptr % data % p, mem % p) + call 
mpas_pool_create_pool(newmem % data % p, poolSize = ptr % data % p % size) + call mpas_pool_clone_pool(ptr % data % p, newmem % data % p) end if - ptr => ptr % next - end do + if (.not. pool_add_member(destPool, newmem % key, newmem)) then + call pool_mesg('Error: Had problems adding '//trim(newmem % key)//' to clone of pool.') + end if - end do + ptr => ptr % iteration_next + end do + end if - end subroutine mpas_pool_copy_pool!}}} + end subroutine mpas_pool_clone_pool!}}} !----------------------------------------------------------------------- -! routine mpas_pool_initialize_time_levels +! routine mpas_pool_copy_pool ! !> \brief MPAS Pool copy routine !> \author Michael Duda, Doug Jacobsen !> \date 03/27/2014 !> \details -!> This routine copies the data from the first time level of every field into -!> all subsequent time levels, to initialize them with real values. +!> This routine assumes srcPool and destPool have identical members. It will +!> copy the data from the members of srcPool into the members of destPool. ! 
!----------------------------------------------------------------------- - recursive subroutine mpas_pool_initialize_time_levels(inPool)!{{{ + recursive subroutine mpas_pool_copy_pool(srcPool, destPool)!{{{ implicit none - type (mpas_pool_type), pointer :: inPool + type (mpas_pool_type), pointer :: srcPool + type (mpas_pool_type), pointer :: destPool - integer :: i, j + + integer :: i, j, threadNum type (mpas_pool_member_type), pointer :: ptr type (mpas_pool_data_type), pointer :: dptr type (mpas_pool_data_type), pointer :: mem - type (mpas_pool_type), pointer :: subPool - type (mpas_pool_iterator_type) :: itr - call mpas_pool_begin_iteration(inPool) - do while (mpas_pool_get_next_member(inPool, itr)) - if (itr % memberType == MPAS_POOL_SUBPOOL) then - call mpas_pool_get_subpool(inPool, itr % memberName, subPool) - call mpas_pool_initialize_time_levels(subPool) - else if (itr % memberType == MPAS_POOL_FIELD) then - if (itr % nTimeLevels > 1) then - mem => pool_get_member(inPool, itr % memberName, MPAS_POOL_FIELD) - if (itr % dataType == MPAS_POOL_REAL) then - if (itr % nDims == 0) then - do i = 2, itr % nTimeLevels - mem % r0 => mem % r0a(i) - call mpas_duplicate_field(mem % r0a(1), mem % r0, copy_array_only=.true.) + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + do i=1,srcPool % size + + ptr => srcPool % table(i) % head + do while(associated(ptr)) + + if (ptr % contentsType == MPAS_POOL_DIMENSION) then + + mem => pool_get_member(destPool, ptr % key, MPAS_POOL_DIMENSION) + if (.not. 
associated(mem)) then + call mpas_pool_set_error_level(MPAS_POOL_FATAL) + call pool_mesg('ERROR: Destination pool does not contain member '//trim(ptr % key)//'.') + end if + if (ptr % data % contentsDims > 0) then + mem % simple_int_arr(:) = ptr % data % simple_int_arr(:) + else + mem % simple_int = ptr % data % simple_int + end if + + else if (ptr % contentsType == MPAS_POOL_CONFIG) then + + dptr => ptr % data + + mem => pool_get_member(destPool, ptr % key, MPAS_POOL_CONFIG) + if (dptr % contentsType == MPAS_POOL_REAL) then + mem % simple_real = dptr % simple_real + else if (dptr % contentsType == MPAS_POOL_INTEGER) then + mem % simple_int = dptr % simple_int + else if (dptr % contentsType == MPAS_POOL_CHARACTER) then + mem % simple_char = dptr % simple_char + else if (dptr % contentsType == MPAS_POOL_LOGICAL) then + mem % simple_logical = dptr % simple_logical + end if + + else if (ptr % contentsType == MPAS_POOL_FIELD) then + + dptr => ptr % data + + ! Do this through brute force... + mem => pool_get_member(destPool, ptr % key, MPAS_POOL_FIELD) + if (associated(dptr % r0)) then + call mpas_duplicate_field(dptr % r0, mem % r0, copy_array_only=.true.) + else if (associated(dptr % r1)) then + call mpas_duplicate_field(dptr % r1, mem % r1, copy_array_only=.true.) + else if (associated(dptr % r2)) then + call mpas_duplicate_field(dptr % r2, mem % r2, copy_array_only=.true.) + else if (associated(dptr % r3)) then + call mpas_duplicate_field(dptr % r3, mem % r3, copy_array_only=.true.) + else if (associated(dptr % r4)) then + call mpas_duplicate_field(dptr % r4, mem % r4, copy_array_only=.true.) + else if (associated(dptr % r5)) then + call mpas_duplicate_field(dptr % r5, mem % r5, copy_array_only=.true.) + else if (associated(dptr % i0)) then + call mpas_duplicate_field(dptr % i0, mem % i0, copy_array_only=.true.) + else if (associated(dptr % i1)) then + call mpas_duplicate_field(dptr % i1, mem % i1, copy_array_only=.true.) 
+ else if (associated(dptr % i2)) then + call mpas_duplicate_field(dptr % i2, mem % i2, copy_array_only=.true.) + else if (associated(dptr % i3)) then + call mpas_duplicate_field(dptr % i3, mem % i3, copy_array_only=.true.) + else if (associated(dptr % c0)) then + call mpas_duplicate_field(dptr % c0, mem % c0, copy_array_only=.true.) + else if (associated(dptr % c1)) then + call mpas_duplicate_field(dptr % c1, mem % c1, copy_array_only=.true.) + else if (associated(dptr % l0)) then + call mpas_duplicate_field(dptr % l0, mem % l0, copy_array_only=.true.) + else if (associated(dptr % r0a)) then + do j=1,mem % contentsTimeLevs + mem % r0 => mem % r0a(j) + call mpas_duplicate_field(dptr % r0a(j), mem % r0, copy_array_only=.true.) nullify(mem % r0) end do - else if (itr % nDims == 1) then - do i = 2, itr % nTimeLevels - mem % r1 => mem % r1a(i) - call mpas_duplicate_field(mem % r1a(1), mem % r1, copy_array_only=.true.) + else if (associated(dptr % r1a)) then + do j=1,mem % contentsTimeLevs + mem % r1 => mem % r1a(j) + call mpas_duplicate_field(dptr % r1a(j), mem % r1, copy_array_only=.true.) nullify(mem % r1) end do - else if (itr % nDims == 2) then - do i = 2, itr % nTimeLevels - mem % r2 => mem % r2a(i) - call mpas_duplicate_field(mem % r2a(1), mem % r2, copy_array_only=.true.) + else if (associated(dptr % r2a)) then + do j=1,mem % contentsTimeLevs + mem % r2 => mem % r2a(j) + call mpas_duplicate_field(dptr % r2a(j), mem % r2, copy_array_only=.true.) nullify(mem % r2) end do - else if (itr % nDims == 3) then - do i = 2, itr % nTimeLevels - mem % r3 => mem % r3a(i) - call mpas_duplicate_field(mem % r3a(1), mem % r3, copy_array_only=.true.) + else if (associated(dptr % r3a)) then + do j=1,mem % contentsTimeLevs + mem % r3 => mem % r3a(j) + call mpas_duplicate_field(dptr % r3a(j), mem % r3, copy_array_only=.true.) 
nullify(mem % r3) end do - else if (itr % nDims == 4) then - do i = 2, itr % nTimeLevels - mem % r4 => mem % r4a(i) - call mpas_duplicate_field(mem % r4a(1), mem % r4, copy_array_only=.true.) + else if (associated(dptr % r4a)) then + do j=1,mem % contentsTimeLevs + mem % r4 => mem % r4a(j) + call mpas_duplicate_field(dptr % r4a(j), mem % r4, copy_array_only=.true.) nullify(mem % r4) end do - else if (itr % nDims == 5) then - do i = 2, itr % nTimeLevels - mem % r5 => mem % r5a(i) - call mpas_duplicate_field(mem % r5a(1), mem % r5, copy_array_only=.true.) + else if (associated(dptr % r5a)) then + do j=1,mem % contentsTimeLevs + mem % r5 => mem % r5a(j) + call mpas_duplicate_field(dptr % r5a(j), mem % r5, copy_array_only=.true.) nullify(mem % r5) end do - end if - else if (itr % dataType == MPAS_POOL_INTEGER) then - if (itr % nDims == 0) then - do i = 2, itr % nTimeLevels - mem % i0 => mem % i0a(i) - call mpas_duplicate_field(mem % i0a(1), mem % i0, copy_array_only=.true.) + else if (associated(dptr % i0a)) then + do j=1,mem % contentsTimeLevs + mem % i0 => mem % i0a(j) + call mpas_duplicate_field(dptr % i0a(j), mem % i0, copy_array_only=.true.) nullify(mem % i0) end do - else if (itr % nDims == 1) then - do i = 2, itr % nTimeLevels - mem % i1 => mem % i1a(i) - call mpas_duplicate_field(mem % i1a(1), mem % i1, copy_array_only=.true.) + else if (associated(dptr % i1a)) then + do j=1,mem % contentsTimeLevs + mem % i1 => mem % i1a(j) + call mpas_duplicate_field(dptr % i1a(j), mem % i1, copy_array_only=.true.) nullify(mem % i1) end do - else if (itr % nDims == 2) then - do i = 2, itr % nTimeLevels - mem % i2 => mem % i2a(i) - call mpas_duplicate_field(mem % i2a(1), mem % i2, copy_array_only=.true.) + else if (associated(dptr % i2a)) then + do j=1,mem % contentsTimeLevs + mem % i2 => mem % i2a(j) + call mpas_duplicate_field(dptr % i2a(j), mem % i2, copy_array_only=.true.) 
nullify(mem % i2) end do - else if (itr % nDims == 3) then - do i = 2, itr % nTimeLevels - mem % i3 => mem % i3a(i) - call mpas_duplicate_field(mem % i3a(1), mem % i3, copy_array_only=.true.) + else if (associated(dptr % i3a)) then + do j=1,mem % contentsTimeLevs + mem % i3 => mem % i3a(j) + call mpas_duplicate_field(dptr % i3a(j), mem % i3, copy_array_only=.true.) nullify(mem % i3) end do - end if - else if (itr % dataType == MPAS_POOL_CHARACTER) then - if (itr % nDims == 0) then - do i = 2, itr % nTimeLevels - mem % c0 => mem % c0a(i) - call mpas_duplicate_field(mem % c0a(1), mem % c0, copy_array_only=.true.) + else if (associated(dptr % c0a)) then + do j=1,mem % contentsTimeLevs + mem % c0 => mem % c0a(j) + call mpas_duplicate_field(dptr % c0a(j), mem % c0, copy_array_only=.true.) nullify(mem % c0) end do - else if (itr % nDims == 1) then - do i = 2, itr % nTimeLevels - mem % c1 => mem % c1a(i) - call mpas_duplicate_field(mem % c1a(1), mem % c1, copy_array_only=.true.) + else if (associated(dptr % c1a)) then + do j=1,mem % contentsTimeLevs + mem % c1 => mem % c1a(j) + call mpas_duplicate_field(dptr % c1a(j), mem % c1, copy_array_only=.true.) nullify(mem % c1) end do + else if (associated(dptr % l0a)) then + do j=1,mem % contentsTimeLevs + mem % l0 => mem % l0a(j) + call mpas_duplicate_field(dptr % l0a(j), mem % l0, copy_array_only=.true.) + nullify(mem % l0) + end do + else + call pool_mesg('While copying pool, member '//trim(ptr % key)//' has no valid field pointers.') + end if + + else if (ptr % contentsType == MPAS_POOL_SUBPOOL) then + + mem => pool_get_member(destPool, ptr % key, MPAS_POOL_SUBPOOL) + call mpas_pool_copy_pool(ptr % data % p, mem % p) + + end if + + ptr => ptr % next + end do + + end do + end if + + end subroutine mpas_pool_copy_pool!}}} + + +!----------------------------------------------------------------------- +! routine mpas_pool_initialize_time_levels +! 
+!> \brief MPAS Pool copy routine +!> \author Michael Duda, Doug Jacobsen +!> \date 03/27/2014 +!> \details +!> This routine copies the data from the first time level of every field into +!> all subsequent time levels, to initialize them with real values. +! +!----------------------------------------------------------------------- + recursive subroutine mpas_pool_initialize_time_levels(inPool)!{{{ + + implicit none + + type (mpas_pool_type), pointer :: inPool + + integer :: i, j + type (mpas_pool_member_type), pointer :: ptr + type (mpas_pool_data_type), pointer :: dptr + type (mpas_pool_data_type), pointer :: mem + type (mpas_pool_type), pointer :: subPool + type (mpas_pool_iterator_type) :: itr + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() + + call mpas_pool_begin_iteration(inPool) + do while (mpas_pool_get_next_member(inPool, itr)) + if (itr % memberType == MPAS_POOL_SUBPOOL) then + call mpas_pool_get_subpool(inPool, itr % memberName, subPool) + call mpas_pool_initialize_time_levels(subPool) + else if (itr % memberType == MPAS_POOL_FIELD) then + if ( threadNum == 0 ) then + if (itr % nTimeLevels > 1) then + mem => pool_get_member(inPool, itr % memberName, MPAS_POOL_FIELD) + if (itr % dataType == MPAS_POOL_REAL) then + if (itr % nDims == 0) then + do i = 2, itr % nTimeLevels + mem % r0 => mem % r0a(i) + call mpas_duplicate_field(mem % r0a(1), mem % r0, copy_array_only=.true.) + nullify(mem % r0) + end do + else if (itr % nDims == 1) then + do i = 2, itr % nTimeLevels + mem % r1 => mem % r1a(i) + call mpas_duplicate_field(mem % r1a(1), mem % r1, copy_array_only=.true.) + nullify(mem % r1) + end do + else if (itr % nDims == 2) then + do i = 2, itr % nTimeLevels + mem % r2 => mem % r2a(i) + call mpas_duplicate_field(mem % r2a(1), mem % r2, copy_array_only=.true.) 
+ nullify(mem % r2) + end do + else if (itr % nDims == 3) then + do i = 2, itr % nTimeLevels + mem % r3 => mem % r3a(i) + call mpas_duplicate_field(mem % r3a(1), mem % r3, copy_array_only=.true.) + nullify(mem % r3) + end do + else if (itr % nDims == 4) then + do i = 2, itr % nTimeLevels + mem % r4 => mem % r4a(i) + call mpas_duplicate_field(mem % r4a(1), mem % r4, copy_array_only=.true.) + nullify(mem % r4) + end do + else if (itr % nDims == 5) then + do i = 2, itr % nTimeLevels + mem % r5 => mem % r5a(i) + call mpas_duplicate_field(mem % r5a(1), mem % r5, copy_array_only=.true.) + nullify(mem % r5) + end do + end if + else if (itr % dataType == MPAS_POOL_INTEGER) then + if (itr % nDims == 0) then + do i = 2, itr % nTimeLevels + mem % i0 => mem % i0a(i) + call mpas_duplicate_field(mem % i0a(1), mem % i0, copy_array_only=.true.) + nullify(mem % i0) + end do + else if (itr % nDims == 1) then + do i = 2, itr % nTimeLevels + mem % i1 => mem % i1a(i) + call mpas_duplicate_field(mem % i1a(1), mem % i1, copy_array_only=.true.) + nullify(mem % i1) + end do + else if (itr % nDims == 2) then + do i = 2, itr % nTimeLevels + mem % i2 => mem % i2a(i) + call mpas_duplicate_field(mem % i2a(1), mem % i2, copy_array_only=.true.) + nullify(mem % i2) + end do + else if (itr % nDims == 3) then + do i = 2, itr % nTimeLevels + mem % i3 => mem % i3a(i) + call mpas_duplicate_field(mem % i3a(1), mem % i3, copy_array_only=.true.) + nullify(mem % i3) + end do + end if + else if (itr % dataType == MPAS_POOL_CHARACTER) then + if (itr % nDims == 0) then + do i = 2, itr % nTimeLevels + mem % c0 => mem % c0a(i) + call mpas_duplicate_field(mem % c0a(1), mem % c0, copy_array_only=.true.) + nullify(mem % c0) + end do + else if (itr % nDims == 1) then + do i = 2, itr % nTimeLevels + mem % c1 => mem % c1a(i) + call mpas_duplicate_field(mem % c1a(1), mem % c1, copy_array_only=.true.) 
+ nullify(mem % c1) + end do + end if end if end if end if @@ -1261,6 +1293,9 @@ recursive subroutine mpas_pool_link_pools(inPool, prevPool, nextPool)!{{{ type (mpas_pool_type), pointer :: subPool, prevSubPool, nextSubPool type (mpas_pool_data_type), pointer :: poolMem, prevPoolMem, nextPoolMem type (mpas_pool_iterator_type) :: poolItr + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() nullify(prevSubPool) nullify(nextSubPool) @@ -1293,354 +1328,356 @@ recursive subroutine mpas_pool_link_pools(inPool, prevPool, nextPool)!{{{ ! Link fields else if (poolItr % memberType == MPAS_POOL_FIELD) then - poolMem => pool_get_member(inPool, poolItr % memberName, MPAS_POOL_FIELD) - if (present(prevPool)) then - prevPoolMem => pool_get_member(prevPool, poolItr % memberName, MPAS_POOL_FIELD) - end if + if ( threadNum == 0 ) then + poolMem => pool_get_member(inPool, poolItr % memberName, MPAS_POOL_FIELD) + if (present(prevPool)) then + prevPoolMem => pool_get_member(prevPool, poolItr % memberName, MPAS_POOL_FIELD) + end if - if (present(nextPool)) then - nextPoolMem => pool_get_member(nextPool, poolItr % memberName, MPAS_POOL_FIELD) - end if + if (present(nextPool)) then + nextPoolMem => pool_get_member(nextPool, poolItr % memberName, MPAS_POOL_FIELD) + end if - if (poolItr % dataType == MPAS_POOL_REAL) then - if (poolItr % nDims == 0) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels + if (poolItr % dataType == MPAS_POOL_REAL) then + if (poolItr % nDims == 0) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % r0a(i) % prev => prevPoolMem % r0a(i) + else + nullify(poolMem % r0a(i) % prev) + end if + + if (associated(nextPoolMem)) then + poolMem % r0a(i) % next => nextPoolMem % r0a(i) + else + nullify(poolMem % r0a(i) % next) + end if + end do + else if (associated(prevPoolMem)) then - poolMem % r0a(i) % prev => prevPoolMem % r0a(i) + poolMem % r0 % prev => 
prevPoolMem % r0 else - nullify(poolMem % r0a(i) % prev) + nullify(poolMem % r0 % prev) end if if (associated(nextPoolMem)) then - poolMem % r0a(i) % next => nextPoolMem % r0a(i) + poolMem % r0 % next => nextPoolMem % r0 else - nullify(poolMem % r0a(i) % next) + nullify(poolMem % r0 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % r0 % prev => prevPoolMem % r0 - else - nullify(poolMem % r0 % prev) end if - - if (associated(nextPoolMem)) then - poolMem % r0 % next => nextPoolMem % r0 + else if (poolItr % nDims == 1) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % r1a(i) % prev => prevPoolMem % r1a(i) + else + nullify(poolMem % r1a(i) % prev) + end if + + if (associated(nextPoolMem)) then + poolMem % r1a(i) % next => nextPoolMem % r1a(i) + else + nullify(poolMem % r1a(i) % next) + end if + end do else - nullify(poolMem % r0 % next) - end if - end if - else if (poolItr % nDims == 1) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels if (associated(prevPoolMem)) then - poolMem % r1a(i) % prev => prevPoolMem % r1a(i) + poolMem % r1 % prev => prevPoolMem % r1 else - nullify(poolMem % r1a(i) % prev) + nullify(poolMem % r1 % prev) end if if (associated(nextPoolMem)) then - poolMem % r1a(i) % next => nextPoolMem % r1a(i) + poolMem % r1 % next => nextPoolMem % r1 else - nullify(poolMem % r1a(i) % next) + nullify(poolMem % r1 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % r1 % prev => prevPoolMem % r1 - else - nullify(poolMem % r1 % prev) end if - - if (associated(nextPoolMem)) then - poolMem % r1 % next => nextPoolMem % r1 + else if (poolItr % nDims == 2) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % r2a(i) % prev => prevPoolMem % r2a(i) + else + nullify(poolMem % r2a(i) % prev) + end if + + if (associated(nextPoolMem)) then + poolMem 
% r2a(i) % next => nextPoolMem % r2a(i) + else + nullify(poolMem % r2a(i) % next) + end if + end do else - nullify(poolMem % r1 % next) - end if - end if - else if (poolItr % nDims == 2) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels if (associated(prevPoolMem)) then - poolMem % r2a(i) % prev => prevPoolMem % r2a(i) + poolMem % r2 % prev => prevPoolMem % r2 else - nullify(poolMem % r2a(i) % prev) + nullify(poolMem % r2 % prev) end if if (associated(nextPoolMem)) then - poolMem % r2a(i) % next => nextPoolMem % r2a(i) + poolMem % r2 % next => nextPoolMem % r2 else - nullify(poolMem % r2a(i) % next) + nullify(poolMem % r2 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % r2 % prev => prevPoolMem % r2 - else - nullify(poolMem % r2 % prev) end if - - if (associated(nextPoolMem)) then - poolMem % r2 % next => nextPoolMem % r2 + else if (poolItr % nDims == 3) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % r3a(i) % prev => prevPoolMem % r3a(i) + else + nullify(poolMem % r3a(i) % prev) + end if + + if (associated(nextPoolMem)) then + poolMem % r3a(i) % next => nextPoolMem % r3a(i) + else + nullify(poolMem % r3a(i) % next) + end if + end do else - nullify(poolMem % r2 % next) - end if - end if - else if (poolItr % nDims == 3) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels if (associated(prevPoolMem)) then - poolMem % r3a(i) % prev => prevPoolMem % r3a(i) + poolMem % r3 % prev => prevPoolMem % r3 else - nullify(poolMem % r3a(i) % prev) + nullify(poolMem % r3 % prev) end if if (associated(nextPoolMem)) then - poolMem % r3a(i) % next => nextPoolMem % r3a(i) + poolMem % r3 % next => nextPoolMem % r3 else - nullify(poolMem % r3a(i) % next) + nullify(poolMem % r3 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % r3 % prev => prevPoolMem % r3 - else - nullify(poolMem % r3 % prev) end if - 
- if (associated(nextPoolMem)) then - poolMem % r3 % next => nextPoolMem % r3 + else if (poolItr % nDims == 4) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % r4a(i) % prev => prevPoolMem % r4a(i) + else + nullify(poolMem % r4a(i) % prev) + end if + + if (associated(nextPoolMem)) then + poolMem % r4a(i) % next => nextPoolMem % r4a(i) + else + nullify(poolMem % r4a(i) % next) + end if + end do else - nullify(poolMem % r3 % next) - end if - end if - else if (poolItr % nDims == 4) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels if (associated(prevPoolMem)) then - poolMem % r4a(i) % prev => prevPoolMem % r4a(i) + poolMem % r4 % prev => prevPoolMem % r4 else - nullify(poolMem % r4a(i) % prev) + nullify(poolMem % r4 % prev) end if if (associated(nextPoolMem)) then - poolMem % r4a(i) % next => nextPoolMem % r4a(i) + poolMem % r4 % next => nextPoolMem % r4 else - nullify(poolMem % r4a(i) % next) + nullify(poolMem % r4 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % r4 % prev => prevPoolMem % r4 - else - nullify(poolMem % r4 % prev) end if - - if (associated(nextPoolMem)) then - poolMem % r4 % next => nextPoolMem % r4 + else if (poolItr % nDims == 5) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % r5a(i) % prev => prevPoolMem % r5a(i) + else + nullify(poolMem % r5a(i) % prev) + end if + + if (associated(nextPoolMem)) then + poolMem % r5a(i) % next => nextPoolMem % r5a(i) + else + nullify(poolMem % r5a(i) % next) + end if + end do else - nullify(poolMem % r4 % next) - end if - end if - else if (poolItr % nDims == 5) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels if (associated(prevPoolMem)) then - poolMem % r5a(i) % prev => prevPoolMem % r5a(i) + poolMem % r5 % prev => prevPoolMem % r5 else - nullify(poolMem % r5a(i) % prev) + 
nullify(poolMem % r5 % prev) end if if (associated(nextPoolMem)) then - poolMem % r5a(i) % next => nextPoolMem % r5a(i) + poolMem % r5 % next => nextPoolMem % r5 else - nullify(poolMem % r5a(i) % next) + nullify(poolMem % r5 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % r5 % prev => prevPoolMem % r5 - else - nullify(poolMem % r5 % prev) - end if - - if (associated(nextPoolMem)) then - poolMem % r5 % next => nextPoolMem % r5 - else - nullify(poolMem % r5 % next) end if end if - end if - else if (poolItr % dataType == MPAS_POOL_INTEGER) then - if (poolItr % nDims == 0) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels + else if (poolItr % dataType == MPAS_POOL_INTEGER) then + if (poolItr % nDims == 0) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % i0a(i) % prev => prevPoolMem % i0a(i) + else + nullify(poolMem % i0a(i) % prev) + end if + + if (associated(nextPoolMem)) then + poolMem % i0a(i) % next => nextPoolMem % i0a(i) + else + nullify(poolMem % i0a(i) % next) + end if + end do + else if (associated(prevPoolMem)) then - poolMem % i0a(i) % prev => prevPoolMem % i0a(i) + poolMem % i0 % prev => prevPoolMem % i0 else - nullify(poolMem % i0a(i) % prev) + nullify(poolMem % i0 % prev) end if if (associated(nextPoolMem)) then - poolMem % i0a(i) % next => nextPoolMem % i0a(i) + poolMem % i0 % next => nextPoolMem % i0 else - nullify(poolMem % i0a(i) % next) + nullify(poolMem % i0 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % i0 % prev => prevPoolMem % i0 - else - nullify(poolMem % i0 % prev) end if - - if (associated(nextPoolMem)) then - poolMem % i0 % next => nextPoolMem % i0 + else if (poolItr % nDims == 1) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % i1a(i) % prev => prevPoolMem % i1a(i) + else + nullify(poolMem % i1a(i) % 
prev) + end if + + if (associated(nextPoolMem)) then + poolMem % i1a(i) % next => nextPoolMem % i1a(i) + else + nullify(poolMem % i1a(i) % next) + end if + end do else - nullify(poolMem % i0 % next) - end if - end if - else if (poolItr % nDims == 1) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels if (associated(prevPoolMem)) then - poolMem % i1a(i) % prev => prevPoolMem % i1a(i) + poolMem % i1 % prev => prevPoolMem % i1 else - nullify(poolMem % i1a(i) % prev) + nullify(poolMem % i1 % prev) end if if (associated(nextPoolMem)) then - poolMem % i1a(i) % next => nextPoolMem % i1a(i) + poolMem % i1 % next => nextPoolMem % i1 else - nullify(poolMem % i1a(i) % next) + nullify(poolMem % i1 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % i1 % prev => prevPoolMem % i1 - else - nullify(poolMem % i1 % prev) end if - - if (associated(nextPoolMem)) then - poolMem % i1 % next => nextPoolMem % i1 + else if (poolItr % nDims == 2) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % i2a(i) % prev => prevPoolMem % i2a(i) + else + nullify(poolMem % i2a(i) % prev) + end if + + if (associated(nextPoolMem)) then + poolMem % i2a(i) % next => nextPoolMem % i2a(i) + else + nullify(poolMem % i2a(i) % next) + end if + end do else - nullify(poolMem % i1 % next) - end if - end if - else if (poolItr % nDims == 2) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels if (associated(prevPoolMem)) then - poolMem % i2a(i) % prev => prevPoolMem % i2a(i) + poolMem % i2 % prev => prevPoolMem % i2 else - nullify(poolMem % i2a(i) % prev) + nullify(poolMem % i2 % prev) end if if (associated(nextPoolMem)) then - poolMem % i2a(i) % next => nextPoolMem % i2a(i) + poolMem % i2 % next => nextPoolMem % i2 else - nullify(poolMem % i2a(i) % next) + nullify(poolMem % i2 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % i2 % prev => 
prevPoolMem % i2 - else - nullify(poolMem % i2 % prev) end if - - if (associated(nextPoolMem)) then - poolMem % i2 % next => nextPoolMem % i2 + else if (poolItr % nDims == 3) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % i3a(i) % prev => prevPoolMem % i3a(i) + else + nullify(poolMem % i3a(i) % prev) + end if + + if (associated(nextPoolMem)) then + poolMem % i3a(i) % next => nextPoolMem % i3a(i) + else + nullify(poolMem % i3a(i) % next) + end if + end do else - nullify(poolMem % i2 % next) - end if - end if - else if (poolItr % nDims == 3) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels if (associated(prevPoolMem)) then - poolMem % i3a(i) % prev => prevPoolMem % i3a(i) + poolMem % i3 % prev => prevPoolMem % i3 else - nullify(poolMem % i3a(i) % prev) + nullify(poolMem % i3 % prev) end if if (associated(nextPoolMem)) then - poolMem % i3a(i) % next => nextPoolMem % i3a(i) + poolMem % i3 % next => nextPoolMem % i3 else - nullify(poolMem % i3a(i) % next) + nullify(poolMem % i3 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % i3 % prev => prevPoolMem % i3 - else - nullify(poolMem % i3 % prev) - end if - - if (associated(nextPoolMem)) then - poolMem % i3 % next => nextPoolMem % i3 - else - nullify(poolMem % i3 % next) end if end if - end if - else if (poolItr % dataType == MPAS_POOL_CHARACTER) then - if (poolItr % nDims == 0) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels + else if (poolItr % dataType == MPAS_POOL_CHARACTER) then + if (poolItr % nDims == 0) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % c0a(i) % prev => prevPoolMem % c0a(i) + else + nullify(poolMem % c0a(i) % prev) + end if + + if (associated(nextPoolMem)) then + poolMem % c0a(i) % next => nextPoolMem % c0a(i) + else + nullify(poolMem % c0a(i) % next) + end if + 
end do + else if (associated(prevPoolMem)) then - poolMem % c0a(i) % prev => prevPoolMem % c0a(i) + poolMem % c0 % prev => prevPoolMem % c0 else - nullify(poolMem % c0a(i) % prev) + nullify(poolMem % c0 % prev) end if if (associated(nextPoolMem)) then - poolMem % c0a(i) % next => nextPoolMem % c0a(i) + poolMem % c0 % next => nextPoolMem % c0 else - nullify(poolMem % c0a(i) % next) + nullify(poolMem % c0 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % c0 % prev => prevPoolMem % c0 - else - nullify(poolMem % c0 % prev) end if - - if (associated(nextPoolMem)) then - poolMem % c0 % next => nextPoolMem % c0 + else if (poolItr % nDims == 1) then + if (poolItr % nTimeLevels > 1) then + do i = 1, poolItr % nTimeLevels + if (associated(prevPoolMem)) then + poolMem % c1a(i) % prev => prevPoolMem % c1a(i) + else + nullify(poolMem % c1a(i) % prev) + end if + + if (associated(nextPoolMem)) then + poolMem % c1a(i) % next => nextPoolMem % c1a(i) + else + nullify(poolMem % c1a(i) % next) + end if + end do else - nullify(poolMem % c0 % next) - end if - end if - else if (poolItr % nDims == 1) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels if (associated(prevPoolMem)) then - poolMem % c1a(i) % prev => prevPoolMem % c1a(i) + poolMem % c1 % prev => prevPoolMem % c1 else - nullify(poolMem % c1a(i) % prev) + nullify(poolMem % c1 % prev) end if if (associated(nextPoolMem)) then - poolMem % c1a(i) % next => nextPoolMem % c1a(i) + poolMem % c1 % next => nextPoolMem % c1 else - nullify(poolMem % c1a(i) % next) + nullify(poolMem % c1 % next) end if - end do - else - if (associated(prevPoolMem)) then - poolMem % c1 % prev => prevPoolMem % c1 - else - nullify(poolMem % c1 % prev) - end if - - if (associated(nextPoolMem)) then - poolMem % c1 % next => nextPoolMem % c1 - else - nullify(poolMem % c1 % next) end if end if end if @@ -1672,6 +1709,9 @@ recursive subroutine mpas_pool_link_parinfo(block, inPool)!{{{ type 
(mpas_pool_data_type), pointer :: poolMem type (mpas_pool_iterator_type) :: poolItr character (len=StrKIND), dimension(:), pointer :: dimNames + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() call mpas_pool_begin_iteration(inPool) do while (mpas_pool_get_next_member(inPool, poolItr)) @@ -1682,409 +1722,411 @@ recursive subroutine mpas_pool_link_parinfo(block, inPool)!{{{ ! Link fields else if (poolItr % memberType == MPAS_POOL_FIELD) then - decompType = -1 - - poolMem => pool_get_member(inPool, poolItr % memberName, MPAS_POOL_FIELD) - - if (poolItr % dataType == MPAS_POOL_REAL) then - if (poolItr % nDims == 0) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels - nullify(poolMem % r0a(i) % sendList) - nullify(poolMem % r0a(i) % recvList) - nullify(poolMem % r0a(i) % copyList) - end do - else - nullify(poolMem % r0 % sendList) - nullify(poolMem % r0 % recvList) - nullify(poolMem % r0 % copyList) - end if - else if (poolItr % nDims == 1) then - if (poolItr % nTimeLevels > 1) then - decompType = pool_get_member_decomp_type(poolMem % r1a(1) % dimNames(1)) - - if (decompType == MPAS_DECOMP_CELLS) then - do i = 1, poolItr % nTimeLevels - poolMem % r1a(i) % sendList => block % parinfo % cellsToSend - poolMem % r1a(i) % recvList => block % parinfo % cellsToRecv - poolMem % r1a(i) % copyList => block % parinfo % cellsToCopy - end do - else if (decompType == MPAS_DECOMP_EDGES) then - do i = 1, poolItr % nTimeLevels - poolMem % r1a(i) % sendList => block % parinfo % edgesToSend - poolMem % r1a(i) % recvList => block % parinfo % edgesToRecv - poolMem % r1a(i) % copyList => block % parinfo % edgesToCopy - end do - else if (decompType == MPAS_DECOMP_VERTICES) then + if ( threadNum == 0 ) then + decompType = -1 + + poolMem => pool_get_member(inPool, poolItr % memberName, MPAS_POOL_FIELD) + + if (poolItr % dataType == MPAS_POOL_REAL) then + if (poolItr % nDims == 0) then + if (poolItr % nTimeLevels > 1) then do i = 1, poolItr % 
nTimeLevels - poolMem % r1a(i) % sendList => block % parinfo % verticesToSend - poolMem % r1a(i) % recvList => block % parinfo % verticesToRecv - poolMem % r1a(i) % copyList => block % parinfo % verticesToCopy - end do - end if - else - decompType = pool_get_member_decomp_type(poolMem % r1 % dimNames(1)) - - if (decompType == MPAS_DECOMP_CELLS) then - poolMem % r1 % sendList => block % parinfo % cellsToSend - poolMem % r1 % recvList => block % parinfo % cellsToRecv - poolMem % r1 % copyList => block % parinfo % cellsToCopy - else if (decompType == MPAS_DECOMP_EDGES) then - poolMem % r1 % sendList => block % parinfo % edgesToSend - poolMem % r1 % recvList => block % parinfo % edgesToRecv - poolMem % r1 % copyList => block % parinfo % edgesToCopy - else if (decompType == MPAS_DECOMP_VERTICES) then - poolMem % r1 % sendList => block % parinfo % verticesToSend - poolMem % r1 % recvList => block % parinfo % verticesToRecv - poolMem % r1 % copyList => block % parinfo % verticesToCopy - end if - end if - else if (poolItr % nDims == 2) then - if (poolItr % nTimeLevels > 1) then - decompType = pool_get_member_decomp_type(poolMem % r2a(1) % dimNames(2)) - - if (decompType == MPAS_DECOMP_CELLS) then - do i = 1, poolItr % nTimeLevels - poolMem % r2a(i) % sendList => block % parinfo % cellsToSend - poolMem % r2a(i) % recvList => block % parinfo % cellsToRecv - poolMem % r2a(i) % copyList => block % parinfo % cellsToCopy - end do - else if (decompType == MPAS_DECOMP_EDGES) then - do i = 1, poolItr % nTimeLevels - poolMem % r2a(i) % sendList => block % parinfo % edgesToSend - poolMem % r2a(i) % recvList => block % parinfo % edgesToRecv - poolMem % r2a(i) % copyList => block % parinfo % edgesToCopy - end do - else if (decompType == MPAS_DECOMP_VERTICES) then - do i = 1, poolItr % nTimeLevels - poolMem % r2a(i) % sendList => block % parinfo % verticesToSend - poolMem % r2a(i) % recvList => block % parinfo % verticesToRecv - poolMem % r2a(i) % copyList => block % parinfo % 
verticesToCopy - end do - end if - else - decompType = pool_get_member_decomp_type(poolMem % r2 % dimNames(2)) - - if (decompType == MPAS_DECOMP_CELLS) then - poolMem % r2 % sendList => block % parinfo % cellsToSend - poolMem % r2 % recvList => block % parinfo % cellsToRecv - poolMem % r2 % copyList => block % parinfo % cellsToCopy - else if (decompType == MPAS_DECOMP_EDGES) then - poolMem % r2 % sendList => block % parinfo % edgesToSend - poolMem % r2 % recvList => block % parinfo % edgesToRecv - poolMem % r2 % copyList => block % parinfo % edgesToCopy - else if (decompType == MPAS_DECOMP_VERTICES) then - poolMem % r2 % sendList => block % parinfo % verticesToSend - poolMem % r2 % recvList => block % parinfo % verticesToRecv - poolMem % r2 % copyList => block % parinfo % verticesToCopy - end if - end if - else if (poolItr % nDims == 3) then - if (poolItr % nTimeLevels > 1) then - decompType = pool_get_member_decomp_type(poolMem % r3a(1) % dimNames(3)) - - if (decompType == MPAS_DECOMP_CELLS) then - do i = 1, poolItr % nTimeLevels - poolMem % r3a(i) % sendList => block % parinfo % cellsToSend - poolMem % r3a(i) % recvList => block % parinfo % cellsToRecv - poolMem % r3a(i) % copyList => block % parinfo % cellsToCopy - end do - else if (decompType == MPAS_DECOMP_EDGES) then - do i = 1, poolItr % nTimeLevels - poolMem % r3a(i) % sendList => block % parinfo % edgesToSend - poolMem % r3a(i) % recvList => block % parinfo % edgesToRecv - poolMem % r3a(i) % copyList => block % parinfo % edgesToCopy - end do - else if (decompType == MPAS_DECOMP_VERTICES) then - do i = 1, poolItr % nTimeLevels - poolMem % r3a(i) % sendList => block % parinfo % verticesToSend - poolMem % r3a(i) % recvList => block % parinfo % verticesToRecv - poolMem % r3a(i) % copyList => block % parinfo % verticesToCopy - end do - end if - else - decompType = pool_get_member_decomp_type(poolMem % r3 % dimNames(3)) - - if (decompType == MPAS_DECOMP_CELLS) then - poolMem % r3 % sendList => block % parinfo % 
cellsToSend - poolMem % r3 % recvList => block % parinfo % cellsToRecv - poolMem % r3 % copyList => block % parinfo % cellsToCopy - else if (decompType == MPAS_DECOMP_EDGES) then - poolMem % r3 % sendList => block % parinfo % edgesToSend - poolMem % r3 % recvList => block % parinfo % edgesToRecv - poolMem % r3 % copyList => block % parinfo % edgesToCopy - else if (decompType == MPAS_DECOMP_VERTICES) then - poolMem % r3 % sendList => block % parinfo % verticesToSend - poolMem % r3 % recvList => block % parinfo % verticesToRecv - poolMem % r3 % copyList => block % parinfo % verticesToCopy - end if - end if - else if (poolItr % nDims == 4) then - if (poolItr % nTimeLevels > 1) then - decompType = pool_get_member_decomp_type(poolMem % r4 % dimNames(4)) - - if (decompType == MPAS_DECOMP_CELLS) then - do i = 1, poolItr % nTimeLevels - poolMem % r4a(i) % sendList => block % parinfo % cellsToSend - poolMem % r4a(i) % recvList => block % parinfo % cellsToRecv - poolMem % r4a(i) % copyList => block % parinfo % cellsToCopy - end do - else if (decompType == MPAS_DECOMP_EDGES) then - do i = 1, poolItr % nTimeLevels - poolMem % r4a(i) % sendList => block % parinfo % edgesToSend - poolMem % r4a(i) % recvList => block % parinfo % edgesToRecv - poolMem % r4a(i) % copyList => block % parinfo % edgesToCopy - end do - else if (decompType == MPAS_DECOMP_VERTICES) then - do i = 1, poolItr % nTimeLevels - poolMem % r4a(i) % sendList => block % parinfo % verticesToSend - poolMem % r4a(i) % recvList => block % parinfo % verticesToRecv - poolMem % r4a(i) % copyList => block % parinfo % verticesToCopy + nullify(poolMem % r0a(i) % sendList) + nullify(poolMem % r0a(i) % recvList) + nullify(poolMem % r0a(i) % copyList) end do + else + nullify(poolMem % r0 % sendList) + nullify(poolMem % r0 % recvList) + nullify(poolMem % r0 % copyList) end if - else - decompType = pool_get_member_decomp_type(poolMem % r4 % dimNames(4)) - - if (decompType == MPAS_DECOMP_CELLS) then - poolMem % r4 % sendList => 
block % parinfo % cellsToSend - poolMem % r4 % recvList => block % parinfo % cellsToRecv - poolMem % r4 % copyList => block % parinfo % cellsToCopy - else if (decompType == MPAS_DECOMP_EDGES) then - poolMem % r4 % sendList => block % parinfo % edgesToSend - poolMem % r4 % recvList => block % parinfo % edgesToRecv - poolMem % r4 % copyList => block % parinfo % edgesToCopy - else if (decompType == MPAS_DECOMP_VERTICES) then - poolMem % r4 % sendList => block % parinfo % verticesToSend - poolMem % r4 % recvList => block % parinfo % verticesToRecv - poolMem % r4 % copyList => block % parinfo % verticesToCopy + else if (poolItr % nDims == 1) then + if (poolItr % nTimeLevels > 1) then + decompType = pool_get_member_decomp_type(poolMem % r1a(1) % dimNames(1)) + + if (decompType == MPAS_DECOMP_CELLS) then + do i = 1, poolItr % nTimeLevels + poolMem % r1a(i) % sendList => block % parinfo % cellsToSend + poolMem % r1a(i) % recvList => block % parinfo % cellsToRecv + poolMem % r1a(i) % copyList => block % parinfo % cellsToCopy + end do + else if (decompType == MPAS_DECOMP_EDGES) then + do i = 1, poolItr % nTimeLevels + poolMem % r1a(i) % sendList => block % parinfo % edgesToSend + poolMem % r1a(i) % recvList => block % parinfo % edgesToRecv + poolMem % r1a(i) % copyList => block % parinfo % edgesToCopy + end do + else if (decompType == MPAS_DECOMP_VERTICES) then + do i = 1, poolItr % nTimeLevels + poolMem % r1a(i) % sendList => block % parinfo % verticesToSend + poolMem % r1a(i) % recvList => block % parinfo % verticesToRecv + poolMem % r1a(i) % copyList => block % parinfo % verticesToCopy + end do + end if + else + decompType = pool_get_member_decomp_type(poolMem % r1 % dimNames(1)) + + if (decompType == MPAS_DECOMP_CELLS) then + poolMem % r1 % sendList => block % parinfo % cellsToSend + poolMem % r1 % recvList => block % parinfo % cellsToRecv + poolMem % r1 % copyList => block % parinfo % cellsToCopy + else if (decompType == MPAS_DECOMP_EDGES) then + poolMem % r1 % sendList 
=> block % parinfo % edgesToSend + poolMem % r1 % recvList => block % parinfo % edgesToRecv + poolMem % r1 % copyList => block % parinfo % edgesToCopy + else if (decompType == MPAS_DECOMP_VERTICES) then + poolMem % r1 % sendList => block % parinfo % verticesToSend + poolMem % r1 % recvList => block % parinfo % verticesToRecv + poolMem % r1 % copyList => block % parinfo % verticesToCopy + end if end if - end if - else if (poolItr % nDims == 5) then - if (poolItr % nTimeLevels > 1) then - decompType = pool_get_member_decomp_type(poolMem % r5a(1) % dimNames(5)) - - if (decompType == MPAS_DECOMP_CELLS) then - do i = 1, poolItr % nTimeLevels - poolMem % r5a(i) % sendList => block % parinfo % cellsToSend - poolMem % r5a(i) % recvList => block % parinfo % cellsToRecv - poolMem % r5a(i) % copyList => block % parinfo % cellsToCopy - end do - else if (decompType == MPAS_DECOMP_EDGES) then - do i = 1, poolItr % nTimeLevels - poolMem % r5a(i) % sendList => block % parinfo % edgesToSend - poolMem % r5a(i) % recvList => block % parinfo % edgesToRecv - poolMem % r5a(i) % copyList => block % parinfo % edgesToCopy - end do - else if (decompType == MPAS_DECOMP_VERTICES) then - do i = 1, poolItr % nTimeLevels - poolMem % r5a(i) % sendList => block % parinfo % verticesToSend - poolMem % r5a(i) % recvList => block % parinfo % verticesToRecv - poolMem % r5a(i) % copyList => block % parinfo % verticesToCopy - end do + else if (poolItr % nDims == 2) then + if (poolItr % nTimeLevels > 1) then + decompType = pool_get_member_decomp_type(poolMem % r2a(1) % dimNames(2)) + + if (decompType == MPAS_DECOMP_CELLS) then + do i = 1, poolItr % nTimeLevels + poolMem % r2a(i) % sendList => block % parinfo % cellsToSend + poolMem % r2a(i) % recvList => block % parinfo % cellsToRecv + poolMem % r2a(i) % copyList => block % parinfo % cellsToCopy + end do + else if (decompType == MPAS_DECOMP_EDGES) then + do i = 1, poolItr % nTimeLevels + poolMem % r2a(i) % sendList => block % parinfo % edgesToSend + 
poolMem % r2a(i) % recvList => block % parinfo % edgesToRecv + poolMem % r2a(i) % copyList => block % parinfo % edgesToCopy + end do + else if (decompType == MPAS_DECOMP_VERTICES) then + do i = 1, poolItr % nTimeLevels + poolMem % r2a(i) % sendList => block % parinfo % verticesToSend + poolMem % r2a(i) % recvList => block % parinfo % verticesToRecv + poolMem % r2a(i) % copyList => block % parinfo % verticesToCopy + end do + end if + else + decompType = pool_get_member_decomp_type(poolMem % r2 % dimNames(2)) + + if (decompType == MPAS_DECOMP_CELLS) then + poolMem % r2 % sendList => block % parinfo % cellsToSend + poolMem % r2 % recvList => block % parinfo % cellsToRecv + poolMem % r2 % copyList => block % parinfo % cellsToCopy + else if (decompType == MPAS_DECOMP_EDGES) then + poolMem % r2 % sendList => block % parinfo % edgesToSend + poolMem % r2 % recvList => block % parinfo % edgesToRecv + poolMem % r2 % copyList => block % parinfo % edgesToCopy + else if (decompType == MPAS_DECOMP_VERTICES) then + poolMem % r2 % sendList => block % parinfo % verticesToSend + poolMem % r2 % recvList => block % parinfo % verticesToRecv + poolMem % r2 % copyList => block % parinfo % verticesToCopy + end if end if - else - decompType = pool_get_member_decomp_type(poolMem % r5 % dimNames(5)) - - if (decompType == MPAS_DECOMP_CELLS) then - poolMem % r5 % sendList => block % parinfo % cellsToSend - poolMem % r5 % recvList => block % parinfo % cellsToRecv - poolMem % r5 % copyList => block % parinfo % cellsToCopy - else if (decompType == MPAS_DECOMP_EDGES) then - poolMem % r5 % sendList => block % parinfo % edgesToSend - poolMem % r5 % recvList => block % parinfo % edgesToRecv - poolMem % r5 % copyList => block % parinfo % edgesToCopy - else if (decompType == MPAS_DECOMP_VERTICES) then - poolMem % r5 % sendList => block % parinfo % verticesToSend - poolMem % r5 % recvList => block % parinfo % verticesToRecv - poolMem % r5 % copyList => block % parinfo % verticesToCopy + else if (poolItr 
% nDims == 3) then + if (poolItr % nTimeLevels > 1) then + decompType = pool_get_member_decomp_type(poolMem % r3a(1) % dimNames(3)) + + if (decompType == MPAS_DECOMP_CELLS) then + do i = 1, poolItr % nTimeLevels + poolMem % r3a(i) % sendList => block % parinfo % cellsToSend + poolMem % r3a(i) % recvList => block % parinfo % cellsToRecv + poolMem % r3a(i) % copyList => block % parinfo % cellsToCopy + end do + else if (decompType == MPAS_DECOMP_EDGES) then + do i = 1, poolItr % nTimeLevels + poolMem % r3a(i) % sendList => block % parinfo % edgesToSend + poolMem % r3a(i) % recvList => block % parinfo % edgesToRecv + poolMem % r3a(i) % copyList => block % parinfo % edgesToCopy + end do + else if (decompType == MPAS_DECOMP_VERTICES) then + do i = 1, poolItr % nTimeLevels + poolMem % r3a(i) % sendList => block % parinfo % verticesToSend + poolMem % r3a(i) % recvList => block % parinfo % verticesToRecv + poolMem % r3a(i) % copyList => block % parinfo % verticesToCopy + end do + end if + else + decompType = pool_get_member_decomp_type(poolMem % r3 % dimNames(3)) + + if (decompType == MPAS_DECOMP_CELLS) then + poolMem % r3 % sendList => block % parinfo % cellsToSend + poolMem % r3 % recvList => block % parinfo % cellsToRecv + poolMem % r3 % copyList => block % parinfo % cellsToCopy + else if (decompType == MPAS_DECOMP_EDGES) then + poolMem % r3 % sendList => block % parinfo % edgesToSend + poolMem % r3 % recvList => block % parinfo % edgesToRecv + poolMem % r3 % copyList => block % parinfo % edgesToCopy + else if (decompType == MPAS_DECOMP_VERTICES) then + poolMem % r3 % sendList => block % parinfo % verticesToSend + poolMem % r3 % recvList => block % parinfo % verticesToRecv + poolMem % r3 % copyList => block % parinfo % verticesToCopy + end if end if - end if - end if - else if (poolItr % dataType == MPAS_POOL_INTEGER) then - if (poolItr % nDims == 0) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels - nullify(poolMem % i0a(i) % sendList) - 
nullify(poolMem % i0a(i) % recvList) - nullify(poolMem % i0a(i) % copyList) - end do - else - nullify(poolMem % i0 % sendList) - nullify(poolMem % i0 % recvList) - nullify(poolMem % i0 % copyList) - end if - else if (poolItr % nDims == 1) then - if (poolItr % nTimeLevels > 1) then - decompType = pool_get_member_decomp_type(poolMem % i1a(1) % dimNames(1)) - - if (decompType == MPAS_DECOMP_CELLS) then - do i = 1, poolItr % nTimeLevels - poolMem % i1a(i) % sendList => block % parinfo % cellsToSend - poolMem % i1a(i) % recvList => block % parinfo % cellsToRecv - poolMem % i1a(i) % copyList => block % parinfo % cellsToCopy - end do - else if (decompType == MPAS_DECOMP_EDGES) then - do i = 1, poolItr % nTimeLevels - poolMem % i1a(i) % sendList => block % parinfo % edgesToSend - poolMem % i1a(i) % recvList => block % parinfo % edgesToRecv - poolMem % i1a(i) % copyList => block % parinfo % edgesToCopy - end do - else if (decompType == MPAS_DECOMP_VERTICES) then - do i = 1, poolItr % nTimeLevels - poolMem % i1a(i) % sendList => block % parinfo % verticesToSend - poolMem % i1a(i) % recvList => block % parinfo % verticesToRecv - poolMem % i1a(i) % copyList => block % parinfo % verticesToCopy - end do + else if (poolItr % nDims == 4) then + if (poolItr % nTimeLevels > 1) then + decompType = pool_get_member_decomp_type(poolMem % r4a(1) % dimNames(4)) + + if (decompType == MPAS_DECOMP_CELLS) then + do i = 1, poolItr % nTimeLevels + poolMem % r4a(i) % sendList => block % parinfo % cellsToSend + poolMem % r4a(i) % recvList => block % parinfo % cellsToRecv + poolMem % r4a(i) % copyList => block % parinfo % cellsToCopy + end do + else if (decompType == MPAS_DECOMP_EDGES) then + do i = 1, poolItr % nTimeLevels + poolMem % r4a(i) % sendList => block % parinfo % edgesToSend + poolMem % r4a(i) % recvList => block % parinfo % edgesToRecv + poolMem % r4a(i) % copyList => block % parinfo % edgesToCopy + end do + else if (decompType == MPAS_DECOMP_VERTICES) then + do i = 1, poolItr %
nTimeLevels + poolMem % r4a(i) % sendList => block % parinfo % verticesToSend + poolMem % r4a(i) % recvList => block % parinfo % verticesToRecv + poolMem % r4a(i) % copyList => block % parinfo % verticesToCopy + end do + end if + else + decompType = pool_get_member_decomp_type(poolMem % r4 % dimNames(4)) + + if (decompType == MPAS_DECOMP_CELLS) then + poolMem % r4 % sendList => block % parinfo % cellsToSend + poolMem % r4 % recvList => block % parinfo % cellsToRecv + poolMem % r4 % copyList => block % parinfo % cellsToCopy + else if (decompType == MPAS_DECOMP_EDGES) then + poolMem % r4 % sendList => block % parinfo % edgesToSend + poolMem % r4 % recvList => block % parinfo % edgesToRecv + poolMem % r4 % copyList => block % parinfo % edgesToCopy + else if (decompType == MPAS_DECOMP_VERTICES) then + poolMem % r4 % sendList => block % parinfo % verticesToSend + poolMem % r4 % recvList => block % parinfo % verticesToRecv + poolMem % r4 % copyList => block % parinfo % verticesToCopy + end if end if - else - decompType = pool_get_member_decomp_type(poolMem % i1 % dimNames(1)) - - if (decompType == MPAS_DECOMP_CELLS) then - poolMem % i1 % sendList => block % parinfo % cellsToSend - poolMem % i1 % recvList => block % parinfo % cellsToRecv - poolMem % i1 % copyList => block % parinfo % cellsToCopy - else if (decompType == MPAS_DECOMP_EDGES) then - poolMem % i1 % sendList => block % parinfo % edgesToSend - poolMem % i1 % recvList => block % parinfo % edgesToRecv - poolMem % i1 % copyList => block % parinfo % edgesToCopy - else if (decompType == MPAS_DECOMP_VERTICES) then - poolMem % i1 % sendList => block % parinfo % verticesToSend - poolMem % i1 % recvList => block % parinfo % verticesToRecv - poolMem % i1 % copyList => block % parinfo % verticesToCopy + else if (poolItr % nDims == 5) then + if (poolItr % nTimeLevels > 1) then + decompType = pool_get_member_decomp_type(poolMem % r5a(1) % dimNames(5)) + + if (decompType == MPAS_DECOMP_CELLS) then + do i = 1, poolItr % 
nTimeLevels + poolMem % r5a(i) % sendList => block % parinfo % cellsToSend + poolMem % r5a(i) % recvList => block % parinfo % cellsToRecv + poolMem % r5a(i) % copyList => block % parinfo % cellsToCopy + end do + else if (decompType == MPAS_DECOMP_EDGES) then + do i = 1, poolItr % nTimeLevels + poolMem % r5a(i) % sendList => block % parinfo % edgesToSend + poolMem % r5a(i) % recvList => block % parinfo % edgesToRecv + poolMem % r5a(i) % copyList => block % parinfo % edgesToCopy + end do + else if (decompType == MPAS_DECOMP_VERTICES) then + do i = 1, poolItr % nTimeLevels + poolMem % r5a(i) % sendList => block % parinfo % verticesToSend + poolMem % r5a(i) % recvList => block % parinfo % verticesToRecv + poolMem % r5a(i) % copyList => block % parinfo % verticesToCopy + end do + end if + else + decompType = pool_get_member_decomp_type(poolMem % r5 % dimNames(5)) + + if (decompType == MPAS_DECOMP_CELLS) then + poolMem % r5 % sendList => block % parinfo % cellsToSend + poolMem % r5 % recvList => block % parinfo % cellsToRecv + poolMem % r5 % copyList => block % parinfo % cellsToCopy + else if (decompType == MPAS_DECOMP_EDGES) then + poolMem % r5 % sendList => block % parinfo % edgesToSend + poolMem % r5 % recvList => block % parinfo % edgesToRecv + poolMem % r5 % copyList => block % parinfo % edgesToCopy + else if (decompType == MPAS_DECOMP_VERTICES) then + poolMem % r5 % sendList => block % parinfo % verticesToSend + poolMem % r5 % recvList => block % parinfo % verticesToRecv + poolMem % r5 % copyList => block % parinfo % verticesToCopy + end if end if end if - else if (poolItr % nDims == 2) then - if (poolItr % nTimeLevels > 1) then - decompType = pool_get_member_decomp_type(poolMem % i2a(1) % dimNames(2)) - - if (decompType == MPAS_DECOMP_CELLS) then - do i = 1, poolItr % nTimeLevels - poolMem % i2a(i) % sendList => block % parinfo % cellsToSend - poolMem % i2a(i) % recvList => block % parinfo % cellsToRecv - poolMem % i2a(i) % copyList => block % parinfo % 
cellsToCopy - end do - else if (decompType == MPAS_DECOMP_EDGES) then - do i = 1, poolItr % nTimeLevels - poolMem % i2a(i) % sendList => block % parinfo % edgesToSend - poolMem % i2a(i) % recvList => block % parinfo % edgesToRecv - poolMem % i2a(i) % copyList => block % parinfo % edgesToCopy - end do - else if (decompType == MPAS_DECOMP_VERTICES) then + else if (poolItr % dataType == MPAS_POOL_INTEGER) then + if (poolItr % nDims == 0) then + if (poolItr % nTimeLevels > 1) then do i = 1, poolItr % nTimeLevels - poolMem % i2a(i) % sendList => block % parinfo % verticesToSend - poolMem % i2a(i) % recvList => block % parinfo % verticesToRecv - poolMem % i2a(i) % copyList => block % parinfo % verticesToCopy + nullify(poolMem % i0a(i) % sendList) + nullify(poolMem % i0a(i) % recvList) + nullify(poolMem % i0a(i) % copyList) end do + else + nullify(poolMem % i0 % sendList) + nullify(poolMem % i0 % recvList) + nullify(poolMem % i0 % copyList) end if - else - decompType = pool_get_member_decomp_type(poolMem % i2 % dimNames(2)) - - if (decompType == MPAS_DECOMP_CELLS) then - poolMem % i2 % sendList => block % parinfo % cellsToSend - poolMem % i2 % recvList => block % parinfo % cellsToRecv - poolMem % i2 % copyList => block % parinfo % cellsToCopy - else if (decompType == MPAS_DECOMP_EDGES) then - poolMem % i2 % sendList => block % parinfo % edgesToSend - poolMem % i2 % recvList => block % parinfo % edgesToRecv - poolMem % i2 % copyList => block % parinfo % edgesToCopy - else if (decompType == MPAS_DECOMP_VERTICES) then - poolMem % i2 % sendList => block % parinfo % verticesToSend - poolMem % i2 % recvList => block % parinfo % verticesToRecv - poolMem % i2 % copyList => block % parinfo % verticesToCopy + else if (poolItr % nDims == 1) then + if (poolItr % nTimeLevels > 1) then + decompType = pool_get_member_decomp_type(poolMem % i1a(1) % dimNames(1)) + + if (decompType == MPAS_DECOMP_CELLS) then + do i = 1, poolItr % nTimeLevels + poolMem % i1a(i) % sendList => block % parinfo 
% cellsToSend + poolMem % i1a(i) % recvList => block % parinfo % cellsToRecv + poolMem % i1a(i) % copyList => block % parinfo % cellsToCopy + end do + else if (decompType == MPAS_DECOMP_EDGES) then + do i = 1, poolItr % nTimeLevels + poolMem % i1a(i) % sendList => block % parinfo % edgesToSend + poolMem % i1a(i) % recvList => block % parinfo % edgesToRecv + poolMem % i1a(i) % copyList => block % parinfo % edgesToCopy + end do + else if (decompType == MPAS_DECOMP_VERTICES) then + do i = 1, poolItr % nTimeLevels + poolMem % i1a(i) % sendList => block % parinfo % verticesToSend + poolMem % i1a(i) % recvList => block % parinfo % verticesToRecv + poolMem % i1a(i) % copyList => block % parinfo % verticesToCopy + end do + end if + else + decompType = pool_get_member_decomp_type(poolMem % i1 % dimNames(1)) + + if (decompType == MPAS_DECOMP_CELLS) then + poolMem % i1 % sendList => block % parinfo % cellsToSend + poolMem % i1 % recvList => block % parinfo % cellsToRecv + poolMem % i1 % copyList => block % parinfo % cellsToCopy + else if (decompType == MPAS_DECOMP_EDGES) then + poolMem % i1 % sendList => block % parinfo % edgesToSend + poolMem % i1 % recvList => block % parinfo % edgesToRecv + poolMem % i1 % copyList => block % parinfo % edgesToCopy + else if (decompType == MPAS_DECOMP_VERTICES) then + poolMem % i1 % sendList => block % parinfo % verticesToSend + poolMem % i1 % recvList => block % parinfo % verticesToRecv + poolMem % i1 % copyList => block % parinfo % verticesToCopy + end if end if - end if - else if (poolItr % nDims == 3) then - if (poolItr % nTimeLevels > 1) then - decompType = pool_get_member_decomp_type(poolMem % i3a(1) % dimNames(3)) - - if (decompType == MPAS_DECOMP_CELLS) then - do i = 1, poolItr % nTimeLevels - poolMem % i3a(i) % sendList => block % parinfo % cellsToSend - poolMem % i3a(i) % recvList => block % parinfo % cellsToRecv - poolMem % i3a(i) % copyList => block % parinfo % cellsToCopy - end do - else if (decompType == MPAS_DECOMP_EDGES) then 
- do i = 1, poolItr % nTimeLevels - poolMem % i3a(i) % sendList => block % parinfo % edgesToSend - poolMem % i3a(i) % recvList => block % parinfo % edgesToRecv - poolMem % i3a(i) % copyList => block % parinfo % edgesToCopy - end do - else if (decompType == MPAS_DECOMP_VERTICES) then - do i = 1, poolItr % nTimeLevels - poolMem % i3a(i) % sendList => block % parinfo % verticesToSend - poolMem % i3a(i) % recvList => block % parinfo % verticesToRecv - poolMem % i3a(i) % copyList => block % parinfo % verticesToCopy - end do + else if (poolItr % nDims == 2) then + if (poolItr % nTimeLevels > 1) then + decompType = pool_get_member_decomp_type(poolMem % i2a(1) % dimNames(2)) + + if (decompType == MPAS_DECOMP_CELLS) then + do i = 1, poolItr % nTimeLevels + poolMem % i2a(i) % sendList => block % parinfo % cellsToSend + poolMem % i2a(i) % recvList => block % parinfo % cellsToRecv + poolMem % i2a(i) % copyList => block % parinfo % cellsToCopy + end do + else if (decompType == MPAS_DECOMP_EDGES) then + do i = 1, poolItr % nTimeLevels + poolMem % i2a(i) % sendList => block % parinfo % edgesToSend + poolMem % i2a(i) % recvList => block % parinfo % edgesToRecv + poolMem % i2a(i) % copyList => block % parinfo % edgesToCopy + end do + else if (decompType == MPAS_DECOMP_VERTICES) then + do i = 1, poolItr % nTimeLevels + poolMem % i2a(i) % sendList => block % parinfo % verticesToSend + poolMem % i2a(i) % recvList => block % parinfo % verticesToRecv + poolMem % i2a(i) % copyList => block % parinfo % verticesToCopy + end do + end if + else + decompType = pool_get_member_decomp_type(poolMem % i2 % dimNames(2)) + + if (decompType == MPAS_DECOMP_CELLS) then + poolMem % i2 % sendList => block % parinfo % cellsToSend + poolMem % i2 % recvList => block % parinfo % cellsToRecv + poolMem % i2 % copyList => block % parinfo % cellsToCopy + else if (decompType == MPAS_DECOMP_EDGES) then + poolMem % i2 % sendList => block % parinfo % edgesToSend + poolMem % i2 % recvList => block % parinfo % 
edgesToRecv + poolMem % i2 % copyList => block % parinfo % edgesToCopy + else if (decompType == MPAS_DECOMP_VERTICES) then + poolMem % i2 % sendList => block % parinfo % verticesToSend + poolMem % i2 % recvList => block % parinfo % verticesToRecv + poolMem % i2 % copyList => block % parinfo % verticesToCopy + end if end if - else - decompType = pool_get_member_decomp_type(poolMem % i3 % dimNames(3)) - - if (decompType == MPAS_DECOMP_CELLS) then - poolMem % i3 % sendList => block % parinfo % cellsToSend - poolMem % i3 % recvList => block % parinfo % cellsToRecv - poolMem % i3 % copyList => block % parinfo % cellsToCopy - else if (decompType == MPAS_DECOMP_EDGES) then - poolMem % i3 % sendList => block % parinfo % edgesToSend - poolMem % i3 % recvList => block % parinfo % edgesToRecv - poolMem % i3 % copyList => block % parinfo % edgesToCopy - else if (decompType == MPAS_DECOMP_VERTICES) then - poolMem % i3 % sendList => block % parinfo % verticesToSend - poolMem % i3 % recvList => block % parinfo % verticesToRecv - poolMem % i3 % copyList => block % parinfo % verticesToCopy + else if (poolItr % nDims == 3) then + if (poolItr % nTimeLevels > 1) then + decompType = pool_get_member_decomp_type(poolMem % i3a(1) % dimNames(3)) + + if (decompType == MPAS_DECOMP_CELLS) then + do i = 1, poolItr % nTimeLevels + poolMem % i3a(i) % sendList => block % parinfo % cellsToSend + poolMem % i3a(i) % recvList => block % parinfo % cellsToRecv + poolMem % i3a(i) % copyList => block % parinfo % cellsToCopy + end do + else if (decompType == MPAS_DECOMP_EDGES) then + do i = 1, poolItr % nTimeLevels + poolMem % i3a(i) % sendList => block % parinfo % edgesToSend + poolMem % i3a(i) % recvList => block % parinfo % edgesToRecv + poolMem % i3a(i) % copyList => block % parinfo % edgesToCopy + end do + else if (decompType == MPAS_DECOMP_VERTICES) then + do i = 1, poolItr % nTimeLevels + poolMem % i3a(i) % sendList => block % parinfo % verticesToSend + poolMem % i3a(i) % recvList => block % 
parinfo % verticesToRecv + poolMem % i3a(i) % copyList => block % parinfo % verticesToCopy + end do + end if + else + decompType = pool_get_member_decomp_type(poolMem % i3 % dimNames(3)) + + if (decompType == MPAS_DECOMP_CELLS) then + poolMem % i3 % sendList => block % parinfo % cellsToSend + poolMem % i3 % recvList => block % parinfo % cellsToRecv + poolMem % i3 % copyList => block % parinfo % cellsToCopy + else if (decompType == MPAS_DECOMP_EDGES) then + poolMem % i3 % sendList => block % parinfo % edgesToSend + poolMem % i3 % recvList => block % parinfo % edgesToRecv + poolMem % i3 % copyList => block % parinfo % edgesToCopy + else if (decompType == MPAS_DECOMP_VERTICES) then + poolMem % i3 % sendList => block % parinfo % verticesToSend + poolMem % i3 % recvList => block % parinfo % verticesToRecv + poolMem % i3 % copyList => block % parinfo % verticesToCopy + end if end if end if - end if - else if (poolItr % dataType == MPAS_POOL_CHARACTER) then - if (poolItr % nDims == 0) then - if (poolItr % nTimeLevels > 1) then - do i = 1, poolItr % nTimeLevels - nullify(poolMem % c0a(i) % sendList) - nullify(poolMem % c0a(i) % recvList) - nullify(poolMem % c0a(i) % copyList) - end do - else - nullify(poolMem % c0 % sendList) - nullify(poolMem % c0 % recvList) - nullify(poolMem % c0 % copyList) - end if - else if (poolItr % nDims == 1) then - if (poolItr % nTimeLevels > 1) then - decompType = pool_get_member_decomp_type(poolMem % c1a(1) % dimNames(1)) - - if (decompType == MPAS_DECOMP_CELLS) then - do i = 1, poolItr % nTimeLevels - poolMem % c1a(i) % sendList => block % parinfo % cellsToSend - poolMem % c1a(i) % recvList => block % parinfo % cellsToRecv - poolMem % c1a(i) % copyList => block % parinfo % cellsToCopy - end do - else if (decompType == MPAS_DECOMP_EDGES) then + else if (poolItr % dataType == MPAS_POOL_CHARACTER) then + if (poolItr % nDims == 0) then + if (poolItr % nTimeLevels > 1) then do i = 1, poolItr % nTimeLevels - poolMem % c1a(i) % sendList => block % 
parinfo % edgesToSend - poolMem % c1a(i) % recvList => block % parinfo % edgesToRecv - poolMem % c1a(i) % copyList => block % parinfo % edgesToCopy - end do - else if (decompType == MPAS_DECOMP_VERTICES) then - do i = 1, poolItr % nTimeLevels - poolMem % c1a(i) % sendList => block % parinfo % verticesToSend - poolMem % c1a(i) % recvList => block % parinfo % verticesToRecv - poolMem % c1a(i) % copyList => block % parinfo % verticesToCopy + nullify(poolMem % c0a(i) % sendList) + nullify(poolMem % c0a(i) % recvList) + nullify(poolMem % c0a(i) % copyList) end do + else + nullify(poolMem % c0 % sendList) + nullify(poolMem % c0 % recvList) + nullify(poolMem % c0 % copyList) end if - else - decompType = pool_get_member_decomp_type(poolMem % c1 % dimNames(1)) - - if (decompType == MPAS_DECOMP_CELLS) then - poolMem % c1 % sendList => block % parinfo % cellsToSend - poolMem % c1 % recvList => block % parinfo % cellsToRecv - poolMem % c1 % copyList => block % parinfo % cellsToCopy - else if (decompType == MPAS_DECOMP_EDGES) then - poolMem % c1 % sendList => block % parinfo % edgesToSend - poolMem % c1 % recvList => block % parinfo % edgesToRecv - poolMem % c1 % copyList => block % parinfo % edgesToCopy - else if (decompType == MPAS_DECOMP_VERTICES) then - poolMem % c1 % sendList => block % parinfo % verticesToSend - poolMem % c1 % recvList => block % parinfo % verticesToRecv - poolMem % c1 % copyList => block % parinfo % verticesToCopy + else if (poolItr % nDims == 1) then + if (poolItr % nTimeLevels > 1) then + decompType = pool_get_member_decomp_type(poolMem % c1a(1) % dimNames(1)) + + if (decompType == MPAS_DECOMP_CELLS) then + do i = 1, poolItr % nTimeLevels + poolMem % c1a(i) % sendList => block % parinfo % cellsToSend + poolMem % c1a(i) % recvList => block % parinfo % cellsToRecv + poolMem % c1a(i) % copyList => block % parinfo % cellsToCopy + end do + else if (decompType == MPAS_DECOMP_EDGES) then + do i = 1, poolItr % nTimeLevels + poolMem % c1a(i) % sendList => block 
% parinfo % edgesToSend + poolMem % c1a(i) % recvList => block % parinfo % edgesToRecv + poolMem % c1a(i) % copyList => block % parinfo % edgesToCopy + end do + else if (decompType == MPAS_DECOMP_VERTICES) then + do i = 1, poolItr % nTimeLevels + poolMem % c1a(i) % sendList => block % parinfo % verticesToSend + poolMem % c1a(i) % recvList => block % parinfo % verticesToRecv + poolMem % c1a(i) % copyList => block % parinfo % verticesToCopy + end do + end if + else + decompType = pool_get_member_decomp_type(poolMem % c1 % dimNames(1)) + + if (decompType == MPAS_DECOMP_CELLS) then + poolMem % c1 % sendList => block % parinfo % cellsToSend + poolMem % c1 % recvList => block % parinfo % cellsToRecv + poolMem % c1 % copyList => block % parinfo % cellsToCopy + else if (decompType == MPAS_DECOMP_EDGES) then + poolMem % c1 % sendList => block % parinfo % edgesToSend + poolMem % c1 % recvList => block % parinfo % edgesToRecv + poolMem % c1 % copyList => block % parinfo % edgesToCopy + else if (decompType == MPAS_DECOMP_VERTICES) then + poolMem % c1 % sendList => block % parinfo % verticesToSend + poolMem % c1 % recvList => block % parinfo % verticesToRecv + poolMem % c1 % copyList => block % parinfo % verticesToCopy + end if end if end if end if @@ -2115,22 +2157,26 @@ subroutine mpas_pool_add_field_0d_real(inPool, key, field)!{{{ type (field0DReal), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = 1 - newmem % data % r0 => field + allocate(newmem % data) + newmem % data % 
contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = 1 + newmem % data % r0 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_0d_real!}}} @@ -2155,22 +2201,26 @@ subroutine mpas_pool_add_field_1d_real(inPool, key, field)!{{{ type (field1DReal), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 1 - newmem % data % contentsTimeLevs = 1 - newmem % data % r1 => field + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 1 + newmem % data % contentsTimeLevs = 1 + newmem % data % r1 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_1d_real!}}} @@ -2195,22 +2245,26 @@ subroutine mpas_pool_add_field_2d_real(inPool, key, field)!{{{ type (field2DReal), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 2 - newmem % data % contentsTimeLevs = 1 - newmem % data % r2 => field + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 2 + newmem % data % contentsTimeLevs = 1 + newmem % data % r2 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_2d_real!}}} @@ -2235,22 +2289,26 @@ subroutine mpas_pool_add_field_3d_real(inPool, key, field)!{{{ type (field3DReal), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 3 - newmem % data % contentsTimeLevs = 1 - newmem % data % r3 => field + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 3 + newmem % data % contentsTimeLevs = 1 + newmem % data % r3 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_3d_real!}}} @@ -2275,22 +2333,26 @@ subroutine mpas_pool_add_field_4d_real(inPool, key, field)!{{{ type (field4DReal), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 4 - newmem % data % contentsTimeLevs = 1 - newmem % data % r4 => field + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 4 + newmem % data % contentsTimeLevs = 1 + newmem % data % r4 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_4d_real!}}} @@ -2315,22 +2377,26 @@ subroutine mpas_pool_add_field_5d_real(inPool, key, field)!{{{ type (field5DReal), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 5 - newmem % data % contentsTimeLevs = 1 - newmem % data % r5 => field + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 5 + newmem % data % contentsTimeLevs = 1 + newmem % data % r5 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_5d_real!}}} @@ -2355,22 +2421,26 @@ subroutine mpas_pool_add_field_0d_int(inPool, key, field)!{{{ type (field0DInteger), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_INTEGER - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = 1 - newmem % data % i0 => field + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_INTEGER + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = 1 + newmem % data % i0 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_0d_int!}}} @@ -2395,22 +2465,26 @@ subroutine mpas_pool_add_field_1d_int(inPool, key, field)!{{{ type (field1DInteger), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_INTEGER - newmem % data % contentsDims = 1 - newmem % data % contentsTimeLevs = 1 - newmem % data % i1 => field + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_INTEGER + newmem % data % contentsDims = 1 + newmem % data % contentsTimeLevs = 1 + newmem % data % i1 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_1d_int!}}} @@ -2435,22 +2509,26 @@ subroutine mpas_pool_add_field_2d_int(inPool, key, field)!{{{ type (field2DInteger), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_INTEGER - newmem % data % contentsDims = 2 - newmem % data % contentsTimeLevs = 1 - newmem % data % i2 => field + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_INTEGER + newmem % data % contentsDims = 2 + newmem % data % contentsTimeLevs = 1 + newmem % data % i2 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_2d_int!}}} @@ -2475,22 +2553,26 @@ subroutine mpas_pool_add_field_3d_int(inPool, key, field)!{{{ type (field3DInteger), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_INTEGER - newmem % data % contentsDims = 3 - newmem % data % contentsTimeLevs = 1 - newmem % data % i3 => field + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_INTEGER + newmem % data % contentsDims = 3 + newmem % data % contentsTimeLevs = 1 + newmem % data % i3 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_3d_int!}}} @@ -2515,22 +2597,26 @@ subroutine mpas_pool_add_field_0d_char(inPool, key, field)!{{{ type (field0DChar), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_CHARACTER - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = 1 - newmem % data % c0 => field + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_CHARACTER + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = 1 + newmem % data % c0 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_0d_char!}}} @@ -2555,22 +2641,26 @@ subroutine mpas_pool_add_field_1d_char(inPool, key, field)!{{{ type (field1DChar), pointer :: field type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_CHARACTER - newmem % data % contentsDims = 1 - newmem % data % contentsTimeLevs = 1 - newmem % data % c1 => field + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_CHARACTER + newmem % data % contentsDims = 1 + newmem % data % contentsTimeLevs = 1 + newmem % data % c1 => field - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_field_1d_char!}}} @@ -2595,23 +2685,27 @@ subroutine mpas_pool_add_field_0d_reals(inPool, key, fields)!{{{ type (field0DReal), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % r0 => fields(1) - else - newmem % data % r0a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % r0 => fields(1) + else + newmem % data % r0a => fields + end if + end if - if (.not. 
pool_add_member(inPool, key, newmem)) then deallocate(newmem % data) deallocate(newmem) @@ -2639,23 +2733,27 @@ subroutine mpas_pool_add_field_1d_reals(inPool, key, fields)!{{{ type (field1DReal), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 1 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % r1 => fields(1) - else - newmem % data % r1a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 1 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % r1 => fields(1) + else + newmem % data % r1a => fields + end if + end if - if (.not. 
pool_add_member(inPool, key, newmem)) then deallocate(newmem % data) deallocate(newmem) @@ -2683,23 +2781,27 @@ subroutine mpas_pool_add_field_2d_reals(inPool, key, fields)!{{{ type (field2DReal), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 2 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % r2 => fields(1) - else - newmem % data % r2a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 2 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % r2 => fields(1) + else + newmem % data % r2a => fields + end if + end if - if (.not. 
pool_add_member(inPool, key, newmem)) then deallocate(newmem % data) deallocate(newmem) @@ -2727,23 +2829,27 @@ subroutine mpas_pool_add_field_3d_reals(inPool, key, fields)!{{{ type (field3DReal), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 3 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % r3 => fields(1) - else - newmem % data % r3a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 3 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % r3 => fields(1) + else + newmem % data % r3a => fields + end if + end if - if (.not. 
pool_add_member(inPool, key, newmem)) then deallocate(newmem % data) deallocate(newmem) @@ -2771,23 +2877,27 @@ subroutine mpas_pool_add_field_4d_reals(inPool, key, fields)!{{{ type (field4DReal), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 4 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % r4 => fields(1) - else - newmem % data % r4a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 4 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % r4 => fields(1) + else + newmem % data % r4a => fields + end if + end if - if (.not. 
pool_add_member(inPool, key, newmem)) then deallocate(newmem % data) deallocate(newmem) @@ -2815,23 +2925,27 @@ subroutine mpas_pool_add_field_5d_reals(inPool, key, fields)!{{{ type (field5DReal), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 5 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % r5 => fields(1) - else - newmem % data % r5a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 5 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % r5 => fields(1) + else + newmem % data % r5a => fields + end if + end if - if (.not. 
pool_add_member(inPool, key, newmem)) then deallocate(newmem % data) deallocate(newmem) @@ -2859,23 +2973,27 @@ subroutine mpas_pool_add_field_0d_ints(inPool, key, fields)!{{{ type (field0DInteger), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_INTEGER - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % i0 => fields(1) - else - newmem % data % i0a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_INTEGER + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % i0 => fields(1) + else + newmem % data % i0a => fields + end if + end if - if (.not. 
pool_add_member(inPool, key, newmem)) then deallocate(newmem % data) deallocate(newmem) @@ -2903,22 +3021,26 @@ subroutine mpas_pool_add_field_1d_ints(inPool, key, fields)!{{{ type (field1DInteger), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % keyLen = len_trim(key) - newmem % key = trim(key) - newmem % contentsType = MPAS_POOL_FIELD - nullify(newmem % next) + if ( threadNum == 0 ) then + allocate(newmem) + newmem % keyLen = len_trim(key) + newmem % key = trim(key) + newmem % contentsType = MPAS_POOL_FIELD + nullify(newmem % next) - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_INTEGER - newmem % data % contentsDims = 1 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % i1 => fields(1) - else - newmem % data % i1a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_INTEGER + newmem % data % contentsDims = 1 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % i1 => fields(1) + else + newmem % data % i1a => fields + end if end if if (.not. 
pool_add_member(inPool, key, newmem)) then @@ -2948,23 +3070,27 @@ subroutine mpas_pool_add_field_2d_ints(inPool, key, fields)!{{{ type (field2DInteger), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_INTEGER - newmem % data % contentsDims = 2 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % i2 => fields(1) - else - newmem % data % i2a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_INTEGER + newmem % data % contentsDims = 2 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % i2 => fields(1) + else + newmem % data % i2a => fields + end if + end if - if (.not. 
pool_add_member(inPool, key, newmem)) then deallocate(newmem % data) deallocate(newmem) @@ -2992,23 +3118,27 @@ subroutine mpas_pool_add_field_3d_ints(inPool, key, fields)!{{{ type (field3DInteger), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_INTEGER - newmem % data % contentsDims = 3 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % i3 => fields(1) - else - newmem % data % i3a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_INTEGER + newmem % data % contentsDims = 3 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % i3 => fields(1) + else + newmem % data % i3a => fields + end if + end if - if (.not. 
pool_add_member(inPool, key, newmem)) then deallocate(newmem % data) deallocate(newmem) @@ -3036,23 +3166,27 @@ subroutine mpas_pool_add_field_0d_chars(inPool, key, fields)!{{{ type (field0DChar), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_CHARACTER - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % c0 => fields(1) - else - newmem % data % c0a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_CHARACTER + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % c0 => fields(1) + else + newmem % data % c0a => fields + end if + end if - if (.not. 
pool_add_member(inPool, key, newmem)) then deallocate(newmem % data) deallocate(newmem) @@ -3080,23 +3214,27 @@ subroutine mpas_pool_add_field_1d_chars(inPool, key, fields)!{{{ type (field1DChar), dimension(:), pointer :: fields type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_FIELD + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_FIELD - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_CHARACTER - newmem % data % contentsDims = 1 - newmem % data % contentsTimeLevs = size(fields) - if (newmem % data % contentsTimeLevs == 1) then - newmem % data % c1 => fields(1) - else - newmem % data % c1a => fields + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_CHARACTER + newmem % data % contentsDims = 1 + newmem % data % contentsTimeLevs = size(fields) + if (newmem % data % contentsTimeLevs == 1) then + newmem % data % c1 => fields(1) + else + newmem % data % c1a => fields + end if + end if - if (.not. pool_add_member(inPool, key, newmem)) then deallocate(newmem % data) deallocate(newmem) @@ -3127,6 +3265,11 @@ subroutine mpas_pool_get_field_info(inPool, key, info)!{{{ integer :: hash, endl type (mpas_pool_member_type), pointer :: ptr + info % fieldType = -1 + info % nDims = -1 + info % nTimeLevels = -1 + info % nHaloLayers = -1 + info % isActive = .false. 
endl = len_trim(key) call pool_hash(hash, key, endl) @@ -3142,91 +3285,170 @@ subroutine mpas_pool_get_field_info(inPool, key, info)!{{{ info % fieldType = ptr % data % contentsType info % nDims = ptr % data % contentsDims info % nTimeLevels = ptr % data % contentsTimeLevs + info % nHaloLayers = 0 if ( info % fieldType == MPAS_POOL_REAL ) then if ( info % nDims == 0 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % r0a(1) % isActive + if ( associated(ptr % data % r0a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % r0a(1) % sendList % halos) + end if else info % isActive = ptr % data % r0 % isActive + if ( associated(ptr % data % r0 % sendList) ) then + info % nHaloLayers = size(ptr % data % r0 % sendList % halos) + end if end if else if ( info % nDims == 1 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % r1a(1) % isActive + if ( associated(ptr % data % r1a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % r1a(1) % sendList % halos) + end if else info % isActive = ptr % data % r1 % isActive + if ( associated(ptr % data % r1 % sendList) ) then + info % nHaloLayers = size(ptr % data % r1 % sendList % halos) + end if end if else if ( info % nDims == 2 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % r2a(1) % isActive + if ( associated(ptr % data % r2a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % r2a(1) % sendList % halos) + end if else info % isActive = ptr % data % r2 % isActive + if ( associated(ptr % data % r2 % sendList) ) then + info % nHaloLayers = size(ptr % data % r2 % sendList % halos) + end if end if else if ( info % nDims == 3 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % r3a(1) % isActive + if ( associated(ptr % data % r3a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % r3a(1) % sendList % halos) + end if else info % isActive = ptr % data % r3 % isActive + if ( associated(ptr % data % r3 % sendList) ) 
then + info % nHaloLayers = size(ptr % data % r3 % sendList % halos) + end if end if else if ( info % nDims == 4 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % r4a(1) % isActive + if ( associated(ptr % data % r4a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % r4a(1) % sendList % halos) + end if else info % isActive = ptr % data % r4 % isActive + if ( associated(ptr % data % r4 % sendList) ) then + info % nHaloLayers = size(ptr % data % r4 % sendList % halos) + end if end if else if ( info % nDims == 5 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % r5a(1) % isActive + if ( associated(ptr % data % r5a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % r5a(1) % sendList % halos) + end if else info % isActive = ptr % data % r5 % isActive + if ( associated(ptr % data % r5 % sendList) ) then + info % nHaloLayers = size(ptr % data % r5 % sendList % halos) + end if end if end if else if (info % fieldType == MPAS_POOL_INTEGER ) then if ( info % nDims == 0 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % i0a(1) % isActive + if ( associated(ptr % data % i0a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % i0a(1) % sendList % halos) + end if else info % isActive = ptr % data % i0 % isActive + if ( associated(ptr % data % i0 % sendList) ) then + info % nHaloLayers = size(ptr % data % i0 % sendList % halos) + end if end if else if ( info % nDims == 1 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % i1a(1) % isActive + if ( associated(ptr % data % i1a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % i1a(1) % sendList % halos) + end if else info % isActive = ptr % data % i1 % isActive + if ( associated(ptr % data % i1 % sendList) ) then + info % nHaloLayers = size(ptr % data % i1 % sendList % halos) + end if end if else if ( info % nDims == 2 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % i2a(1) % 
isActive + if ( associated(ptr % data % i2a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % i2a(1) % sendList % halos) + end if else info % isActive = ptr % data % i2 % isActive + if ( associated(ptr % data % i2 % sendList) ) then + info % nHaloLayers = size(ptr % data % i2 % sendList % halos) + end if end if else if ( info % nDims == 3 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % i3a(1) % isActive + if ( associated(ptr % data % i3a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % i3a(1) % sendList % halos) + end if else info % isActive = ptr % data % i3 % isActive + if ( associated(ptr % data % i3 % sendList) ) then + info % nHaloLayers = size(ptr % data % i3 % sendList % halos) + end if end if end if else if (info % fieldType == MPAS_POOL_CHARACTER ) then if ( info % nDims == 0 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % c0a(1) % isActive + if ( associated(ptr % data % c0a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % c0a(1) % sendList % halos) + end if else info % isActive = ptr % data % c0 % isActive + if ( associated(ptr % data % c0 % sendList) ) then + info % nHaloLayers = size(ptr % data % c0 % sendList % halos) + end if end if else if ( info % nDims == 1 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % c1a(1) % isActive + if ( associated(ptr % data % c1a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % c1a(1) % sendList % halos) + end if else info % isActive = ptr % data % c1 % isActive + if ( associated(ptr % data % c1 % sendList) ) then + info % nHaloLayers = size(ptr % data % c1 % sendList % halos) + end if end if end if else if (info % fieldType == MPAS_POOL_LOGICAL ) then if ( info % nDims == 0 ) then if ( info % nTimeLevels > 1 ) then info % isActive = ptr % data % l0a(1) % isActive + if ( associated(ptr % data % l0a(1) % sendList) ) then + info % nHaloLayers = size(ptr % data % l0a(1) % sendList % halos) + end 
if else info % isActive = ptr % data % l0 % isActive + if ( associated(ptr % data % l0 % sendList) ) then + info % nHaloLayers = size(ptr % data % l0 % sendList % halos) + end if end if end if end if @@ -4367,23 +4589,27 @@ subroutine mpas_pool_add_config_real(inPool, key, value)!{{{ real (kind=RKIND), intent(in) :: value type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_CONFIG + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_CONFIG - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_REAL - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = 0 - allocate(newmem % data % simple_real) - newmem % data % simple_real = value + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_REAL + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = 0 + allocate(newmem % data % simple_real) + newmem % data % simple_real = value - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_config_real!}}} @@ -4408,23 +4634,27 @@ subroutine mpas_pool_add_config_int(inPool, key, value)!{{{ integer, intent(in) :: value type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_CONFIG + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_CONFIG - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_INTEGER - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = 0 - allocate(newmem % data % simple_int) - newmem % data % simple_int = value + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_INTEGER + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = 0 + allocate(newmem % data % simple_int) + newmem % data % simple_int = value - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_config_int!}}} @@ -4450,29 +4680,34 @@ subroutine mpas_pool_add_config_char(inPool, key, value)!{{{ type (mpas_pool_member_type), pointer :: newmem integer :: oldLevel + integer :: threadNum - oldLevel = currentErrorLevel - - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_CONFIG - - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_CHARACTER - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = 0 - allocate(newmem % data % simple_char) - if (len_trim(value) > StrKIND) then - call mpas_pool_set_error_level(MPAS_POOL_WARN) - call pool_mesg('WARNING mpas_pool_add_config_char: Input value for key '//trim(key)//' longer than StrKIND.') - call mpas_pool_set_error_level(oldLevel) - end if - newmem % data % simple_char = value + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + oldLevel = currentErrorLevel + + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_CONFIG + + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_CHARACTER + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = 0 + allocate(newmem % data % simple_char) + if (len_trim(value) > StrKIND) then + call mpas_pool_set_error_level(MPAS_POOL_WARN) + call pool_mesg('WARNING mpas_pool_add_config_char: Input value for key '//trim(key)//' longer than StrKIND.') + call mpas_pool_set_error_level(oldLevel) + end if + newmem % data % simple_char = value - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_config_char!}}} @@ -4497,23 +4732,27 @@ subroutine mpas_pool_add_config_logical(inPool, key, value)!{{{ logical, intent(in) :: value type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % keyLen = len_trim(key) - newmem % key = trim(key) - newmem % contentsType = MPAS_POOL_CONFIG + if ( threadNum == 0 ) then + allocate(newmem) + newmem % keyLen = len_trim(key) + newmem % key = trim(key) + newmem % contentsType = MPAS_POOL_CONFIG - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_LOGICAL - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = 0 - allocate(newmem % data % simple_logical) - newmem % data % simple_logical = value + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_LOGICAL + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = 0 + allocate(newmem % data % simple_logical) + newmem % data % simple_logical = value - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_config_logical!}}} @@ -4739,23 +4978,27 @@ subroutine mpas_pool_add_dimension_0d(inPool, key, dim)!{{{ integer, intent(in) :: dim type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_DIMENSION + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_DIMENSION - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_INTEGER - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = 0 - allocate(newmem % data % simple_int) - newmem % data % simple_int = dim + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_INTEGER + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = 0 + allocate(newmem % data % simple_int) + newmem % data % simple_int = dim - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_dimension_0d!}}} @@ -4780,24 +5023,28 @@ subroutine mpas_pool_add_dimension_1d(inPool, key, dims)!{{{ integer, dimension(:), intent(in) :: dims type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_DIMENSION + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_DIMENSION - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_INTEGER - newmem % data % contentsDims = size(dims) - newmem % data % contentsTimeLevs = 0 - allocate(newmem % data % simple_int_arr(newmem % data % contentsDims)) - newmem % data % simple_int_arr(:) = dims(:) + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_INTEGER + newmem % data % contentsDims = 1 + newmem % data % contentsTimeLevs = 0 + allocate(newmem % data % simple_int_arr(size(dims))) + newmem % data % simple_int_arr(:) = dims(:) - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data % simple_int_arr) - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data % simple_int_arr) + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_dimension_1d!}}} @@ -4900,21 +5147,26 @@ subroutine mpas_pool_add_subpool(inPool, key, subPool)!{{{ type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_SUBPOOL + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_SUBPOOL - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_SUBPOOL - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = 0 - newmem % data % p => subPool + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_SUBPOOL + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = 0 + newmem % data % p => subPool - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_subpool!}}} @@ -4972,23 +5224,27 @@ subroutine mpas_pool_add_package(inPool, key, value)!{{{ logical, intent(in) :: value type (mpas_pool_member_type), pointer :: newmem + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - allocate(newmem) - newmem % key = trim(key) - newmem % keyLen = len_trim(key) - newmem % contentsType = MPAS_POOL_PACKAGE + if ( threadNum == 0 ) then + allocate(newmem) + newmem % key = trim(key) + newmem % keyLen = len_trim(key) + newmem % contentsType = MPAS_POOL_PACKAGE - allocate(newmem % data) - newmem % data % contentsType = MPAS_POOL_LOGICAL - newmem % data % contentsDims = 0 - newmem % data % contentsTimeLevs = 0 - allocate(newmem % data % simple_logical) - newmem % data % simple_logical = value + allocate(newmem % data) + newmem % data % contentsType = MPAS_POOL_LOGICAL + newmem % data % contentsDims = 0 + newmem % data % contentsTimeLevs = 0 + allocate(newmem % data % simple_logical) + newmem % data % simple_logical = value - if (.not. pool_add_member(inPool, key, newmem)) then - deallocate(newmem % data) - deallocate(newmem) + if (.not. 
pool_add_member(inPool, key, newmem)) then + deallocate(newmem % data) + deallocate(newmem) + end if end if end subroutine mpas_pool_add_package!}}} @@ -5202,10 +5458,15 @@ subroutine mpas_pool_begin_iteration(inPool)!{{{ type (mpas_pool_type), intent(inout) :: inPool - integer :: i + integer :: i, threadNum + threadNum = mpas_threading_get_thread_num() - inPool % iterator => inPool % iteration_head + if ( threadNum == 0 ) then + inPool % iterator => inPool % iteration_head + end if + + !$omp barrier end subroutine mpas_pool_begin_iteration!}}} @@ -5230,7 +5491,10 @@ logical function mpas_pool_get_next_member(inPool, iterator)!{{{ type (mpas_pool_type), intent(inout) :: inPool type (mpas_pool_iterator_type), intent(inout) :: iterator - integer :: i + integer :: i, threadNum + + threadNum = mpas_threading_get_thread_num() + !$omp barrier ! ! As long as there are members left to be iterated over, the inPool%iterator @@ -5250,14 +5514,17 @@ logical function mpas_pool_get_next_member(inPool, iterator)!{{{ iterator % nTimeLevels = 0 end if mpas_pool_get_next_member = .true. - - ! Advance iterator to next item - inPool % iterator => inPool % iterator % iteration_next - else mpas_pool_get_next_member = .false. end if + !$omp barrier + + if ( threadNum == 0 .and. associated(inPool % iterator) ) then + ! Only thread 0 can advance iterator to next item + inPool % iterator => inPool % iterator % iteration_next + end if + end function mpas_pool_get_next_member!}}} @@ -5334,6 +5601,95 @@ recursive subroutine mpas_pool_shift_time_levels(inPool)!{{{ end subroutine mpas_pool_shift_time_levels!}}} +!----------------------------------------------------------------------- +! subroutine mpas_pool_print_summary +! +!> \brief MPAS Pool Summary write routine +!> \author Doug Jacobsen +!> \date 07/29/2015 +!> \details +!> This routine writes out a summary of the contents of a pool that match a specific type. 
+!> The memberType input argument can take the value of MPAS_POOL_FIELD, +!> MPAS_POOL_CONFIG, or MPAS_POOL_PACKAGE. +!> The recurseSubpools_in input argument defaults to false, but can be used to +!> write a summary of subpool contents in addition to the current pool. +!> +!> It's important to note that this routine iterates over a pool. So if it is +!> called within a pool iteration loop, it could cause issues. +! +!----------------------------------------------------------------------- + recursive subroutine mpas_pool_print_summary(inPool, memberType, recurseSubpools_in)!{{{ + type (mpas_pool_type), pointer :: inPool + integer, intent(in) :: memberType + logical, optional, intent(in) :: recurseSubpools_in + + type (mpas_pool_iterator_type) :: poolItr + logical :: recurseSubpools + + type (mpas_pool_type), pointer :: subPool + real (kind=RKIND), pointer :: tempReal + integer, pointer :: tempInteger + logical, pointer :: tempLogical + character (len=StrKIND), pointer :: tempChar + + if ( present(recurseSubpools_in) ) then + recurseSubpools = recurseSubpools_in + else + recurseSubpools = .false. + end if + + call mpas_pool_begin_iteration(inPool) + do while ( mpas_pool_get_next_member(inPool, poolItr) ) + ! Handle writing out configs + if ( poolItr % memberType == memberType .and. memberType == MPAS_POOL_CONFIG ) then + if ( poolItr % dataType == MPAS_POOL_REAL ) then + call mpas_pool_get_config(inPool, poolItr % memberName, tempReal) + write(stderrUnit, *) ' ' // trim(poolItr % memberName) // ' = ', tempReal + else if ( poolItr % dataType == MPAS_POOL_INTEGER ) then + call mpas_pool_get_config(inPool, poolItr % memberName, tempInteger) + write(stderrUnit, *) ' ' // trim(poolItr % memberName) // ' = ', tempInteger + else if ( poolItr % dataType == MPAS_POOL_LOGICAL ) then + call mpas_pool_get_config(inPool, poolItr % memberName, tempLogical) + if ( tempLogical ) then + write(stderrUnit, *) ' ' // trim(poolItr % memberName) // ' = .true.' 
+ else + write(stderrUnit, *) ' ' // trim(poolItr % memberName) // ' = .false.' + end if + else if ( poolItr % dataType == MPAS_POOL_CHARACTER ) then + call mpas_pool_get_config(inPool, poolItr % memberName, tempChar) + write(stderrUnit, *) ' ' // trim(poolItr % memberName) // ' = ''' // trim(tempChar) // '''' + end if + ! Handle packages + else if (poolItr % memberType == memberType .and. memberType == MPAS_POOL_PACKAGE ) then + call mpas_pool_get_package(inPool, poolItr % memberName, tempLogical) + if ( tempLogical ) then + write(stderrUnit, *) ' ' // trim(poolItr % memberName) // ' = .true.' + else + write(stderrUnit, *) ' ' // trim(poolItr % memberName) // ' = .false.' + end if + ! Handle fields + else if (poolItr % memberType == memberType .and. memberType == MPAS_POOL_FIELD ) then + write(stderrUnit, *) ' ' // trim(poolItr % memberName) + if ( poolItr % dataType == MPAS_POOL_REAL) then + write(stderrUnit, *) ' Type: Real' + else if ( poolItr % dataType == MPAS_POOL_INTEGER) then + write(stderrUnit, *) ' Type: Integer' + else if ( poolItr % dataType == MPAS_POOL_CHARACTER) then + write(stderrUnit, *) ' Type: Character' + else if ( poolItr % dataType == MPAS_POOL_LOGICAL) then + write(stderrUnit, *) ' Type: Logical' + end if + write(stderrUnit, *) ' Number of dimensions: ', poolItr % nDims + write(stderrUnit, *) ' Number of time levels: ', poolItr % nTimeLevels + else if (poolItr % memberType == MPAS_POOL_SUBPOOL .and. recurseSubpools ) then + write(stderrUnit, *) ' ** Begin subpool: ' // trim(poolItr % memberName) + call mpas_pool_get_subpool(inPool, poolItr % memberName, subPool) + call mpas_pool_print_summary(subPool, memberType, recurseSubpools) + write(stderrUnit, *) ' ** End subpool: ' // trim(poolItr % memberName) + end if + end do + + end subroutine mpas_pool_print_summary!}}} !!!!!!!!!! Private subroutines !!!!!!!!!! 
@@ -5437,7 +5793,9 @@ logical function pool_remove_member(inPool, key, memType)!{{{ integer :: hash, endl type (mpas_pool_member_type), pointer :: ptr, ptr_prev + integer :: threadNum + threadNum = mpas_threading_get_thread_num() endl = len_trim(key) call pool_hash(hash, key, endl) @@ -5452,24 +5810,26 @@ logical function pool_remove_member(inPool, key, memType)!{{{ if (endl == ptr_prev % keyLen) then if (key(1:endl) == ptr_prev % key(1:endl)) then inPool % table(hash) % head => ptr_prev % next + if ( threadNum == 0 ) then - ! - ! Un-link the member from the iteration list - ! - if (associated(ptr_prev % iteration_prev)) then - ptr_prev % iteration_prev % iteration_next => ptr_prev % iteration_next - else - inPool % iteration_head => ptr_prev % iteration_next - end if + ! + ! Un-link the member from the iteration list + ! + if (associated(ptr_prev % iteration_prev)) then + ptr_prev % iteration_prev % iteration_next => ptr_prev % iteration_next + else + inPool % iteration_head => ptr_prev % iteration_next + end if - if (associated(ptr_prev % iteration_next)) then - ptr_prev % iteration_next % iteration_prev => ptr_prev % iteration_prev - else - inPool % iteration_tail => ptr_prev % iteration_prev - end if + if (associated(ptr_prev % iteration_next)) then + ptr_prev % iteration_next % iteration_prev => ptr_prev % iteration_prev + else + inPool % iteration_tail => ptr_prev % iteration_prev + end if !TODO: are there cases where we need to delete more data here? - deallocate(ptr_prev) + deallocate(ptr_prev) + end if pool_remove_member = .true. return end if @@ -5482,25 +5842,27 @@ logical function pool_remove_member(inPool, key, memType)!{{{ if (ptr % contentsType == memType) then if (endl == ptr % keyLen) then if (key(1:endl) == ptr % key(1:endl)) then - ptr_prev % next => ptr % next - - ! - ! Un-link the member from the iteration list - ! 
- if (associated(ptr % iteration_prev)) then - ptr % iteration_prev % iteration_next => ptr % iteration_next - else - inPool % iteration_head => ptr % iteration_next - end if + if ( threadNum == 0 ) then + ptr_prev % next => ptr % next + + ! + ! Un-link the member from the iteration list + ! + if (associated(ptr % iteration_prev)) then + ptr % iteration_prev % iteration_next => ptr % iteration_next + else + inPool % iteration_head => ptr % iteration_next + end if - if (associated(ptr % iteration_next)) then - ptr % iteration_next % iteration_prev => ptr % iteration_prev - else - inPool % iteration_tail => ptr % iteration_prev - end if + if (associated(ptr % iteration_next)) then + ptr % iteration_next % iteration_prev => ptr % iteration_prev + else + inPool % iteration_tail => ptr % iteration_prev + end if !TODO: are there cases where we need to delete more data here? - deallocate(ptr) + deallocate(ptr) + end if pool_remove_member = .true. return end if @@ -5522,11 +5884,18 @@ subroutine pool_mesg(mesg)!{{{ implicit none character (len=*), intent(in) :: mesg + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() if (currentErrorLevel == MPAS_POOL_WARN) then - write(stderrUnit,*) trim(mesg) + if ( threadNum == 0 ) then + write(stderrUnit,*) trim(mesg) + end if else if (currentErrorLevel == MPAS_POOL_FATAL) then - write(stderrUnit,*) trim(mesg) + if ( threadNum == 0 ) then + write(stderrUnit,*) trim(mesg) + end if call mpas_dmpar_global_abort(trim(mesg)) end if diff --git a/src/framework/mpas_pool_types.inc b/src/framework/mpas_pool_types.inc index d5eb28b5fd..dde9e289cc 100644 --- a/src/framework/mpas_pool_types.inc +++ b/src/framework/mpas_pool_types.inc @@ -91,5 +91,6 @@ integer :: fieldType integer :: nDims integer :: nTimeLevels + integer :: nHaloLayers logical :: isActive end type mpas_pool_field_info_type diff --git a/src/framework/mpas_sort.F b/src/framework/mpas_sort.F index 84e9601eca..431353fd39 100644 --- a/src/framework/mpas_sort.F +++ 
b/src/framework/mpas_sort.F @@ -128,6 +128,7 @@ subroutine mpas_quicksort_1dint(nArray, array)!{{{ integer :: pivot_value integer, dimension(1) :: temp integer, dimension(1000) :: lstack, rstack + real :: rnd if (nArray < 1) return @@ -141,7 +142,8 @@ subroutine mpas_quicksort_1dint(nArray, array)!{{{ r = rstack(top) top = top - 1 - pivot = (l+r)/2 + call random_number(rnd) + pivot = l + int(rnd * real(r-l)) pivot_value = array(pivot) temp(1) = array(pivot) @@ -201,6 +203,7 @@ subroutine mpas_quicksort_1dreal(nArray, array)!{{{ real (kind=RKIND) :: pivot_value real (kind=RKIND), dimension(1) :: temp integer, dimension(1000) :: lstack, rstack + real :: rnd if (nArray < 1) return @@ -214,7 +217,8 @@ subroutine mpas_quicksort_1dreal(nArray, array)!{{{ r = rstack(top) top = top - 1 - pivot = (l+r)/2 + call random_number(rnd) + pivot = l + int(rnd * real(r-l)) pivot_value = array(pivot) temp(1) = array(pivot) @@ -274,6 +278,7 @@ subroutine mpas_quicksort_2dint(nArray, array)!{{{ integer :: pivot_value integer, dimension(2) :: temp integer, dimension(1000) :: lstack, rstack + real :: rnd if (nArray < 1) return @@ -287,7 +292,8 @@ subroutine mpas_quicksort_2dint(nArray, array)!{{{ r = rstack(top) top = top - 1 - pivot = (l+r)/2 + call random_number(rnd) + pivot = l + int(rnd * real(r-l)) pivot_value = array(1,pivot) temp(:) = array(:,pivot) @@ -347,6 +353,7 @@ subroutine mpas_quicksort_2dreal(nArray, array)!{{{ real (kind=RKIND) :: pivot_value real (kind=RKIND), dimension(2) :: temp integer, dimension(1000) :: lstack, rstack + real :: rnd if (nArray < 1) return @@ -360,7 +367,8 @@ subroutine mpas_quicksort_2dreal(nArray, array)!{{{ r = rstack(top) top = top - 1 - pivot = (l+r)/2 + call random_number(rnd) + pivot = l + int(rnd * real(r-l)) pivot_value = array(1,pivot) temp(:) = array(:,pivot) diff --git a/src/framework/mpas_stream_list.F b/src/framework/mpas_stream_list.F index 5062834755..4258039b9b 100644 --- a/src/framework/mpas_stream_list.F +++ 
b/src/framework/mpas_stream_list.F @@ -205,40 +205,83 @@ end subroutine MPAS_stream_list_remove !}}} !----------------------------------------------------------------------- ! routine MPAS_stream_list_query ! - !> \brief Get a stream from a stream list + !> \brief Get streams from a stream list !> \author Michael Duda, Doug Jacobsen !> \date 08/06/2014 !> \details - !> Searches through a stream list, and returns a pointer for the stream with a matching name. + !> This function allows one to search through a stream list for a set of + !> matching streams. In the first call to this function, the stream + !> argument should be a null pointer. When called a single time, this + !> function sets the stream argument to the first stream in the list that + !> matches the streamPattern argument. Upon subsequent calls, searching + !> begins at the first stream *after* the stream argument, to allow one to + !> find all streams that match streamPattern. Each time a valid stream is + !> found, this function returns .true. and if no more streams are found, it + !> will return .false. ! 
!----------------------------------------------------------------------- - logical function MPAS_stream_list_query(list, streamName, stream, ierr) result(found) !{{{ + logical function MPAS_stream_list_query(list, streamPattern, stream, ierr) result(found) !{{{ + + use iso_c_binding, only: c_char, c_int + use mpas_c_interfacing, only : mpas_f_to_c_string, mpas_c_to_f_string implicit none type (MPAS_stream_list_type), intent(in) :: list - character (len=*), intent(in) :: streamName + character (len=*), intent(in) :: streamPattern type (MPAS_stream_list_type), pointer :: stream integer, intent(out), optional :: ierr + character(kind=c_char), dimension(StrKIND+1) :: c_pattern + character(kind=c_char), dimension(StrKIND+1) :: c_test_string + integer(kind=c_int) :: c_match + type (MPAS_stream_list_type), pointer :: node + interface + subroutine check_regex_match(pattern, string, imatch) bind(c) + use iso_c_binding, only: c_char, c_int + character(kind=c_char), dimension(*), intent(in) :: pattern, string + integer(kind=c_int), intent(out) :: imatch + end subroutine check_regex_match + end interface LIST_DEBUG_WRITE(' -- Called MPAS_stream_list_query()') + call mpas_f_to_c_string(streamPattern, c_pattern) + if (present(ierr)) ierr = MPAS_STREAM_LIST_NOERR found = .false. ! 
Return if no streams exist in stream list if ( list % nItems == 0 ) then - LIST_DEBUG_WRITE(' -- Item '//trim(streamName)//' not found in list.') + LIST_DEBUG_WRITE(' -- No items matching '//trim(streamPattern)//' found in list.') nullify(stream) return end if - node => list % head + if ( associated(stream) ) then + if ( associated(stream % next) ) then + node => stream % next + else + nullify(node) + end if + else + node => list % head + end if + do while (associated(node)) - if (node % name == streamName) then + call mpas_f_to_c_string(node % name, c_test_string) + + call check_regex_match(c_pattern, c_test_string, c_match) + + if ( c_match == -1 ) then + write(stderrUnit, *) 'ERROR: Regular expression matching failed.' + write(stderrUnit, *) ' Pattern was: ', trim(streamPattern) + write(stderrUnit, *) ' Test string was: ', trim(node % name) + end if + + if ( c_match == 1 ) then found = .true. stream => node return @@ -246,7 +289,7 @@ logical function MPAS_stream_list_query(list, streamName, stream, ierr) result(f node => node % next end do - LIST_DEBUG_WRITE(' -- Item '//trim(streamName)//' not found in list.') + LIST_DEBUG_WRITE(' -- No items matching '//trim(streamPattern)//' found in list.') nullify(stream) end function MPAS_stream_list_query !}}} diff --git a/src/framework/mpas_stream_list_types.inc b/src/framework/mpas_stream_list_types.inc index acd28770f3..9bb4aec851 100644 --- a/src/framework/mpas_stream_list_types.inc +++ b/src/framework/mpas_stream_list_types.inc @@ -13,6 +13,7 @@ logical :: valid = .false. logical :: immutable = .false. logical :: active_stream = .true. + logical :: blockWrite = .false. 
character(len=StrKIND) :: filename character(len=StrKIND) :: filename_template character(len=StrKIND) :: filename_interval diff --git a/src/framework/mpas_stream_manager.F b/src/framework/mpas_stream_manager.F index 3606f78949..7ea5ef287d 100644 --- a/src/framework/mpas_stream_manager.F +++ b/src/framework/mpas_stream_manager.F @@ -13,6 +13,9 @@ module mpas_stream_manager use mpas_io_units use mpas_io_streams use mpas_stream_list + use mpas_sort + use mpas_threading + use mpas_abort, only : mpas_dmpar_global_abort public :: MPAS_stream_mgr_init, & @@ -34,10 +37,12 @@ module mpas_stream_manager MPAS_stream_mgr_ringing_alarms, & MPAS_stream_mgr_add_att, & MPAS_stream_mgr_write, & + MPAS_stream_mgr_block_write, & MPAS_stream_mgr_read, & MPAS_stream_mgr_begin_iteration, & MPAS_stream_mgr_get_next_stream, & MPAS_stream_mgr_get_next_field, & + MPAS_stream_mgr_stream_exists, & MPAS_get_stream_filename, & MPAS_build_stream_filename @@ -94,69 +99,75 @@ module mpas_stream_manager !> clock and a pool from which fields may be drawn and added to streams. ! 
!----------------------------------------------------------------------- - subroutine MPAS_stream_mgr_init(manager, clock, allFields, allPackages, allStructs, ierr)!{{{ + subroutine MPAS_stream_mgr_init(manager, ioContext, clock, allFields, allPackages, allStructs, ierr)!{{{ implicit none character (len=*), parameter :: sub = 'MPAS_stream_mgr_init' type (MPAS_streamManager_type), pointer :: manager + type (MPAS_IO_context_type), pointer :: ioContext type (MPAS_Clock_type), pointer :: clock type (MPAS_Pool_type), pointer :: allFields type (MPAS_Pool_type), pointer :: allPackages type (MPAS_Pool_type), pointer :: allStructs integer, intent(out), optional :: ierr - integer :: err_local + integer :: err_local, threadNum - call seed_random() + threadNum = mpas_threading_get_thread_num() - STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_init()') + if ( threadNum == 0 ) then + call seed_random() - if (present(ierr)) ierr = MPAS_STREAM_MGR_NOERR + STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_init()') - allocate(manager) - manager % allFields => allFields - manager % allPackages => allPackages - manager % allStructs => allStructs - manager % streamClock => clock - manager % numStreams = 0 - manager % errorLevel = MPAS_STREAM_ERR_SILENT + if (present(ierr)) ierr = MPAS_STREAM_MGR_NOERR - ! - ! Set up linked list of streams - ! - call MPAS_stream_list_create(manager % streams, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while creating stream list') - return - end if + allocate(manager) + manager % ioContext => ioContext + manager % allFields => allFields + manager % allPackages => allPackages + manager % allStructs => allStructs + manager % streamClock => clock + manager % numStreams = 0 + manager % errorLevel = MPAS_STREAM_ERR_SILENT - ! - ! Set up linked list of input alarms - ! 
- call MPAS_stream_list_create(manager % alarms_in, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while creating input alarm list') - return - end if + ! + ! Set up linked list of streams + ! + call MPAS_stream_list_create(manager % streams, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while creating stream list') + return + end if - ! - ! Set up linked list of output alarms - ! - call MPAS_stream_list_create(manager % alarms_out, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while creating output alarm list') - return - end if + ! + ! Set up linked list of input alarms + ! + call MPAS_stream_list_create(manager % alarms_in, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while creating input alarm list') + return + end if - ! - ! Create a pool to hold default global attributes that every stream will have - ! - call mpas_pool_create_pool(manager % defaultAtts) + ! + ! Set up linked list of output alarms + ! + call MPAS_stream_list_create(manager % alarms_out, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while creating output alarm list') + return + end if + + ! + ! Create a pool to hold default global attributes that every stream will have + ! 
+ call mpas_pool_create_pool(manager % defaultAtts) + end if end subroutine MPAS_stream_mgr_init!}}} @@ -182,57 +193,60 @@ subroutine MPAS_stream_mgr_finalize(manager, ierr)!{{{ type (MPAS_streamManager_type), pointer:: manager integer, intent(out), optional :: ierr - integer :: err_local + integer :: err_local, threadNum type (MPAS_stream_list_type), pointer :: stream_cursor + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_finalize()') if (present(ierr)) ierr = MPAS_STREAM_MGR_NOERR - ! - ! Remove all streams - ! - stream_cursor => manager % streams % head - do while (associated(stream_cursor)) - STREAM_DEBUG_WRITE(' -- deleting stream '//trim(stream_cursor % name)) - call MPAS_stream_mgr_destroy_stream(manager, stream_cursor % name, ierr=err_local) - stream_cursor => manager % streams % head - end do + if ( threadNum == 0 ) then + ! + ! Remove all streams + ! + stream_cursor => manager % streams % head + do while (associated(stream_cursor)) + STREAM_DEBUG_WRITE(' -- deleting stream '//trim(stream_cursor % name)) + call MPAS_stream_mgr_destroy_stream(manager, stream_cursor % name, ierr=err_local) + stream_cursor => manager % streams % head + end do - ! - ! Free up list of streams - ! - call MPAS_stream_list_destroy(manager % streams, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while destroying stream list') - end if + ! + ! Free up list of streams + ! + call MPAS_stream_list_destroy(manager % streams, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while destroying stream list') + end if - ! - ! Free up list of input alarms - ! 
- call MPAS_stream_list_destroy(manager % alarms_in, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while destroying input alarms list') - end if + ! + ! Free up list of input alarms + ! + call MPAS_stream_list_destroy(manager % alarms_in, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while destroying input alarms list') + end if - ! - ! Free up list of output alarms - ! - call MPAS_stream_list_destroy(manager % alarms_out, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while destroying output alarms list') - end if + ! + ! Free up list of output alarms + ! + call MPAS_stream_list_destroy(manager % alarms_out, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while destroying output alarms list') + end if - ! - ! Free up default attribute pool - ! - call mpas_pool_destroy_pool(manager % defaultAtts) + ! + ! Free up default attribute pool + ! + call mpas_pool_destroy_pool(manager % defaultAtts) - deallocate(manager) + deallocate(manager) + end if end subroutine MPAS_stream_mgr_finalize!}}} @@ -263,9 +277,6 @@ end subroutine MPAS_stream_mgr_finalize!}}} !> argument may take on values MPAS_IO_SINGLE_PRECISION, !> MPAS_IO_DOUBLE_PRECISION, or MPAS_IO_NATIVE_PRECISION; if this argument is !> not supplied, native precision is assumed. - !> Note: Setting the precision of real fields is only supported at present - !> for converting double-precision to single-precision on output; input is - !> automatically converted from single- do double-precision if necessary. 
!> The optional argument clobberMode determines how the stream manager will !> deal with existing files; possible options include MPAS_STREAM_CLOBBER_NEVER, !> MPAS_STREAM_CLOBBER_APPEND, MPAS_STREAM_CLOBBER_TRUNCATE, @@ -276,6 +287,7 @@ end subroutine MPAS_stream_mgr_finalize!}}} !> include: MPAS_IO_NETCDF, MPAS_IO_NETCDF4, MPAS_IO_PNETCDF, and !> MPAS_IO_PNETCDF5. If not specified, the io_type will default to !> MPAS_IO_PNETCDF. + !> NOTE: This routine does not support regular expressions for StreamID ! !----------------------------------------------------------------------- subroutine MPAS_stream_mgr_create_stream(manager, streamID, direction, filename, & @@ -301,96 +313,101 @@ subroutine MPAS_stream_mgr_create_stream(manager, streamID, direction, filename, integer, intent(out), optional :: ierr type (MPAS_stream_list_type), pointer :: new_stream - integer :: err_local + integer :: err_local, threadNum + + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_create_stream() for '//trim(streamID)) if (present(ierr)) ierr = MPAS_STREAM_MGR_NOERR - ! - ! Check that the stream does not already exist - ! - if (MPAS_stream_list_query(manager % streams, streamID, new_stream, ierr=err_local)) then - STREAM_DEBUG_WRITE('-- Stream '//trim(streamID)//' already exist in stream manager') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + if ( threadNum == 0 ) then + ! + ! Check that the stream does not already exist + ! + nullify(new_stream) + if (MPAS_stream_list_query(manager % streams, streamID, new_stream, ierr=err_local)) then + STREAM_DEBUG_WRITE('-- Stream '//trim(streamID)//' already exist in stream manager') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if - ! - ! Allocate a stream node to store the new stream - ! - allocate(new_stream) - new_stream % name = streamID - new_stream % direction = direction - new_stream % valid = .false. + ! + ! Allocate a stream node to store the new stream + ! 
+ allocate(new_stream) + new_stream % name = streamID + new_stream % direction = direction + new_stream % valid = .false. !TODO: ensure that filename does not contain ':' characters, which PNETCDF does not like... - new_stream % filename_template = filename + new_stream % filename_template = filename - ! Filename interval is 'none' by deault, but is set through the set_property routine. - if (present(filenameInterval)) then - new_stream % filename_interval = filenameInterval - else - new_stream % filename_interval = 'none' - end if + ! Filename interval is 'none' by deault, but is set through the set_property routine. + if (present(filenameInterval)) then + new_stream % filename_interval = filenameInterval + else + new_stream % filename_interval = 'none' + end if - new_stream % nRecords = 0 - if (present(clobberMode)) then - new_stream % clobber_mode = clobberMode - else - new_stream % clobber_mode = MPAS_STREAM_CLOBBER_NEVER - end if - if (present(ioType)) then - new_stream % io_type = ioType - else - new_stream % io_type = MPAS_IO_PNETCDF - end if - allocate(new_stream % referenceTime) - if (present(referenceTime)) then - new_stream % referenceTime = referenceTime - else - new_stream % referenceTime = mpas_get_clock_time(manager % streamClock, MPAS_START_TIME) - end if - if (present(recordInterval)) then - allocate(new_stream % recordInterval) - new_stream % recordInterval = recordInterval - end if - if (present(realPrecision)) then - new_stream % precision = realPrecision - end if - call MPAS_stream_list_create(new_stream % alarmList_in, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while creating input alarm list') - deallocate(new_stream) - return - end if - call MPAS_stream_list_create(new_stream % alarmList_out, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while 
creating output alarm list') - deallocate(new_stream) - return - end if - call mpas_pool_create_pool(new_stream % att_pool) - call mpas_pool_clone_pool(manager % defaultAtts, new_stream % att_pool) - call mpas_pool_create_pool(new_stream % field_pool) - call mpas_pool_create_pool(new_stream % field_pkg_pool) - call mpas_pool_create_pool(new_stream % pkg_pool) - nullify(new_stream % next) + new_stream % nRecords = 0 + if (present(clobberMode)) then + new_stream % clobber_mode = clobberMode + else + new_stream % clobber_mode = MPAS_STREAM_CLOBBER_NEVER + end if + if (present(ioType)) then + new_stream % io_type = ioType + else + new_stream % io_type = MPAS_IO_PNETCDF + end if + allocate(new_stream % referenceTime) + if (present(referenceTime)) then + new_stream % referenceTime = referenceTime + else + new_stream % referenceTime = mpas_get_clock_time(manager % streamClock, MPAS_START_TIME) + end if + if (present(recordInterval)) then + allocate(new_stream % recordInterval) + new_stream % recordInterval = recordInterval + end if + if (present(realPrecision)) then + new_stream % precision = realPrecision + end if + call MPAS_stream_list_create(new_stream % alarmList_in, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while creating input alarm list') + deallocate(new_stream) + return + end if + call MPAS_stream_list_create(new_stream % alarmList_out, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while creating output alarm list') + deallocate(new_stream) + return + end if + call mpas_pool_create_pool(new_stream % att_pool) + call mpas_pool_clone_pool(manager % defaultAtts, new_stream % att_pool) + call mpas_pool_create_pool(new_stream % field_pool) + call mpas_pool_create_pool(new_stream % field_pkg_pool) + call mpas_pool_create_pool(new_stream % pkg_pool) + nullify(new_stream 
% next) - ! - ! Add stream to list - ! - call MPAS_stream_list_insert(manager % streams, new_stream, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while adding stream to list') - return + ! + ! Add stream to list + ! + call MPAS_stream_list_insert(manager % streams, new_stream, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while adding stream to list') + return + end if + + manager % numStreams = manager % numStreams + 1 end if - - manager % numStreams = manager % numStreams + 1 end subroutine MPAS_stream_mgr_create_stream!}}} @@ -404,6 +421,7 @@ end subroutine MPAS_stream_mgr_create_stream!}}} !> \details !> Destroy the stream, including freeing all memory explicitly associated with the stream. !> This will not deallocate the memory associated with the fields in the stream. + !> NOTE: This routine does not support regular expressions for StreamID ! !----------------------------------------------------------------------- subroutine MPAS_stream_mgr_destroy_stream(manager, streamID, ierr)!{{{ @@ -416,82 +434,85 @@ subroutine MPAS_stream_mgr_destroy_stream(manager, streamID, ierr)!{{{ character (len=*), intent(in) :: streamID integer, intent(out), optional :: ierr - integer :: err_local + integer :: err_local, threadNum type (MPAS_stream_list_type), pointer :: stream, alarm_cursor, delete_me + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_destroy_stream()') if (present(ierr)) ierr = MPAS_STREAM_MGR_NOERR - ! - ! Remove stream from list - ! 
- call MPAS_stream_list_remove(manager % streams, streamID, stream, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while removing stream from list') - return - end if + if ( threadNum == 0 ) then + ! + ! Remove stream from list + ! + call MPAS_stream_list_remove(manager % streams, streamID, stream, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while removing stream from list') + return + end if - ! - ! Unlink stream from input alarms - ! - alarm_cursor => stream % alarmList_in % head - do while (associated(alarm_cursor)) - call MPAS_stream_list_remove(alarm_cursor % xref % streamList, streamID, delete_me, ierr=err_local) - if (err_local == MPAS_STREAM_LIST_NOERR) then - deallocate(delete_me) - else - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while removing stream from list of input alarm') - return - end if - alarm_cursor => alarm_cursor % next - end do + ! + ! Unlink stream from input alarms + ! + alarm_cursor => stream % alarmList_in % head + do while (associated(alarm_cursor)) + call MPAS_stream_list_remove(alarm_cursor % xref % streamList, streamID, delete_me, ierr=err_local) + if (err_local == MPAS_STREAM_LIST_NOERR) then + deallocate(delete_me) + else + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while removing stream from list of input alarm') + return + end if + alarm_cursor => alarm_cursor % next + end do - ! - ! Unlink stream from output alarms - ! 
- alarm_cursor => stream % alarmList_out % head - do while (associated(alarm_cursor)) - call MPAS_stream_list_remove(alarm_cursor % xref % streamList, streamID, delete_me, ierr=err_local) - if (err_local == MPAS_STREAM_LIST_NOERR) then - deallocate(delete_me) - else - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while removing stream from list of output alarm') - return - end if - alarm_cursor => alarm_cursor % next - end do + ! + ! Unlink stream from output alarms + ! + alarm_cursor => stream % alarmList_out % head + do while (associated(alarm_cursor)) + call MPAS_stream_list_remove(alarm_cursor % xref % streamList, streamID, delete_me, ierr=err_local) + if (err_local == MPAS_STREAM_LIST_NOERR) then + deallocate(delete_me) + else + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while removing stream from list of output alarm') + return + end if + alarm_cursor => alarm_cursor % next + end do - ! - ! Free up stream storage -- reverse of whatever was done when allocating the stream - ! - call MPAS_stream_list_destroy(stream % alarmList_in, ierr=err_local) - call MPAS_stream_list_destroy(stream % alarmList_out, ierr=err_local) - call mpas_pool_destroy_pool(stream % att_pool) - call mpas_pool_destroy_pool(stream % field_pool) - call mpas_pool_destroy_pool(stream % field_pkg_pool) - call mpas_pool_destroy_pool(stream % pkg_pool) - if (associated(stream % referenceTime)) then - deallocate(stream % referenceTime) - end if - if (associated(stream % recordInterval)) then - deallocate(stream % recordInterval) - end if - if (stream % valid) then - call MPAS_closeStream(stream % stream, ierr=err_local) - if (err_local /= MPAS_STREAM_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while closing stream '//trim(stream % name)) - end if - deallocate(stream % stream) + ! + ! Free up stream storage -- reverse of whatever was done when allocating the stream + ! 
+ call MPAS_stream_list_destroy(stream % alarmList_in, ierr=err_local) + call MPAS_stream_list_destroy(stream % alarmList_out, ierr=err_local) + call mpas_pool_destroy_pool(stream % att_pool) + call mpas_pool_destroy_pool(stream % field_pool) + call mpas_pool_destroy_pool(stream % field_pkg_pool) + call mpas_pool_destroy_pool(stream % pkg_pool) + if (associated(stream % referenceTime)) then + deallocate(stream % referenceTime) + end if + if (associated(stream % recordInterval)) then + deallocate(stream % recordInterval) + end if + if (stream % valid) then + call MPAS_closeStream(stream % stream, ierr=err_local) + if (err_local /= MPAS_STREAM_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while closing stream '//trim(stream % name)) + end if + deallocate(stream % stream) + end if + deallocate(stream) + + manager % numStreams = manager % numStreams - 1 end if - deallocate(stream) - - manager % numStreams = manager % numStreams - 1 end subroutine MPAS_stream_mgr_destroy_stream!}}} @@ -537,6 +558,7 @@ end subroutine MPAS_stream_mgr_get_clock !}}} !> structs as well. If the optional 'packages' argument is supplied, the !> specified packages will be attached to all var and var_array members !> added to the stream. + !> NOTE: This routine does not support regular expressions for StreamID ! 
!----------------------------------------------------------------------- recursive subroutine MPAS_stream_mgr_add_pool(manager, streamID, poolName, packages, ierr)!{{{ @@ -554,7 +576,7 @@ recursive subroutine MPAS_stream_mgr_add_pool(manager, streamID, poolName, packa type (MPAS_stream_list_type), pointer :: stream type (mpas_pool_field_info_type) :: info integer, pointer :: test_ptr - integer :: err_local + integer :: err_local, threadNum type (mpas_pool_type), pointer :: fieldPool type (mpas_pool_iterator_type) :: poolItr @@ -572,93 +594,97 @@ recursive subroutine MPAS_stream_mgr_add_pool(manager, streamID, poolName, packa type (field0DChar), pointer :: char0DField type (field1DChar), pointer :: char1DField + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_add_pool()') if (present(ierr)) ierr = MPAS_STREAM_MGR_NOERR - ! - ! Check that stream exists - ! - if (.not. MPAS_stream_list_query(manager % streams, streamID, stream, ierr=err_local)) then - STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' does not exist in stream manager') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + if ( threadNum == 0 ) then + ! + ! Check that stream exists + ! + nullify(stream) + if (.not. MPAS_stream_list_query(manager % streams, streamID, stream, ierr=err_local)) then + STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' does not exist in stream manager') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if - ! - ! Don't modify an immutable stream - ! - if (stream % immutable) then - STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' is immutable.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + ! + ! Don't modify an immutable stream + ! + if (stream % immutable) then + STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' is immutable.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if - ! - ! Check that the pool exists - ! 
- call mpas_pool_get_subpool(manager % allStructs, poolName, fieldPool) - if (.not. associated(fieldPool) ) then - STREAM_ERROR_WRITE('Requested pool '//trim(poolName)//' does not exist.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + ! + ! Check that the pool exists + ! + call mpas_pool_get_subpool(manager % allStructs, poolName, fieldPool) + if (.not. associated(fieldPool) ) then + STREAM_ERROR_WRITE('Requested pool '//trim(poolName)//' does not exist.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if - ! - ! Iterate over pool, adding each field to the stream, and recursively calling this subroutine for each subpool - ! - call mpas_pool_begin_iteration(fieldPool) - do while (mpas_pool_get_next_member(fieldPool, poolItr)) - if (poolItr % memberType == MPAS_POOL_SUBPOOL) then - STREAM_DEBUG_WRITE('-- Try to add subpool...') - ! call mpas_stream_mgr_add_pool(manager, streamId, poolItr % memberName, packages=packages, ierr=ierr) - else if (poolItr % memberType == MPAS_POOL_FIELD) then - if (poolItr % dataType == MPAS_POOL_REAL) then - if (poolItr % nDims == 0) then - call mpas_pool_get_field(fieldPool, poolItr % memberName, real0DField) - call mpas_stream_mgr_add_field(manager, streamID, real0DField % fieldName, packages=packages, ierr=ierr) - else if (poolItr % nDims == 1) then - call mpas_pool_get_field(fieldPool, poolItr % memberName, real1DField) - call mpas_stream_mgr_add_field(manager, streamID, real1DField % fieldName, packages=packages, ierr=ierr) - else if (poolItr % nDims == 2) then - call mpas_pool_get_field(fieldPool, poolItr % memberName, real2DField) - call mpas_stream_mgr_add_field(manager, streamID, real2DField % fieldName, packages=packages, ierr=ierr) - else if (poolItr % nDims == 3) then - call mpas_pool_get_field(fieldPool, poolItr % memberName, real3DField) - call mpas_stream_mgr_add_field(manager, streamID, real3DField % fieldName, packages=packages, ierr=ierr) - else if (poolItr % nDims == 4) then - 
call mpas_pool_get_field(fieldPool, poolItr % memberName, real4DField) - call mpas_stream_mgr_add_field(manager, streamID, real4DField % fieldName, packages=packages, ierr=ierr) - else if (poolItr % nDims == 5) then - call mpas_pool_get_field(fieldPool, poolItr % memberName, real5DField) - call mpas_stream_mgr_add_field(manager, streamID, real5DField % fieldName, packages=packages, ierr=ierr) - end if - else if (poolItr % dataType == MPAS_POOL_INTEGER) then - if (poolItr % nDims == 0) then - call mpas_pool_get_field(fieldPool, poolItr % memberName, int0DField) - call mpas_stream_mgr_add_field(manager, streamID, int0DField % fieldName, packages=packages, ierr=ierr) - else if (poolItr % nDims == 1) then - call mpas_pool_get_field(fieldPool, poolItr % memberName, int1DField) - call mpas_stream_mgr_add_field(manager, streamID, int1DField % fieldName, packages=packages, ierr=ierr) - else if (poolItr % nDims == 2) then - call mpas_pool_get_field(fieldPool, poolItr % memberName, int2DField) - call mpas_stream_mgr_add_field(manager, streamID, int2DField % fieldName, packages=packages, ierr=ierr) - else if (poolItr % nDims == 3) then - call mpas_pool_get_field(fieldPool, poolItr % memberName, int3DField) - call mpas_stream_mgr_add_field(manager, streamID, int3DField % fieldName, packages=packages, ierr=ierr) - end if - else if (poolItr % dataType == MPAS_POOL_CHARACTER) then - if (poolItr % nDims == 0) then - call mpas_pool_get_field(fieldPool, poolItr % memberName, char0DField) - call mpas_stream_mgr_add_field(manager, streamID, char0DField % fieldName, packages=packages, ierr=ierr) - else if (poolItr % nDims == 1) then - call mpas_pool_get_field(fieldPool, poolItr % memberName, char1DField) - call mpas_stream_mgr_add_field(manager, streamID, char1DField % fieldName, packages=packages, ierr=ierr) - end if - end if - end if - end do + ! + ! Iterate over pool, adding each field to the stream, and recursively calling this subroutine for each subpool + ! 
+ call mpas_pool_begin_iteration(fieldPool) + do while (mpas_pool_get_next_member(fieldPool, poolItr)) + if (poolItr % memberType == MPAS_POOL_SUBPOOL) then + STREAM_DEBUG_WRITE('-- Try to add subpool...') + ! call mpas_stream_mgr_add_pool(manager, streamId, poolItr % memberName, packages=packages, ierr=ierr) + else if (poolItr % memberType == MPAS_POOL_FIELD) then + if (poolItr % dataType == MPAS_POOL_REAL) then + if (poolItr % nDims == 0) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, real0DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, real0DField % fieldName, packages=packages, ierr=ierr) + else if (poolItr % nDims == 1) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, real1DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, real1DField % fieldName, packages=packages, ierr=ierr) + else if (poolItr % nDims == 2) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, real2DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, real2DField % fieldName, packages=packages, ierr=ierr) + else if (poolItr % nDims == 3) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, real3DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, real3DField % fieldName, packages=packages, ierr=ierr) + else if (poolItr % nDims == 4) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, real4DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, real4DField % fieldName, packages=packages, ierr=ierr) + else if (poolItr % nDims == 5) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, real5DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, real5DField % fieldName, packages=packages, ierr=ierr) + end if + else if (poolItr % dataType == MPAS_POOL_INTEGER) then + if (poolItr % nDims == 0) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, int0DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, int0DField % fieldName, 
packages=packages, ierr=ierr) + else if (poolItr % nDims == 1) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, int1DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, int1DField % fieldName, packages=packages, ierr=ierr) + else if (poolItr % nDims == 2) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, int2DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, int2DField % fieldName, packages=packages, ierr=ierr) + else if (poolItr % nDims == 3) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, int3DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, int3DField % fieldName, packages=packages, ierr=ierr) + end if + else if (poolItr % dataType == MPAS_POOL_CHARACTER) then + if (poolItr % nDims == 0) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, char0DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, char0DField % fieldName, packages=packages, ierr=ierr) + else if (poolItr % nDims == 1) then + call mpas_pool_get_field(fieldPool, poolItr % memberName, char1DField, 1) + call mpas_stream_mgr_add_field(manager, streamID, char1DField % fieldName, packages=packages, ierr=ierr) + end if + end if + end if + end do + end if end subroutine MPAS_stream_mgr_add_pool!}}} @@ -673,6 +699,7 @@ end subroutine MPAS_stream_mgr_add_pool!}}} !> Adds a field from the allFields pool to a stream. If the optional !> argument 'packages' is present, those packages will be attached to !> the field as well. + !> NOTE: This routine does not support regular expressions for StreamID ! 
!----------------------------------------------------------------------- subroutine MPAS_stream_mgr_add_field(manager, streamID, fieldName, packages, ierr)!{{{ @@ -693,79 +720,83 @@ subroutine MPAS_stream_mgr_add_field(manager, streamID, fieldName, packages, ier integer, pointer :: test_ptr logical :: test_logical integer :: err_level - integer :: err_local + integer :: err_local, threadNum + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_add_field()') if (present(ierr)) ierr = MPAS_STREAM_MGR_NOERR - ! - ! Check that stream exists - ! - if (.not. MPAS_stream_list_query(manager % streams, streamID, stream, ierr=err_local)) then - STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' does not exist in stream manager') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + if ( threadNum == 0 ) then + ! + ! Check that stream exists + ! + nullify(stream) + if (.not. MPAS_stream_list_query(manager % streams, streamID, stream, ierr=err_local)) then + STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' does not exist in stream manager') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if - ! - ! Don't modify an immutable stream - ! - if (stream % immutable) then - STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' is immutable.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + ! + ! Don't modify an immutable stream + ! + if (stream % immutable) then + STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' is immutable.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if - ! - ! Check that field exists - ! - info % nDims = -1 - call mpas_pool_get_field_info(manager % allFields, fieldName, info) - if (info % nDims == -1) then - STREAM_ERROR_WRITE('Requested field '//trim(fieldName)//' not available') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + ! + ! Check that field exists + ! 
+ info % nDims = -1 + call mpas_pool_get_field_info(manager % allFields, fieldName, info) + if (info % nDims == -1) then + STREAM_ERROR_WRITE('Requested field '//trim(fieldName)//' not available') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if - ! - ! Check that the field does not already exist in the stream - ! - nullify(test_ptr) - err_level = mpas_pool_get_error_level() - call mpas_pool_set_error_level(MPAS_POOL_SILENT) - call mpas_pool_get_config(stream % field_pool, fieldName, value=test_ptr) - call mpas_pool_set_error_level(err_level) - if (associated(test_ptr)) then - STREAM_ERROR_WRITE('Requested field '//trim(fieldName)//' already in stream '//trim(streamID)) - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + ! + ! Check that the field does not already exist in the stream + ! + nullify(test_ptr) + err_level = mpas_pool_get_error_level() + call mpas_pool_set_error_level(MPAS_POOL_SILENT) + call mpas_pool_get_config(stream % field_pool, fieldName, value=test_ptr) + call mpas_pool_set_error_level(err_level) + if (associated(test_ptr)) then + STREAM_ERROR_WRITE('Requested field '//trim(fieldName)//' already in stream '//trim(streamID)) + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if - ! - ! Validate packages - ! - if (present(packages)) then - test_logical = parse_package_list(manager % allPackages, trim(packages), err_local) - if (err_local /= 0) then - STREAM_WARNING_WRITE('One or more packages in '''//trim(packages)//''' attached to field '''//trim(fieldName)//''' is undefined') - end if - end if + ! + ! Validate packages + ! + if (present(packages)) then + test_logical = parse_package_list(manager % allPackages, trim(packages), err_local) + if (err_local /= 0) then + STREAM_WARNING_WRITE('One or more packages in '''//trim(packages)//''' attached to field '''//trim(fieldName)//''' is undefined') + end if + end if - ! - ! Add field to field pool in stream if the field is activated - ! 
- if (info % isActive) then - call mpas_pool_add_config(stream % field_pool, fieldName, 1) + ! + ! Add field to field pool in stream if the field is activated + ! + if (info % isActive) then + call mpas_pool_add_config(stream % field_pool, fieldName, 1) - if (present(packages)) then - STREAM_DEBUG_WRITE('-- Attaching packages '//trim(packages)//' to field '//trim(fieldName)//' in stream '//trim(streamID)) - write(field_pkg,'(a)') trim(fieldName)//':packages' - call mpas_pool_add_config(stream % field_pkg_pool, field_pkg, packages) - end if - else - write(stderrUnit, *) ' * Requested field '//trim(fieldName)//' is deactivated due to packages, or is a scratch variable.' + if (present(packages)) then + STREAM_DEBUG_WRITE('-- Attaching packages '//trim(packages)//' to field '//trim(fieldName)//' in stream '//trim(streamID)) + write(field_pkg,'(a)') trim(fieldName)//':packages' + call mpas_pool_add_config(stream % field_pkg_pool, field_pkg, packages) + end if + else + write(stderrUnit, *) ' * Requested field '//trim(fieldName)//' is deactivated due to packages, or is a scratch variable.' + end if end if end subroutine MPAS_stream_mgr_add_field!}}} @@ -782,6 +813,7 @@ end subroutine MPAS_stream_mgr_add_field!}}} !> Both streams need to exist within the same stream manager. If the !> optional 'packages' argument is supplied, the specified packages will be !> attached to all fields added from refStreamID. + !> NOTE: This routine does not support regular expressions for StreamID ! 
!----------------------------------------------------------------------- subroutine MPAS_stream_mgr_add_stream_fields(manager, streamID, refStreamID, packages, ierr)!{{{ @@ -803,8 +835,9 @@ subroutine MPAS_stream_mgr_add_stream_fields(manager, streamID, refStreamID, pac integer, pointer :: test_ptr logical :: test_logical integer :: err_level - integer :: err_local + integer :: err_local, threadNum + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_add_stream_fields()') @@ -813,6 +846,7 @@ subroutine MPAS_stream_mgr_add_stream_fields(manager, streamID, refStreamID, pac ! ! Check that reference stream exists ! + nullify(refStream) if (.not. MPAS_stream_list_query(manager % streams, refStreamID, refStream, ierr=err_local)) then STREAM_ERROR_WRITE('Requested reference stream '//trim(refStreamID)//' does not exist in stream manager') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR @@ -822,6 +856,7 @@ subroutine MPAS_stream_mgr_add_stream_fields(manager, streamID, refStreamID, pac ! ! Check that stream exists ! + nullify(stream) if (.not. MPAS_stream_list_query(manager % streams, streamID, stream, ierr=err_local)) then STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' does not exist in stream manager') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR @@ -853,46 +888,48 @@ subroutine MPAS_stream_mgr_add_stream_fields(manager, streamID, refStreamID, pac err_level = mpas_pool_get_error_level() call mpas_pool_set_error_level(MPAS_POOL_SILENT) - call mpas_pool_begin_iteration(refStream % field_pool) - do while (mpas_pool_get_next_member(refStream % field_pool, itr)) - if ( itr % memberType == MPAS_POOL_CONFIG ) then - if ( itr % dataType == MPAS_POOL_INTEGER ) then - - ! - ! Check that field exists - ! 
- info % nDims = -1 - call mpas_pool_get_field_info(manager % allFields, itr % memberName, info) - if (info % nDims == -1) then - STREAM_ERROR_WRITE('Requested field '//trim(itr % memberName)//' not available') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if - - ! Test that the field does not already exist in stream - nullify(test_ptr) - call mpas_pool_get_config(stream % field_pool, itr % memberName, value=test_ptr) - - if ( associated(test_ptr) ) then - STREAM_ERROR_WRITE('Requested field '//trim(itr % memberName)//' already in stream '//trim(streamID)) - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - end if - - if ( info % isActive ) then - call mpas_pool_add_config(stream % field_pool, itr % memberName, 1) - - if (present(packages)) then - STREAM_DEBUG_WRITE('-- Attaching packages '//trim(packages)//' to field '//trim(itr % memberName)//' in stream '//trim(streamID)) - write(field_pkg,'(a)') trim(itr % memberName)//':packages' - call mpas_pool_add_config(stream % field_pkg_pool, field_pkg, packages) - end if - else - write(stderrUnit, *) ' * Requested field '//trim(itr % memberName)//' is deactivated due to packages, or is a scratch variable.' - end if - - end if - end if - end do + if ( threadNum == 0 ) then + call mpas_pool_begin_iteration(refStream % field_pool) + do while (mpas_pool_get_next_member(refStream % field_pool, itr)) + if ( itr % memberType == MPAS_POOL_CONFIG ) then + if ( itr % dataType == MPAS_POOL_INTEGER ) then + + ! + ! Check that field exists + ! + info % nDims = -1 + call mpas_pool_get_field_info(manager % allFields, itr % memberName, info) + if (info % nDims == -1) then + STREAM_ERROR_WRITE('Requested field '//trim(itr % memberName)//' not available') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + + ! 
Test that the field does not already exist in stream + nullify(test_ptr) + call mpas_pool_get_config(stream % field_pool, itr % memberName, value=test_ptr) + + if ( associated(test_ptr) ) then + STREAM_ERROR_WRITE('Requested field '//trim(itr % memberName)//' already in stream '//trim(streamID)) + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + end if + + if ( info % isActive ) then + call mpas_pool_add_config(stream % field_pool, itr % memberName, 1) + + if (present(packages)) then + STREAM_DEBUG_WRITE('-- Attaching packages '//trim(packages)//' to field '//trim(itr % memberName)//' in stream '//trim(streamID)) + write(field_pkg,'(a)') trim(itr % memberName)//':packages' + call mpas_pool_add_config(stream % field_pkg_pool, field_pkg, packages) + end if + else + write(stderrUnit, *) ' * Requested field '//trim(itr % memberName)//' is deactivated due to packages, or is a scratch variable.' + end if + + end if + end if + end do + end if call mpas_pool_set_error_level(err_level) end subroutine MPAS_stream_mgr_add_stream_fields!}}} @@ -906,6 +943,7 @@ end subroutine MPAS_stream_mgr_add_stream_fields!}}} !> \date 13 June 2014 !> \details !> Removes a field from a stream. + !> NOTE: This routine does not support regular expressions for StreamID ! !----------------------------------------------------------------------- subroutine MPAS_stream_mgr_remove_field(manager, streamID, fieldName, ierr)!{{{ @@ -921,8 +959,9 @@ subroutine MPAS_stream_mgr_remove_field(manager, streamID, fieldName, ierr)!{{{ type (MPAS_stream_list_type), pointer :: stream integer, pointer :: test_ptr - integer :: err_local + integer :: err_local, threadNum + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_remove_field()') @@ -931,6 +970,7 @@ subroutine MPAS_stream_mgr_remove_field(manager, streamID, fieldName, ierr)!{{{ ! ! Check that stream exists ! + nullify(stream) if (.not. 
MPAS_stream_list_query(manager % streams, streamID, stream, ierr=err_local)) then STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' does not exist in stream manager') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR @@ -960,7 +1000,9 @@ subroutine MPAS_stream_mgr_remove_field(manager, streamID, fieldName, ierr)!{{{ ! ! Remove field from stream's field pool ! - call mpas_pool_remove_config(stream % field_pool, fieldName) + if ( threadNum == 0 ) then + call mpas_pool_remove_config(stream % field_pool, fieldName) + end if end subroutine MPAS_stream_mgr_remove_field!}}} @@ -981,6 +1023,7 @@ end subroutine MPAS_stream_mgr_remove_field!}}} !> will be added to the alarm pool, along with an integer that has the same !> name as the stream whose value will represent the direction the stream !> will be handled when this alarm rings. + !> NOTE: This routine does not support regular expressions for StreamID ! !----------------------------------------------------------------------- subroutine MPAS_stream_mgr_add_alarm(manager, streamID, alarmID, direction, ierr)!{{{ @@ -996,8 +1039,9 @@ subroutine MPAS_stream_mgr_add_alarm(manager, streamID, alarmID, direction, ierr integer, intent(out), optional :: ierr type (MPAS_stream_list_type), pointer :: stream, new_alarm, new_xref - integer :: err_local + integer :: err_local, threadNum + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_add_alarm()') @@ -1006,6 +1050,7 @@ subroutine MPAS_stream_mgr_add_alarm(manager, streamID, alarmID, direction, ierr ! ! Check that stream exists ! + nullify(stream) if (.not. MPAS_stream_list_query(manager % streams, streamID, stream, ierr=err_local)) then STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' does not exist in stream manager') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR @@ -1039,6 +1084,7 @@ subroutine MPAS_stream_mgr_add_alarm(manager, streamID, alarmID, direction, ierr ! 
Check that the alarm does not already exist for the stream in the specified direction ! if (direction == MPAS_STREAM_INPUT .or. direction == MPAS_STREAM_INPUT_OUTPUT) then + nullify(new_alarm) if (MPAS_stream_list_query(stream % alarmList_in, alarmID, new_alarm, ierr=err_local)) then STREAM_ERROR_WRITE('Requested input alarm '//trim(alarmID)//' already on stream '//trim(streamID)) if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR @@ -1046,6 +1092,7 @@ subroutine MPAS_stream_mgr_add_alarm(manager, streamID, alarmID, direction, ierr end if end if if (direction == MPAS_STREAM_OUTPUT .or. direction == MPAS_STREAM_INPUT_OUTPUT) then + nullify(new_alarm) if (MPAS_stream_list_query(stream % alarmList_out, alarmID, new_alarm, ierr=err_local)) then STREAM_ERROR_WRITE('Requested output alarm '//trim(alarmID)//' already on stream '//trim(streamID)) if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR @@ -1058,94 +1105,98 @@ subroutine MPAS_stream_mgr_add_alarm(manager, streamID, alarmID, direction, ierr ! Add alarm to alarm to the alarms_in and/or alarms_out list ! Add alarm to the alarmList_in and/or alarmList_out list for the field ! - if (direction == MPAS_STREAM_INPUT .or. direction == MPAS_STREAM_INPUT_OUTPUT) then - - ! If alarm is not already defined, we need to create a new alarm node - if (.not. 
MPAS_stream_list_query(manager % alarms_in, alarmID, new_alarm, ierr=err_local)) then - allocate(new_alarm) - new_alarm % name = alarmID - call MPAS_stream_list_create(new_alarm % streamList, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while creating stream list for alarm') - return - end if - nullify(new_alarm % next) - - call MPAS_stream_list_insert(manager % alarms_in, new_alarm, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while adding input alarm to list') - return - end if - end if - - ! Add specified stream to alarm node stream list - allocate(new_xref) - new_xref % name = streamID - new_xref % xref => stream - call MPAS_stream_list_insert(new_alarm % streamList, new_xref, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while adding stream to alarm stream list') - return - end if - - ! Add alarm to stream alarm list - allocate(new_xref) - new_xref % name = alarmID - new_xref % xref => new_alarm - call MPAS_stream_list_insert(stream % alarmList_in, new_xref, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while adding alarm to stream input alarm list') - return - end if - end if - - if (direction == MPAS_STREAM_OUTPUT .or. direction == MPAS_STREAM_INPUT_OUTPUT) then - - ! If alarm is not already defined, we need to create a new alarm node - if (.not. 
MPAS_stream_list_query(manager % alarms_out, alarmID, new_alarm, ierr=err_local)) then - allocate(new_alarm) - new_alarm % name = alarmID - call MPAS_stream_list_create(new_alarm % streamList, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while creating stream list for alarm') - return - end if - nullify(new_alarm % next) - - call MPAS_stream_list_insert(manager % alarms_out, new_alarm, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while adding output alarm to list') - return - end if - end if - - ! Add specified stream to alarm node stream list - allocate(new_xref) - new_xref % name = streamID - new_xref % xref => stream - call MPAS_stream_list_insert(new_alarm % streamList, new_xref, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while adding stream to alarm stream list') - return - end if - - ! Add alarm to stream alarm list - allocate(new_xref) - new_xref % name = alarmID - new_xref % xref => new_alarm - call MPAS_stream_list_insert(stream % alarmList_out, new_xref, ierr=err_local) - if (err_local /= MPAS_STREAM_LIST_NOERR) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Problems while adding alarm to stream output alarm list') - return - end if + if ( threadNum == 0 ) then + if (direction == MPAS_STREAM_INPUT .or. direction == MPAS_STREAM_INPUT_OUTPUT) then + + ! If alarm is not already defined, we need to create a new alarm node + nullify(new_alarm) + if (.not. 
MPAS_stream_list_query(manager % alarms_in, alarmID, new_alarm, ierr=err_local)) then + allocate(new_alarm) + new_alarm % name = alarmID + call MPAS_stream_list_create(new_alarm % streamList, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while creating stream list for alarm') + return + end if + nullify(new_alarm % next) + + call MPAS_stream_list_insert(manager % alarms_in, new_alarm, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while adding input alarm to list') + return + end if + end if + + ! Add specified stream to alarm node stream list + allocate(new_xref) + new_xref % name = streamID + new_xref % xref => stream + call MPAS_stream_list_insert(new_alarm % streamList, new_xref, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while adding stream to alarm stream list') + return + end if + + ! Add alarm to stream alarm list + allocate(new_xref) + new_xref % name = alarmID + new_xref % xref => new_alarm + call MPAS_stream_list_insert(stream % alarmList_in, new_xref, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while adding alarm to stream input alarm list') + return + end if + end if + + if (direction == MPAS_STREAM_OUTPUT .or. direction == MPAS_STREAM_INPUT_OUTPUT) then + + ! If alarm is not already defined, we need to create a new alarm node + nullify(new_alarm) + if (.not. 
MPAS_stream_list_query(manager % alarms_out, alarmID, new_alarm, ierr=err_local)) then + allocate(new_alarm) + new_alarm % name = alarmID + call MPAS_stream_list_create(new_alarm % streamList, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while creating stream list for alarm') + return + end if + nullify(new_alarm % next) + + call MPAS_stream_list_insert(manager % alarms_out, new_alarm, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while adding output alarm to list') + return + end if + end if + + ! Add specified stream to alarm node stream list + allocate(new_xref) + new_xref % name = streamID + new_xref % xref => stream + call MPAS_stream_list_insert(new_alarm % streamList, new_xref, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while adding stream to alarm stream list') + return + end if + + ! Add alarm to stream alarm list + allocate(new_xref) + new_xref % name = alarmID + new_xref % xref => new_alarm + call MPAS_stream_list_insert(stream % alarmList_out, new_xref, ierr=err_local) + if (err_local /= MPAS_STREAM_LIST_NOERR) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Problems while adding alarm to stream output alarm list') + return + end if + end if end if end subroutine MPAS_stream_mgr_add_alarm!}}} @@ -1160,6 +1211,7 @@ end subroutine MPAS_stream_mgr_add_alarm!}}} !> \details !> This routine will remove the association of a stream to an alarm from !> the stream manager. + !> NOTE: This routine does not support regular expressions for StreamID ! 
!----------------------------------------------------------------------- subroutine MPAS_stream_mgr_remove_alarm(manager, streamID, alarmID, direction, ierr)!{{{ @@ -1177,8 +1229,9 @@ subroutine MPAS_stream_mgr_remove_alarm(manager, streamID, alarmID, direction, i type (MPAS_stream_list_type), pointer :: stream type (MPAS_stream_list_type), pointer :: alarmNode type (MPAS_stream_list_type), pointer :: streamNode - integer :: err_local + integer :: err_local, threadNum + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_remove_alarm()') @@ -1187,56 +1240,59 @@ subroutine MPAS_stream_mgr_remove_alarm(manager, streamID, alarmID, direction, i ! ! Check that stream exists ! + nullify(stream) if (.not. MPAS_stream_list_query(manager % streams, streamID, stream, ierr=err_local)) then STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' does not exist in stream manager') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR return end if - ! - ! Unlink alarm from alarmList_in or alarmList_out for stream - ! - nullify(alarmNode) - if (direction == MPAS_STREAM_INPUT) then - call MPAS_stream_list_remove(stream % alarmList_in, alarmID, alarmNode, ierr=ierr) - else if (direction == MPAS_STREAM_OUTPUT) then - call MPAS_stream_list_remove(stream % alarmList_out, alarmID, alarmNode, ierr=ierr) - else - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Requested to remove alarm from invalid direction from stream '//trim(streamID)) - return - end if + if ( threadNum == 0 ) then + ! + ! Unlink alarm from alarmList_in or alarmList_out for stream + ! 
+ nullify(alarmNode) + if (direction == MPAS_STREAM_INPUT) then + call MPAS_stream_list_remove(stream % alarmList_in, alarmID, alarmNode, ierr=ierr) + else if (direction == MPAS_STREAM_OUTPUT) then + call MPAS_stream_list_remove(stream % alarmList_out, alarmID, alarmNode, ierr=ierr) + else + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Requested to remove alarm from invalid direction from stream '//trim(streamID)) + return + end if - ! - ! Remove stream from alarm's streamList in alarms_in or alarms_out - ! - if (associated(alarmNode)) then - call MPAS_stream_list_remove(alarmNode % xref % streamList, streamID, streamNode, ierr=ierr) - else - if (direction == MPAS_STREAM_INPUT) then - STREAM_ERROR_WRITE('Input alarm '//trim(alarmID)//' does not exist on stream '//trim(streamID)) - else - STREAM_ERROR_WRITE('Output alarm '//trim(alarmID)//' does not exist on stream '//trim(streamID)) - end if - return - end if - if (.not. associated(streamNode)) then - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - STREAM_ERROR_WRITE('Alarm '//trim(alarmID)//' does not have stream '//trim(streamID)//' on its stream list.') - return - end if + ! + ! Remove stream from alarm's streamList in alarms_in or alarms_out + ! + if (associated(alarmNode)) then + call MPAS_stream_list_remove(alarmNode % xref % streamList, streamID, streamNode, ierr=ierr) + else + if (direction == MPAS_STREAM_INPUT) then + STREAM_ERROR_WRITE('Input alarm '//trim(alarmID)//' does not exist on stream '//trim(streamID)) + else + STREAM_ERROR_WRITE('Output alarm '//trim(alarmID)//' does not exist on stream '//trim(streamID)) + end if + return + end if + if (.not. associated(streamNode)) then + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + STREAM_ERROR_WRITE('Alarm '//trim(alarmID)//' does not have stream '//trim(streamID)//' on its stream list.') + return + end if - ! - ! If the alarm has no associated streams, should we remove it from alarms_in or alarms_out? - ! 
- if (MPAS_stream_list_length(alarmNode % xref % streamList) == 0) then - if (direction == MPAS_STREAM_INPUT) then - STREAM_ERROR_WRITE('Input alarm '//trim(alarmID)//' has no associated streams and will be deleted.') - call MPAS_stream_list_remove(manager % alarms_in, alarmID, alarmNode, ierr=ierr) - else - STREAM_ERROR_WRITE('Output alarm '//trim(alarmID)//' has no associated streams and will be deleted.') - call MPAS_stream_list_remove(manager % alarms_out, alarmID, alarmNode, ierr=ierr) - end if + ! + ! If the alarm has no associated streams, should we remove it from alarms_in or alarms_out? + ! + if (MPAS_stream_list_length(alarmNode % xref % streamList) == 0) then + if (direction == MPAS_STREAM_INPUT) then + STREAM_ERROR_WRITE('Input alarm '//trim(alarmID)//' has no associated streams and will be deleted.') + call MPAS_stream_list_remove(manager % alarms_in, alarmID, alarmNode, ierr=ierr) + else + STREAM_ERROR_WRITE('Output alarm '//trim(alarmID)//' has no associated streams and will be deleted.') + call MPAS_stream_list_remove(manager % alarms_out, alarmID, alarmNode, ierr=ierr) + end if + end if end if end subroutine MPAS_stream_mgr_remove_alarm!}}} @@ -1250,7 +1306,8 @@ end subroutine MPAS_stream_mgr_remove_alarm!}}} !> \date 2 September 2014 !> \details !> Resets all alarms used by the stream manager. If the optional argument - !> 'streamID' is provided, only alarms associated with that stream will be + !> 'streamID' is provided, only alarms associated with streams that match + !> the 'streamID' regular expression will be !> reset. If the optional 'direction' argument is provided, only alarms !> associated with that direction will be reset. ! 
@@ -1267,8 +1324,10 @@ subroutine MPAS_stream_mgr_reset_alarms(manager, streamID, direction, ierr)!{{{ type (MPAS_stream_list_type), pointer :: stream type (MPAS_stream_list_type), pointer :: alarm_cursor integer :: local_direction - integer :: local_ierr + integer :: local_ierr, threadNum + logical :: resetAlarms + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_reset_alarms()') @@ -1285,46 +1344,64 @@ subroutine MPAS_stream_mgr_reset_alarms(manager, streamID, direction, ierr)!{{{ end if - ! - ! Check for optional streamID argument; default is to handle all alarms in the manager. - ! - nullify(stream) - if (present(streamID)) then - if (.not. MPAS_stream_list_query(manager % streams, streamID, stream, ierr=local_ierr)) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in stream manager.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if - end if + if ( threadNum == 0 ) then + ! + ! Check for optional streamID argument; default is to handle all alarms in the manager. + ! + nullify(stream) + if (present(streamID)) then + resetAlarms = .false. + do while (MPAS_stream_list_query(manager % streams, streamID, stream, ierr=local_ierr)) + resetAlarms = .true. + if (local_direction == MPAS_STREAM_INPUT .or. local_direction == MPAS_STREAM_INPUT_OUTPUT) then + alarm_cursor => stream % alarmList_in % head + do while (associated(alarm_cursor)) + if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then + call mpas_reset_clock_alarm(manager % streamClock, alarm_cursor % name, ierr=local_ierr) + end if + alarm_cursor => alarm_cursor % next + end do + end if - - if (local_direction == MPAS_STREAM_INPUT .or. local_direction == MPAS_STREAM_INPUT_OUTPUT) then - if (associated(stream)) then - alarm_cursor => stream % alarmList_in % head + if (local_direction == MPAS_STREAM_OUTPUT .or. 
local_direction == MPAS_STREAM_INPUT_OUTPUT) then + alarm_cursor => stream % alarmList_out % head + do while (associated(alarm_cursor)) + if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then + call mpas_reset_clock_alarm(manager % streamClock, alarm_cursor % name, ierr=local_ierr) + end if + alarm_cursor => alarm_cursor % next + end do + end if + end do + + if ( .not. resetAlarms ) then + STREAM_ERROR_WRITE('No stream matching '//trim(streamID)//' exists in stream manager.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if else - alarm_cursor => manager % alarms_in % head - end if - do while (associated(alarm_cursor)) - if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then - call mpas_reset_clock_alarm(manager % streamClock, alarm_cursor % name, ierr=local_ierr) + + if (local_direction == MPAS_STREAM_INPUT .or. local_direction == MPAS_STREAM_INPUT_OUTPUT) then + alarm_cursor => manager % alarms_in % head + do while (associated(alarm_cursor)) + if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then + call mpas_reset_clock_alarm(manager % streamClock, alarm_cursor % name, ierr=local_ierr) + end if + alarm_cursor => alarm_cursor % next + end do end if - alarm_cursor => alarm_cursor % next - end do - end if - if (local_direction == MPAS_STREAM_OUTPUT .or. local_direction == MPAS_STREAM_INPUT_OUTPUT) then - if (associated(stream)) then - alarm_cursor => stream % alarmList_out % head - else - alarm_cursor => manager % alarms_out % head - end if - do while (associated(alarm_cursor)) - if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then - call mpas_reset_clock_alarm(manager % streamClock, alarm_cursor % name, ierr=local_ierr) + if (local_direction == MPAS_STREAM_OUTPUT .or. 
local_direction == MPAS_STREAM_INPUT_OUTPUT) then + alarm_cursor => manager % alarms_out % head + do while (associated(alarm_cursor)) + if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then + call mpas_reset_clock_alarm(manager % streamClock, alarm_cursor % name, ierr=local_ierr) + end if + alarm_cursor => alarm_cursor % next + end do end if - alarm_cursor => alarm_cursor % next - end do + end if end if end subroutine MPAS_stream_mgr_reset_alarms!}}} @@ -1342,6 +1419,7 @@ end subroutine MPAS_stream_mgr_reset_alarms!}}} !> the optional argument 'direction' is given, only alarms for the specified !> direction are tested. If any of the tested alarms is ringing, the function !> returns .true.; otherwise, it returns .false.. + !> Note: This function doesn't support streamID regular expressions ! !----------------------------------------------------------------------- logical function MPAS_stream_mgr_ringing_alarms(manager, streamID, direction, ierr) !{{{ @@ -1356,8 +1434,9 @@ logical function MPAS_stream_mgr_ringing_alarms(manager, streamID, direction, ie type (MPAS_stream_list_type), pointer :: stream type (MPAS_stream_list_type), pointer :: alarm_cursor integer :: local_direction - integer :: local_ierr + integer :: local_ierr, threadNum + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_ringing_alarms()') @@ -1389,35 +1468,37 @@ logical function MPAS_stream_mgr_ringing_alarms(manager, streamID, direction, ie end if - if (local_direction == MPAS_STREAM_INPUT .or. local_direction == MPAS_STREAM_INPUT_OUTPUT) then - if (associated(stream)) then - alarm_cursor => stream % alarmList_in % head - else - alarm_cursor => manager % alarms_in % head - end if - do while (associated(alarm_cursor)) - if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then - MPAS_stream_mgr_ringing_alarms = .true. 
- return - end if - alarm_cursor => alarm_cursor % next - end do - end if + if ( threadNum == 0 ) then + if (local_direction == MPAS_STREAM_INPUT .or. local_direction == MPAS_STREAM_INPUT_OUTPUT) then + if (associated(stream)) then + alarm_cursor => stream % alarmList_in % head + else + alarm_cursor => manager % alarms_in % head + end if + do while (associated(alarm_cursor)) + if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then + MPAS_stream_mgr_ringing_alarms = .true. + return + end if + alarm_cursor => alarm_cursor % next + end do + end if - if (local_direction == MPAS_STREAM_OUTPUT .or. local_direction == MPAS_STREAM_INPUT_OUTPUT) then - if (associated(stream)) then - alarm_cursor => stream % alarmList_out % head - else - alarm_cursor => manager % alarms_out % head - end if - do while (associated(alarm_cursor)) - if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then - MPAS_stream_mgr_ringing_alarms = .true. - return - end if - alarm_cursor => alarm_cursor % next - end do + if (local_direction == MPAS_STREAM_OUTPUT .or. local_direction == MPAS_STREAM_INPUT_OUTPUT) then + if (associated(stream)) then + alarm_cursor => stream % alarmList_out % head + else + alarm_cursor => manager % alarms_out % head + end if + do while (associated(alarm_cursor)) + if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then + MPAS_stream_mgr_ringing_alarms = .true. 
+ return + end if + alarm_cursor => alarm_cursor % next + end do + end if end if end function MPAS_stream_mgr_ringing_alarms !}}} @@ -1447,7 +1528,10 @@ subroutine MPAS_stream_mgr_set_property_int(manager, streamID, propertyName, pro integer, intent(out), optional :: ierr type (MPAS_stream_list_type), pointer :: stream_cursor - integer :: err_local + integer :: err_local, threadNum + logical :: setProperties + + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_set_property()') @@ -1456,31 +1540,41 @@ subroutine MPAS_stream_mgr_set_property_int(manager, streamID, propertyName, pro ! ! Find requested stream ! - if (.not. MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_set_property().') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + setProperties = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) + setProperties = .true. + ! + ! Set property + ! + if ( threadNum == 0 ) then + select case (propertyName) - ! - ! Set property - ! 
- select case (propertyName) + case (MPAS_STREAM_PROPERTY_PRECISION) + stream_cursor % precision = propertyValue - case (MPAS_STREAM_PROPERTY_PRECISION) - stream_cursor % precision = propertyValue + case (MPAS_STREAM_PROPERTY_CLOBBER) + stream_cursor % clobber_mode = propertyValue - case (MPAS_STREAM_PROPERTY_CLOBBER) - stream_cursor % clobber_mode = propertyValue + case (MPAS_STREAM_PROPERTY_IOTYPE) + stream_cursor % io_type = propertyValue - case (MPAS_STREAM_PROPERTY_IOTYPE) - stream_cursor % io_type = propertyValue + case default + STREAM_ERROR_WRITE('MPAS_stream_mgr_set_property(): No such property ' COMMA propertyName) + STREAM_ERROR_WRITE(' or specified property is not of type integer.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + end select + end if + end do + + call mpas_threading_barrier() + + if ( .not. setProperties ) then + STREAM_ERROR_WRITE('No stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_set_property().') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if - case default - STREAM_ERROR_WRITE('MPAS_stream_mgr_set_property(): No such property ' COMMA propertyName) - STREAM_ERROR_WRITE(' or specified property is not of type integer.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - end select end subroutine MPAS_stream_mgr_set_property_int !}}} @@ -1509,7 +1603,10 @@ subroutine MPAS_stream_mgr_set_property_char(manager, streamID, propertyName, pr integer, intent(out), optional :: ierr type (MPAS_stream_list_type), pointer :: stream_cursor - integer :: err_local + integer :: err_local, threadNum + logical :: setProperties + + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_set_property()') @@ -1518,41 +1615,51 @@ subroutine MPAS_stream_mgr_set_property_char(manager, streamID, propertyName, pr ! ! Find requested stream ! - if (.not. 
MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_set_property().') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + setProperties = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) + setProperties = .true. + if ( threadNum == 0 ) then + ! + ! Set property + ! + select case (propertyName) + + case (MPAS_STREAM_PROPERTY_FILENAME) +!TODO: ensure that filename does not contain ':' characters, which PNETCDF does not like... + stream_cursor % filename_template = propertyValue - ! - ! Set property - ! - select case (propertyName) + case (MPAS_STREAM_PROPERTY_FILENAME_INTV) + stream_cursor % filename_interval = propertyValue - case (MPAS_STREAM_PROPERTY_FILENAME) -!TODO: ensure that filename does not contain ':' characters, which PNETCDF does not like... - stream_cursor % filename_template = propertyValue + case (MPAS_STREAM_PROPERTY_REF_TIME) + call mpas_set_time(stream_cursor % referenceTime, dateTimeString=propertyValue) - case (MPAS_STREAM_PROPERTY_FILENAME_INTV) - stream_cursor % filename_interval = propertyValue + case (MPAS_STREAM_PROPERTY_RECORD_INTV) + + ! The interval between records may not have been allocated if the optional recordInterval + ! argument was not provided when the stream was created + if (.not. 
associated(stream_cursor % recordInterval)) then + allocate(stream_cursor % recordInterval) + end if + call mpas_set_timeInterval(stream_cursor % recordInterval, timeString=propertyValue) + + case default + STREAM_ERROR_WRITE(' MPAS_stream_mgr_set_property(): No such property ' COMMA propertyName) + STREAM_ERROR_WRITE(' or specified property is not of type character.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + end select + end if + end do - case (MPAS_STREAM_PROPERTY_REF_TIME) - call mpas_set_time(stream_cursor % referenceTime, dateTimeString=propertyValue) + call mpas_threading_barrier() - case (MPAS_STREAM_PROPERTY_RECORD_INTV) - - ! The interval between records may not have been allocated if the optional recordInterval - ! argument was not provided when the stream was created - if (.not. associated(stream_cursor % recordInterval)) then - allocate(stream_cursor % recordInterval) - end if - call mpas_set_timeInterval(stream_cursor % recordInterval, timeString=propertyValue) + if ( .not. 
setProperties ) then + STREAM_ERROR_WRITE('No stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_set_property().') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if - case default - STREAM_ERROR_WRITE(' MPAS_stream_mgr_set_property(): No such property ' COMMA propertyName) - STREAM_ERROR_WRITE(' or specified property is not of type character.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - end select end subroutine MPAS_stream_mgr_set_property_char !}}} @@ -1581,7 +1688,10 @@ subroutine MPAS_stream_mgr_set_property_logical(manager, streamID, propertyName, integer, intent(out), optional :: ierr type (MPAS_stream_list_type), pointer :: stream_cursor - integer :: err_local + integer :: err_local, threadNum + logical :: setProperties + + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_set_property()') @@ -1590,28 +1700,39 @@ subroutine MPAS_stream_mgr_set_property_logical(manager, streamID, propertyName, ! ! Find requested stream ! - if (.not. MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_set_property().') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + setProperties = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) + setProperties = .true. - ! - ! Set property - ! - select case (propertyName) + if ( threadNum == 0 ) then + ! + ! Set property + ! 
+ select case (propertyName) - case (MPAS_STREAM_PROPERTY_ACTIVE) - stream_cursor % active_stream = propertyValue + case (MPAS_STREAM_PROPERTY_ACTIVE) + stream_cursor % active_stream = propertyValue - case (MPAS_STREAM_PROPERTY_IMMUTABLE) - stream_cursor % immutable = propertyValue + case (MPAS_STREAM_PROPERTY_IMMUTABLE) + stream_cursor % immutable = propertyValue + + case default + STREAM_ERROR_WRITE(' MPAS_stream_mgr_set_property(): No such property ' COMMA propertyName) + STREAM_ERROR_WRITE(' or specified property is not of type logical.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + end select + end if + end do + + call mpas_threading_barrier() + + if ( .not. setProperties ) then + STREAM_ERROR_WRITE('No stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_set_property().') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if - case default - STREAM_ERROR_WRITE(' MPAS_stream_mgr_set_property(): No such property ' COMMA propertyName) - STREAM_ERROR_WRITE(' or specified property is not of type logical.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - end select end subroutine MPAS_stream_mgr_set_property_logical !}}} @@ -1624,6 +1745,7 @@ end subroutine MPAS_stream_mgr_set_property_logical !}}} !> \date 13 June 2014 !> \details !> Retrieves the value of a stream property within an MPAS stream manager. + !> NOTE: This routine does not support streamID regular expressions ! !----------------------------------------------------------------------- subroutine MPAS_stream_mgr_get_property_int(manager, streamID, propertyName, propertyValue, direction, ierr) !{{{ @@ -1649,6 +1771,7 @@ subroutine MPAS_stream_mgr_get_property_int(manager, streamID, propertyName, pro ! ! Find requested stream ! + nullify(stream_cursor) if (.not. 
MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_get_property().') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR @@ -1686,6 +1809,7 @@ end subroutine MPAS_stream_mgr_get_property_int !}}} !> \date 13 June 2014 !> \details !> Retrieves the value of a stream property within an MPAS stream manager. + !> NOTE: This routine does not support streamID regular expressions ! !----------------------------------------------------------------------- subroutine MPAS_stream_mgr_get_property_char(manager, streamID, propertyName, propertyValue, direction, ierr) !{{{ @@ -1714,6 +1838,7 @@ subroutine MPAS_stream_mgr_get_property_char(manager, streamID, propertyName, pr ! ! Find requested stream ! + nullify(stream_cursor) if (.not. MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_get_property().') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR @@ -1794,6 +1919,7 @@ end subroutine MPAS_stream_mgr_get_property_char !}}} !> \date 13 June 2014 !> \details !> Retrieves the value of a stream property within an MPAS stream manager. + !> NOTE: This routine does not support streamID regular expressions ! !----------------------------------------------------------------------- subroutine MPAS_stream_mgr_get_property_logical(manager, streamID, propertyName, propertyValue, direction, ierr) !{{{ @@ -1819,6 +1945,7 @@ subroutine MPAS_stream_mgr_get_property_logical(manager, streamID, propertyName, ! ! Find requested stream ! + nullify(stream_cursor) if (.not. 
MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_get_property().') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR @@ -1870,6 +1997,7 @@ subroutine MPAS_stream_mgr_add_pkg(manager, streamID, packageName, ierr)!{{{ logical, pointer :: package type (MPAS_stream_list_type), pointer :: stream_cursor integer :: err_local + logical :: addedPackages STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_add_pkg()') @@ -1890,16 +2018,25 @@ subroutine MPAS_stream_mgr_add_pkg(manager, streamID, packageName, ierr)!{{{ ! ! Find requested stream ! - if (.not. MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_add_pkg().') + addedPackages = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) + + addedPackages = .true. + + ! + ! Add package to the packages pool for the stream + ! + call mpas_pool_add_package(stream_cursor % pkg_pool, packageName, package) + + end do + + if ( .not. addedPackages ) then + STREAM_ERROR_WRITE('No stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_add_pkg().') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR return end if - ! - ! Add package to the packages pool for the stream - ! - call mpas_pool_add_package(stream_cursor % pkg_pool, packageName, package) end subroutine MPAS_stream_mgr_add_pkg!}}} @@ -1929,6 +2066,7 @@ subroutine MPAS_stream_mgr_remove_pkg(manager, streamID, packageName, ierr)!{{{ logical, pointer :: package type (MPAS_stream_list_type), pointer :: stream_cursor integer :: err_local + logical :: removedPackage STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_remove_pkg()') @@ -1938,16 +2076,25 @@ subroutine MPAS_stream_mgr_remove_pkg(manager, streamID, packageName, ierr)!{{{ ! ! 
Find requested stream ! - if (.not. MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_remove_pkg().') + removedPackage = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) + + removedPackage = .true. + + ! + ! Remove package from the packages pool for the stream + ! + call mpas_pool_remove_package(stream_cursor % pkg_pool, packageName) + + end do + + if ( .not. removedPackage ) then + STREAM_ERROR_WRITE('No stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_remove_pkg().') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR return end if - ! - ! Remove package from the packages pool for the stream - ! - call mpas_pool_remove_package(stream_cursor % pkg_pool, packageName) end subroutine MPAS_stream_mgr_remove_pkg!}}} @@ -1982,6 +2129,7 @@ subroutine MPAS_stream_mgr_add_att_int(manager, attName, attVal, streamID, ierr) integer :: att_type integer :: err_level integer :: err_local + logical :: addedAttribute STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_add_att()') @@ -1990,35 +2138,59 @@ subroutine MPAS_stream_mgr_add_att_int(manager, attName, attVal, streamID, ierr) nullify(queryVal) if (present(streamID)) then - if (.not. MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_add_att().') + addedAttribute = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) + addedAttribute = .true. + att_pool => stream_cursor % att_pool + err_level = mpas_pool_get_error_level() + call mpas_pool_set_error_level(MPAS_POOL_SILENT) + call mpas_pool_get_config(att_pool, attName, queryVal) + call mpas_pool_set_error_level(err_level) + if (.not. 
associated(queryVal)) then + ! + ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really + ! does not exist in the pool; otherwise, the attribute exists but was of the wrong type + ! in the call above to mpas_pool_get_config() + ! + if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then + STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in stream '//trim(stream_cursor % name)//' is not of type integer.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + call mpas_pool_add_config(att_pool, attName, attVal) + else + queryVal = attVal + end if + end do + + if ( .not. addedAttribute ) then + STREAM_ERROR_WRITE('No stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_add_att().') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR return end if - att_pool => stream_cursor % att_pool else att_pool => manager % defaultAtts - end if - - err_level = mpas_pool_get_error_level() - call mpas_pool_set_error_level(MPAS_POOL_SILENT) - call mpas_pool_get_config(att_pool, attName, queryVal) - call mpas_pool_set_error_level(err_level) - if (.not. associated(queryVal)) then - ! - ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really - ! does not exist in the pool; otherwise, the attribute exists but was of the wrong type - ! in the call above to mpas_pool_get_config() - ! - if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then - STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in stream '//trim(streamID)//' is not of type integer.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return + err_level = mpas_pool_get_error_level() + call mpas_pool_set_error_level(MPAS_POOL_SILENT) + call mpas_pool_get_config(att_pool, attName, queryVal) + call mpas_pool_set_error_level(err_level) + if (.not. associated(queryVal)) then + ! + ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really + ! 
does not exist in the pool; otherwise, the attribute exists but was of the wrong type + ! in the call above to mpas_pool_get_config() + ! + if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then + STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in streamManager is not of type integer.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + call mpas_pool_add_config(att_pool, attName, attVal) + else + queryVal = attVal end if - call mpas_pool_add_config(att_pool, attName, attVal) - else - queryVal = attVal end if end subroutine MPAS_stream_mgr_add_att_int!}}} @@ -2054,6 +2226,7 @@ subroutine MPAS_stream_mgr_add_att_real(manager, attName, attVal, streamID, ierr integer :: att_type integer :: err_level integer :: err_local + logical :: addedAttribute STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_add_att()') @@ -2062,35 +2235,63 @@ subroutine MPAS_stream_mgr_add_att_real(manager, attName, attVal, streamID, ierr nullify(queryVal) if (present(streamID)) then - if (.not. MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_add_att().') + addedAttribute = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) + + addedAttribute = .true. + att_pool => stream_cursor % att_pool + + err_level = mpas_pool_get_error_level() + call mpas_pool_set_error_level(MPAS_POOL_SILENT) + call mpas_pool_get_config(att_pool, attName, queryVal) + call mpas_pool_set_error_level(err_level) + if (.not. associated(queryVal)) then + ! + ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really + ! does not exist in the pool; otherwise, the attribute exists but was of the wrong type + ! in the call above to mpas_pool_get_config() + ! 
+ if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then + STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in stream '//trim(stream_cursor % name)//' is not of type real.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + call mpas_pool_add_config(att_pool, attName, attVal) + else + queryVal = attVal + end if + end do + + if ( .not. addedAttribute ) then + STREAM_ERROR_WRITE('No stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_add_att().') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR return end if - att_pool => stream_cursor % att_pool else att_pool => manager % defaultAtts - end if - err_level = mpas_pool_get_error_level() - call mpas_pool_set_error_level(MPAS_POOL_SILENT) - call mpas_pool_get_config(att_pool, attName, queryVal) - call mpas_pool_set_error_level(err_level) - if (.not. associated(queryVal)) then - ! - ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really - ! does not exist in the pool; otherwise, the attribute exists but was of the wrong type - ! in the call above to mpas_pool_get_config() - ! - if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then - STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in stream '//trim(streamID)//' is not of type real.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return + err_level = mpas_pool_get_error_level() + call mpas_pool_set_error_level(MPAS_POOL_SILENT) + call mpas_pool_get_config(att_pool, attName, queryVal) + call mpas_pool_set_error_level(err_level) + if (.not. associated(queryVal)) then + ! + ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really + ! does not exist in the pool; otherwise, the attribute exists but was of the wrong type + ! in the call above to mpas_pool_get_config() + ! 
+ if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then + STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in streamManager is not of type real.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + call mpas_pool_add_config(att_pool, attName, attVal) + else + queryVal = attVal end if - call mpas_pool_add_config(att_pool, attName, attVal) - else - queryVal = attVal + end if end subroutine MPAS_stream_mgr_add_att_real!}}} @@ -2126,6 +2327,7 @@ subroutine MPAS_stream_mgr_add_att_char(manager, attName, attVal, streamID, ierr integer :: att_type integer :: err_level integer :: err_local + logical :: addedAttribute STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_add_att()') @@ -2134,35 +2336,62 @@ subroutine MPAS_stream_mgr_add_att_char(manager, attName, attVal, streamID, ierr nullify(queryVal) if (present(streamID)) then - if (.not. MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_add_att().') + addedAttribute = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) + addedAttribute = .true. + att_pool => stream_cursor % att_pool + + err_level = mpas_pool_get_error_level() + call mpas_pool_set_error_level(MPAS_POOL_SILENT) + call mpas_pool_get_config(att_pool, attName, queryVal) + call mpas_pool_set_error_level(err_level) + if (.not. associated(queryVal)) then + ! + ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really + ! does not exist in the pool; otherwise, the attribute exists but was of the wrong type + ! in the call above to mpas_pool_get_config() + ! 
+ if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then + STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in stream '//trim(stream_cursor % name)//' is not of type character.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + call mpas_pool_add_config(att_pool, attName, attVal) + else + queryVal = attVal + end if + end do + + if ( .not. addedAttribute ) then + STREAM_ERROR_WRITE('No stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_add_att().') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR return end if - att_pool => stream_cursor % att_pool else att_pool => manager % defaultAtts - end if - err_level = mpas_pool_get_error_level() - call mpas_pool_set_error_level(MPAS_POOL_SILENT) - call mpas_pool_get_config(att_pool, attName, queryVal) - call mpas_pool_set_error_level(err_level) - if (.not. associated(queryVal)) then - ! - ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really - ! does not exist in the pool; otherwise, the attribute exists but was of the wrong type - ! in the call above to mpas_pool_get_config() - ! - if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then - STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in stream '//trim(streamID)//' is not of type character.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return + err_level = mpas_pool_get_error_level() + call mpas_pool_set_error_level(MPAS_POOL_SILENT) + call mpas_pool_get_config(att_pool, attName, queryVal) + call mpas_pool_set_error_level(err_level) + if (.not. associated(queryVal)) then + ! + ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really + ! does not exist in the pool; otherwise, the attribute exists but was of the wrong type + ! in the call above to mpas_pool_get_config() + ! 
+ if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then + STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in streamManager is not of type character.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + call mpas_pool_add_config(att_pool, attName, attVal) + else + queryVal = attVal end if - call mpas_pool_add_config(att_pool, attName, attVal) - else - queryVal = attVal + end if end subroutine MPAS_stream_mgr_add_att_char!}}} @@ -2198,6 +2427,7 @@ subroutine MPAS_stream_mgr_add_att_logical(manager, attName, attVal, streamID, i integer :: att_type integer :: err_level integer :: err_local + logical :: addedAttribute STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_add_att()') @@ -2206,35 +2436,61 @@ subroutine MPAS_stream_mgr_add_att_logical(manager, attName, attVal, streamID, i nullify(queryVal) if (present(streamID)) then - if (.not. MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_add_att().') + addedAttribute = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=err_local)) + addedAttribute = .true. + att_pool => stream_cursor % att_pool + + err_level = mpas_pool_get_error_level() + call mpas_pool_set_error_level(MPAS_POOL_SILENT) + call mpas_pool_get_config(att_pool, attName, queryVal) + call mpas_pool_set_error_level(err_level) + if (.not. associated(queryVal)) then + ! + ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really + ! does not exist in the pool; otherwise, the attribute exists but was of the wrong type + ! in the call above to mpas_pool_get_config() + ! 
+ if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then + STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in stream '//trim(stream_cursor % name)//' is not of type logical.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + call mpas_pool_add_config(att_pool, attName, attVal) + else + queryVal = attVal + end if + end do + + if ( .not. addedAttribute ) then + STREAM_ERROR_WRITE('No stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_add_att().') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR return end if - att_pool => stream_cursor % att_pool else att_pool => manager % defaultAtts - end if - err_level = mpas_pool_get_error_level() - call mpas_pool_set_error_level(MPAS_POOL_SILENT) - call mpas_pool_get_config(att_pool, attName, queryVal) - call mpas_pool_set_error_level(err_level) - if (.not. associated(queryVal)) then - ! - ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really - ! does not exist in the pool; otherwise, the attribute exists but was of the wrong type - ! in the call above to mpas_pool_get_config() - ! - if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then - STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in stream '//trim(streamID)//' is not of type logical.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return + err_level = mpas_pool_get_error_level() + call mpas_pool_set_error_level(MPAS_POOL_SILENT) + call mpas_pool_get_config(att_pool, attName, queryVal) + call mpas_pool_set_error_level(err_level) + if (.not. associated(queryVal)) then + ! + ! Querying the type of the attribute should return MPAS_POOL_FATAL if the attribute really + ! does not exist in the pool; otherwise, the attribute exists but was of the wrong type + ! in the call above to mpas_pool_get_config() + ! 
+ if (mpas_pool_config_type(att_pool, attName) /= MPAS_POOL_FATAL) then + STREAM_ERROR_WRITE('Attribute '//trim(attName)//' in streamManger is not of type logical.') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + call mpas_pool_add_config(att_pool, attName, attVal) + else + queryVal = attVal end if - call mpas_pool_add_config(att_pool, attName, attVal) - else - queryVal = attVal end if end subroutine MPAS_stream_mgr_add_att_logical!}}} @@ -2248,7 +2504,7 @@ end subroutine MPAS_stream_mgr_add_att_logical!}}} !> \date 13 June 2014 !> \details !> With no optional arguments, writes all streams whose alarms are ringing. - !> The "streamID" argument optionally specifies the ID of a particular stream + !> The "streamID" argument optionally specifies a regular expression of streams !> to be written; if no other optional arguments are given, the specified !> stream is only written if any of its alarms are ringing. !> The "timeLevel" argument optionally specifies, for fields with multiple @@ -2258,10 +2514,13 @@ end subroutine MPAS_stream_mgr_add_att_logical!}}} !> The "forceWriteNow" argument optionally specifies that all streams -- or !> the stream specified by the "streamID" argument -- should be written by !> the call regardless of whether any alarms associated with the stream(s) - !> are ringing. + !> are ringing. The "writeTime" argument optionally specifies a time stamp + !> to be used for expanding a filename template, when it is not passed in, + !> the current time of the streamManager's clock is used to expand filename + !> templates. ! 
!----------------------------------------------------------------------- - subroutine MPAS_stream_mgr_write(manager, streamID, timeLevel, mgLevel, forceWriteNow, ierr) !{{{ + subroutine MPAS_stream_mgr_write(manager, streamID, timeLevel, mgLevel, forceWriteNow, writeTime, ierr) !{{{ implicit none @@ -2270,15 +2529,21 @@ subroutine MPAS_stream_mgr_write(manager, streamID, timeLevel, mgLevel, forceWri integer, intent(in), optional :: timeLevel integer, intent(in), optional :: mgLevel logical, intent(in), optional :: forceWriteNow + character (len=*), intent(in), optional :: writeTime integer, intent(out), optional :: ierr type (MPAS_stream_list_type), pointer :: stream_cursor + integer :: blockID integer :: local_timeLevel integer :: local_mgLevel logical :: local_forceWrite integer :: local_ierr + type (MPAS_Time_type) :: local_writeTime integer :: temp_ierr + integer :: threadNum + logical :: wroteStreams + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_write()') local_ierr = MPAS_STREAM_MGR_NOERR @@ -2307,47 +2572,61 @@ subroutine MPAS_stream_mgr_write(manager, streamID, timeLevel, mgLevel, forceWri end if - ! - ! If a stream is specified, we process just that stream; otherwise, - ! process all streams - ! - if (present(streamID)) then - nullify(stream_cursor) - if (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=ierr)) then - STREAM_DEBUG_WRITE('-- Handling write of stream '//trim(stream_cursor % name)) - - ! Verify that the stream is an output stream - if (stream_cursor % direction /= MPAS_STREAM_OUTPUT .and. 
& - stream_cursor % direction /= MPAS_STREAM_INPUT_OUTPUT) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' is not an output stream.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if - - call write_stream(manager, stream_cursor, local_timeLevel, local_mgLevel, local_forceWrite, local_ierr) - else - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_write().') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if + if ( present(writeTime) ) then + call mpas_set_time(local_writeTime, dateTimeString=writeTime, ierr=ierr) else - nullify(stream_cursor) - stream_cursor => manager % streams % head - do while (associated(stream_cursor)) - STREAM_DEBUG_WRITE('-- Handling write of stream '//trim(stream_cursor % name)) - - ! Verify that the stream is an output stream - if (stream_cursor % direction == MPAS_STREAM_OUTPUT .or. & - stream_cursor % direction == MPAS_STREAM_INPUT_OUTPUT) then + local_writeTime = mpas_get_clock_time(manager % streamClock, MPAS_NOW, ierr=local_ierr) + end if - call write_stream(manager, stream_cursor, local_timeLevel, local_mgLevel, local_forceWrite, temp_ierr) - if (temp_ierr /= MPAS_STREAM_MGR_NOERR) then - local_ierr = temp_ierr - end if + ! This routine assumes global writes, so set blockID = -1 + blockID = -1 - end if - stream_cursor => stream_cursor % next - end do + if ( threadNum == 0 ) then + ! + ! If a stream is specified, we process just that stream; otherwise, + ! process all streams + ! + if (present(streamID)) then + wroteStreams = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=ierr)) + STREAM_DEBUG_WRITE('-- Handling write of stream '//trim(stream_cursor % name)) + + ! Verify that the stream is an output stream + if (stream_cursor % direction == MPAS_STREAM_OUTPUT .or. & + stream_cursor % direction == MPAS_STREAM_INPUT_OUTPUT) then + + wroteStreams = .true. 
+ stream_cursor % blockWrite = .false. + call write_stream(manager, stream_cursor, blockID, local_timeLevel, local_mgLevel, local_forceWrite, local_writeTime, local_ierr) + end if + end do + + if ( .not. wroteStreams ) then + STREAM_ERROR_WRITE('No output stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_write().') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + else + nullify(stream_cursor) + stream_cursor => manager % streams % head + do while (associated(stream_cursor)) + STREAM_DEBUG_WRITE('-- Handling write of stream '//trim(stream_cursor % name)) + + ! Verify that the stream is an output stream + if (stream_cursor % direction == MPAS_STREAM_OUTPUT .or. & + stream_cursor % direction == MPAS_STREAM_INPUT_OUTPUT) then + + stream_cursor % blockWrite = .false. + call write_stream(manager, stream_cursor, blockID, local_timeLevel, local_mgLevel, local_forceWrite, local_writeTime, temp_ierr) + if (temp_ierr /= MPAS_STREAM_MGR_NOERR) then + local_ierr = temp_ierr + end if + + end if + stream_cursor => stream_cursor % next + end do + end if end if if (present(ierr)) ierr = local_ierr @@ -2356,341 +2635,551 @@ end subroutine MPAS_stream_mgr_write !}}} !----------------------------------------------------------------------- - ! routine write_stream + ! routine MPAS_stream_mgr_block_write ! - !> \brief Handle the writing of a stream pointed to by the stream list node - !> \author Michael Duda - !> \date 2 September 2014 + !> \brief Write streams that are managed by an MPAS stream manager, using blockWrite = .true. + !> \author Michael Duda, Doug Jacobsen + !> \date 08/25/2016 !> \details - !> Private subroutine to handle the details of actually writing a stream. + !> This routine performs block writes of any streams requested within an + !> MPAS stream manger. Block writes imply each processor should write + !> their own file(s). Additionally, block writes disable global reindexing + !> before and after writing the stream. 
+ !> + !> With no optional arguments, writes all streams whose alarms are ringing. + !> The "streamID" argument optionally specifies a regular expression of streams + !> to be written; if no other optional arguments are given, the specified + !> stream is only written if any of its alarms are ringing. + !> The "timeLevel" argument optionally specifies, for fields with multiple + !> time levels, the time level from which fields should be written. + !> The "mgLevel" argument optionally specifies, for fields that exist for + !> multiple grid levels, the grid level from which fields should be written. + !> The "forceWriteNow" argument optionally specifies that all streams -- or + !> the stream specified by the "streamID" argument -- should be written by + !> the call regardless of whether any alarms associated with the stream(s) + !> are ringing. The "writeTime" argument optionally specifies a time stamp + !> to be used for expanding a filename template, when it is not passed in, + !> the current time of the streamManager's clock is used to expand filename + !> templates. + !> + !> Swapping write types on a stream can cause unexpected behavior. For example: + !> - Writing a stream using the normal write routine, followed by the + !> block write routine, and again by the normal write routine will cause any + !> data in the files prior to the last normal write to be lost. + !> + !> Additionally, this routine is only valid for writing out a single file + !> at a time. If the block write stream is required at multiple time + !> levels, the filename template and filename intervals need to be setup to + !> give unique files for each record. ! 
!----------------------------------------------------------------------- - subroutine write_stream(manager, stream, timeLevel, mgLevel, forceWritenow, ierr) !{{{ + subroutine MPAS_stream_mgr_block_write(manager, writeBlock, streamID, timeLevel, mgLevel, forceWriteNow, writeTime, ierr) !{{{ implicit none type (MPAS_streamManager_type), intent(inout) :: manager - type (MPAS_stream_list_type), intent(inout) :: stream - integer, intent(in) :: timeLevel - integer, intent(in) :: mgLevel - logical, intent(in) :: forceWriteNow - integer, intent(out) :: ierr - - type (MPAS_stream_list_type), pointer :: alarm_cursor - type (MPAS_Time_type) :: now_time, ref_time - type (MPAS_TimeInterval_type) :: temp_interval - type (MPAS_TimeInterval_type) :: filename_interval - character (len=StrKIND) :: now_string, time_string - character (len=StrKIND) :: temp_filename, actualWhen - character (len=StrKIND) :: err_string - logical :: ringing_alarm, recordSeek, swapRecords - logical :: clobberRecords, clobberFiles, truncateFiles - integer :: maxRecords, tempRecord - integer :: local_ierr - + type (block_type), intent(in) :: writeBlock + character (len=*), intent(in), optional :: streamID + integer, intent(in), optional :: timeLevel + integer, intent(in), optional :: mgLevel + logical, intent(in), optional :: forceWriteNow + character (len=*), intent(in), optional :: writeTime + integer, intent(out), optional :: ierr - ierr = MPAS_STREAM_MGR_NOERR - swapRecords = .false. + type (mpas_pool_type), pointer :: backupAllFields, backupAllStructs - ! - ! Check whether this stream is active - ! - if (.not. 
stream % active_stream) then - STREAM_DEBUG_WRITE('-- Stream '//trim(stream % name)//' is not currently active and will not be written.') - return - end if + type (MPAS_stream_list_type), pointer :: stream_cursor + integer :: local_timeLevel + integer :: local_mgLevel + logical :: local_forceWrite + integer :: local_ierr + type (MPAS_Time_type) :: local_writeTime + integer :: temp_ierr + integer :: threadNum + logical :: wroteStreams - ! - ! Check whether all packages for this stream are inactive - ! Note: if the stream has no packages, it is assumed to be active - ! - if (.not. stream_active_pkg_check(stream)) then - STREAM_DEBUG_WRITE('-- Stream '//trim(stream % name)//' has only inactive packages and will not be written.') - return - end if + type (mpas_io_context_type), pointer :: debugContext, backupContext - ! - ! Check whether any of the output alarms for the stream are ringing - ! - ringing_alarm = .false. - alarm_cursor => stream % alarmList_out % head - do while (associated(alarm_cursor)) - if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then - ringing_alarm = .true. - exit - end if - alarm_cursor => alarm_cursor % next - end do + threadNum = mpas_threading_get_thread_num() - if ((.not. ringing_alarm) .and. (.not. forceWriteNow)) then - return - end if + STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_block_write()') + local_ierr = MPAS_STREAM_MGR_NOERR + + if (present(ierr)) ierr = MPAS_STREAM_MGR_NOERR ! - ! Work out file clobbering options + ! Use optional arguments or set defaults ! - if (stream % clobber_mode == MPAS_STREAM_CLOBBER_OVERWRITE) then - clobberRecords = .true. + if (present(timeLevel)) then + local_timeLevel = timeLevel else - clobberRecords = .false. + local_timeLevel = 1 end if - if (stream % clobber_mode == MPAS_STREAM_CLOBBER_OVERWRITE .or. & - stream % clobber_mode == MPAS_STREAM_CLOBBER_TRUNCATE .or. & - stream % clobber_mode == MPAS_STREAM_CLOBBER_APPEND) then - clobberFiles = .true. 
+ if (present(mgLevel)) then + local_mgLevel = mgLevel else - clobberFiles = .false. + local_mgLevel = 1 end if - if (stream % clobber_mode == MPAS_STREAM_CLOBBER_TRUNCATE) then - truncateFiles = .true. + if (present(forceWriteNow)) then + local_forceWrite = forceWriteNow else - truncateFiles = .false. + local_forceWrite = .false. end if - ! - ! If the stream is not valid, assume that we have not yet written this - ! stream, in which case we create the stream from scratch - ! - if (.not. stream % valid) then - if ( stream % filename_interval /= 'none' ) then - now_time = mpas_get_clock_time(manager % streamClock, MPAS_NOW, ierr=local_ierr) - call mpas_set_timeInterval(filename_interval, timeString=stream % filename_interval) - call mpas_build_stream_filename(stream % referenceTime, now_time, filename_interval, stream % filename_template, stream % filename, ierr=local_ierr) - else - call mpas_get_time(stream % referenceTime, dateTimeString=time_string) - call mpas_expand_string(time_string, stream % filename_template, stream % filename) - end if - stream % nRecords = 1 + if ( present(writeTime) ) then + call mpas_set_time(local_writeTime, dateTimeString=writeTime, ierr=ierr) + else + local_writeTime = mpas_get_clock_time(manager % streamClock, MPAS_NOW, ierr=local_ierr) + end if - recordSeek = .false. - ! Based on clobber_mode, determine if it matters if the file exists or not. - if ( stream % clobber_mode == MPAS_STREAM_CLOBBER_OVERWRITE .or. stream % clobber_mode == MPAS_STREAM_CLOBBER_APPEND ) then - STREAM_DEBUG_WRITE(' -- Cobber mode is overwrite or append...') - - ! Check if the file exists - inquire(file=trim(stream % filename), exist=recordSeek) - end if - ! - ! Build stream from pools of fields and attributes - ! 
- allocate(stream % stream) - call MPAS_createStream(stream % stream, stream % filename, stream % io_type, MPAS_IO_WRITE, & - precision=stream % precision, clobberRecords=clobberRecords, & - clobberFiles=clobberFiles, truncateFiles=truncateFiles, ierr=local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - if (local_ierr == MPAS_STREAM_CLOBBER_FILE) then - ! - ! We should have only reached this point if clobber_mode = never_modify - ! - write(err_string,'(a)') 'Writing to stream '''//trim(stream % name)//''' would clobber file '''//& - trim(stream % filename)//''',' - STREAM_ERROR_WRITE(trim(err_string)) - write(err_string,'(a)') ' but clobber_mode is set to ''never_modify''.' - STREAM_ERROR_WRITE(trim(err_string)) - ierr = MPAS_STREAM_MGR_ERR_CLOBBER_FILE - else - ierr = MPAS_STREAM_MGR_ERROR - end if - return - end if + if ( threadNum == 0 ) then + ! Setup the debugging ioContext + allocate(debugContext) + allocate(debugContext % dminfo) + call mpas_dmpar_init(debugContext % dminfo, MPI_COMM_SELF) - ! File exists on disk, prior to creating stream. Need to seek the record to ensure we're writing to the correct place. - if ( recordSeek ) then - STREAM_DEBUG_WRITE(' -- File exists on disk: ' COMMA trim(stream % filename)) - now_time = mpas_get_clock_time(manager % streamClock, MPAS_NOW, ierr=local_ierr) - call mpas_get_time(now_time, dateTimeString=now_string) - - ! Look for exact record (in the case of overwriting) - ! This also gets the number of records in the file. - stream % nRecords = MPAS_seekStream(stream % stream, now_string, MPAS_STREAM_EXACT_TIME, actualWhen, maxRecords, local_ierr) - STREAM_DEBUG_WRITE(' -- Seeked record is: ' COMMA stream % nRecords COMMA ' with current records equal to ' COMMA maxRecords COMMA ' and an error of ' COMMA local_ierr) - - if ( stream % nRecords == 0 ) then - ! If we didn't find an exact time, set record to point to the end of the file. - ! This might result in non-monotonic timestamps in the output file. 
- stream % nRecords = maxRecords + 1 - STREAM_DEBUG_WRITE(' -- No exact time match found for ' COMMA trim(now_string) COMMA ' appending record instead.') - STREAM_DEBUG_WRITE(' -- Setting record to: ' COMMA stream % nRecords) - end if - end if + backupContext => manager % ioContext + manager % ioContext => debugContext - call build_stream(stream, MPAS_STREAM_OUTPUT, manager % allFields, manager % allPackages, timeLevel, mgLevel, local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - ierr = MPAS_STREAM_MGR_ERROR - return - end if - stream % timeLevel = timeLevel + call mpas_io_init(debugContext, 1, 1) - stream % valid = .true. - else - if ( stream % filename_interval /= 'none' ) then - now_time = mpas_get_clock_time(manager % streamClock, MPAS_NOW, ierr=local_ierr) - call mpas_set_timeInterval(filename_interval, timeString=stream % filename_interval) + ! Setup the backup pools, and swap the manager's allFields / allStructs pools + backupAllFields => manager % allFields + backupAllStructs => manager % allStructs + manager % allFields => writeBlock % allFields + manager % allStructs => writeBlock % allStructs + + ! + ! If a stream is specified, we process just that stream; otherwise, + ! process all streams + ! + if (present(streamID)) then + wroteStreams = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=ierr)) + STREAM_DEBUG_WRITE('-- Handling local write of stream '//trim(stream_cursor % name)) + + ! Verify that the stream is an output stream + if (stream_cursor % direction == MPAS_STREAM_OUTPUT .or. & + stream_cursor % direction == MPAS_STREAM_INPUT_OUTPUT) then + + wroteStreams = .true. + stream_cursor % blockWrite = .true. + call write_stream(manager, stream_cursor, writeBlock % blockID, local_timeLevel, local_mgLevel, & + local_forceWrite, local_writeTime, local_ierr) + stream_cursor % blockWrite = .false. + if ( associated(stream_cursor % stream) ) then + stream_cursor % valid = .false. 
+ call MPAS_closeStream(stream_cursor % stream, ierr=local_ierr) + deallocate(stream_cursor % stream) + end if + end if + end do + + if ( .not. wroteStreams ) then + STREAM_ERROR_WRITE('No output stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_block_write().') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + else + nullify(stream_cursor) + stream_cursor => manager % streams % head + do while (associated(stream_cursor)) + STREAM_DEBUG_WRITE('-- Handling local write of stream '//trim(stream_cursor % name)) + + ! Verify that the stream is an output stream + if (stream_cursor % direction == MPAS_STREAM_OUTPUT .or. & + stream_cursor % direction == MPAS_STREAM_INPUT_OUTPUT) then + + stream_cursor % blockWrite = .true. + call write_stream(manager, stream_cursor, writeBlock % blockID, local_timeLevel, local_mgLevel, & + local_forceWrite, local_writeTime, temp_ierr) + stream_cursor % blockWrite = .false. + if ( associated(stream_cursor % stream) ) then + stream_cursor % valid = .false. + call MPAS_closeStream(stream_cursor % stream, ierr=local_ierr) + deallocate(stream_cursor % stream) + end if + if (temp_ierr /= MPAS_STREAM_MGR_NOERR) then + local_ierr = temp_ierr + end if + + end if + stream_cursor => stream_cursor % next + end do + end if + ! Swap the allFields / allStructs back to the backups. + manager % allFields => backupAllFields + manager % allStructs => backupAllStructs + + ! Destroy the IO context, and dminfo that was created + call MPAS_io_finalize(debugContext, .true.) + + manager % ioContext => backupContext + + call mpas_dmpar_finalize(debugContext % dminfo) + + deallocate(debugContext % dminfo) + deallocate(debugContext) + end if + + call mpas_threading_barrier() + + if (present(ierr)) ierr = local_ierr + + end subroutine MPAS_stream_mgr_block_write !}}} + + + !----------------------------------------------------------------------- + ! routine write_stream + ! 
+ !> \brief Handle the writing of a stream pointed to by the stream list node + !> \author Michael Duda + !> \date 2 September 2014 + !> \details + !> Private subroutine to handle the details of actually writing a stream. + ! + !----------------------------------------------------------------------- + subroutine write_stream(manager, stream, blockID, timeLevel, mgLevel, forceWritenow, writeTime, ierr) !{{{ + + implicit none + + type (MPAS_streamManager_type), intent(inout) :: manager + type (MPAS_stream_list_type), intent(inout) :: stream + integer, intent(in) :: blockID + integer, intent(in) :: timeLevel + integer, intent(in) :: mgLevel + logical, intent(in) :: forceWriteNow + type (MPAS_Time_type), intent(in) :: writeTime + integer, intent(out) :: ierr + + type (MPAS_stream_list_type), pointer :: alarm_cursor + type (MPAS_Time_type) :: ref_time + type (MPAS_TimeInterval_type) :: temp_interval + type (MPAS_TimeInterval_type) :: filename_interval + character (len=StrKIND) :: now_string, time_string + character (len=StrKIND) :: temp_filename, actualWhen + character (len=StrKIND) :: err_string + logical :: ringing_alarm, recordSeek, swapRecords + logical :: clobberRecords, clobberFiles, truncateFiles + integer :: maxRecords, tempRecord + integer :: local_ierr, threadNum + + threadNum = mpas_threading_get_thread_num() + + ierr = MPAS_STREAM_MGR_NOERR + if ( threadNum == 0 ) then + swapRecords = .false. + + ! + ! Check whether this stream is active + ! + if (.not. stream % active_stream) then + STREAM_DEBUG_WRITE('-- Stream '//trim(stream % name)//' is not currently active and will not be written.') + return + end if + + ! + ! Check whether all packages for this stream are inactive + ! Note: if the stream has no packages, it is assumed to be active + ! + if (.not. stream_active_pkg_check(stream)) then + STREAM_DEBUG_WRITE('-- Stream '//trim(stream % name)//' has only inactive packages and will not be written.') + return + end if + + ! + ! 
Check whether any of the output alarms for the stream are ringing + ! + ringing_alarm = .false. + alarm_cursor => stream % alarmList_out % head + do while (associated(alarm_cursor)) + if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then + ringing_alarm = .true. + exit + end if + alarm_cursor => alarm_cursor % next + end do + + if ((.not. ringing_alarm) .and. (.not. forceWriteNow)) then + return + end if + + ! + ! Work out file clobbering options + ! + if (stream % clobber_mode == MPAS_STREAM_CLOBBER_OVERWRITE) then + clobberRecords = .true. + else + clobberRecords = .false. + end if + + if (stream % clobber_mode == MPAS_STREAM_CLOBBER_OVERWRITE .or. & + stream % clobber_mode == MPAS_STREAM_CLOBBER_TRUNCATE .or. & + stream % clobber_mode == MPAS_STREAM_CLOBBER_APPEND) then + clobberFiles = .true. + else + clobberFiles = .false. + end if + + if (stream % clobber_mode == MPAS_STREAM_CLOBBER_TRUNCATE) then + truncateFiles = .true. + else + truncateFiles = .false. + end if + + ! + ! If the stream is not valid, assume that we have not yet written this + ! stream, in which case we create the stream from scratch + ! + if (.not. stream % valid) then + if ( stream % filename_interval /= 'none' ) then + call mpas_set_timeInterval(filename_interval, timeString=stream % filename_interval) + call mpas_build_stream_filename(stream % referenceTime, writeTime, filename_interval, stream % filename_template, blockID, stream % filename, ierr=local_ierr) + else + call mpas_get_time(stream % referenceTime, dateTimeString=time_string) + call mpas_expand_string(time_string, blockID, stream % filename_template, stream % filename) + end if + + stream % nRecords = 1 + + recordSeek = .false. + ! Based on clobber_mode, determine if it matters if the file exists or not. + if ( stream % clobber_mode == MPAS_STREAM_CLOBBER_OVERWRITE .or. 
stream % clobber_mode == MPAS_STREAM_CLOBBER_APPEND ) then + STREAM_DEBUG_WRITE(' -- Cobber mode is overwrite or append...') - call mpas_build_stream_filename(stream % referenceTime, now_time, filename_interval, stream % filename_template, temp_filename, ierr=local_ierr) - else - call mpas_get_time(stream % referenceTime, dateTimeString=time_string) - call mpas_expand_string(time_string, stream % filename_template, temp_filename) - end if + ! Check if the file exists + inquire(file=trim(stream % filename), exist=recordSeek) + end if - if (temp_filename /= stream % filename) then + ! + ! Build stream from pools of fields and attributes + ! + allocate(stream % stream) + call MPAS_createStream(stream % stream, manager % ioContext, stream % filename, stream % io_type, MPAS_IO_WRITE, & + precision=stream % precision, clobberRecords=clobberRecords, & + clobberFiles=clobberFiles, truncateFiles=truncateFiles, ierr=local_ierr) + stream % stream % blockWrite = stream % blockWrite + if (local_ierr /= MPAS_STREAM_NOERR) then + if (local_ierr == MPAS_STREAM_CLOBBER_FILE) then + ! + ! We should have only reached this point if clobber_mode = never_modify + ! + write(err_string,'(a)') 'Writing to stream '''//trim(stream % name)//''' would clobber file '''//& + trim(stream % filename)//''',' + STREAM_ERROR_WRITE(trim(err_string)) + write(err_string,'(a)') ' but clobber_mode is set to ''never_modify''.' + STREAM_ERROR_WRITE(trim(err_string)) + ierr = MPAS_STREAM_MGR_ERR_CLOBBER_FILE + else + ierr = MPAS_STREAM_MGR_ERROR + end if + return + end if - stream % filename = temp_filename + ! File exists on disk, prior to creating stream. Need to seek the record to ensure we're writing to the correct place. + if ( recordSeek ) then + STREAM_DEBUG_WRITE(' -- File exists on disk: ' COMMA trim(stream % filename)) + call mpas_get_time(writeTime, dateTimeString=now_string) + + ! Look for exact record (in the case of overwriting) + ! This also gets the number of records in the file. 
+ stream % nRecords = MPAS_seekStream(stream % stream, now_string, MPAS_STREAM_EXACT_TIME, actualWhen, maxRecords, local_ierr) + STREAM_DEBUG_WRITE(' -- Seeked record is: ' COMMA stream % nRecords COMMA ' with current records equal to ' COMMA maxRecords COMMA ' and an error of ' COMMA local_ierr) + + if ( stream % nRecords == 0 ) then + ! If we didn't find an exact time, set record to point to the end of the file. + ! This might result in non-monotonic timestamps in the output file. + stream % nRecords = maxRecords + 1 + STREAM_DEBUG_WRITE(' -- No exact time match found for ' COMMA trim(now_string) COMMA ' appending record instead.') + STREAM_DEBUG_WRITE(' -- Setting record to: ' COMMA stream % nRecords) + end if + end if - ! - ! Close existing stream - ! - call MPAS_closeStream(stream % stream, ierr=local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - ierr = MPAS_STREAM_MGR_ERROR - return - end if + call build_stream(stream, MPAS_STREAM_OUTPUT, manager % allFields, manager % allPackages, timeLevel, mgLevel, local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + ierr = MPAS_STREAM_MGR_ERROR + return + end if + stream % timeLevel = timeLevel - recordSeek = .false. - ! Based on clobber_mode, determine if it matters if the file exists or not. - if ( stream % clobber_mode == MPAS_STREAM_CLOBBER_OVERWRITE .or. stream % clobber_mode == MPAS_STREAM_CLOBBER_APPEND ) then - STREAM_DEBUG_WRITE(' -- Cobber mode is overwrite or append...') - - ! Check if the file exists - inquire(file=trim(stream % filename), exist=recordSeek) - end if + stream % valid = .true. 
+ else + if ( stream % filename_interval /= 'none' ) then + call mpas_set_timeInterval(filename_interval, timeString=stream % filename_interval) + + call mpas_build_stream_filename(stream % referenceTime, writeTime, filename_interval, stream % filename_template, blockID, temp_filename, ierr=local_ierr) + else + call mpas_get_time(stream % referenceTime, dateTimeString=time_string) + call mpas_expand_string(time_string, blockID, stream % filename_template, temp_filename) + end if - stream % nRecords = 1 + if (temp_filename /= stream % filename) then - ! - ! Build new stream from pools of fields and attributes - ! - call MPAS_createStream(stream % stream, stream % filename, stream % io_type, MPAS_IO_WRITE, & - precision=stream % precision, clobberRecords=clobberRecords, & - clobberFiles=clobberFiles, truncateFiles=truncateFiles, ierr=local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - if (local_ierr == MPAS_STREAM_CLOBBER_FILE) then - ! - ! We should have only reached this point if clobber_mode = never_modify - ! - write(err_string,'(a)') 'Writing to stream '''//trim(stream % name)//''' would clobber file '''//& - trim(stream % filename)//''',' - STREAM_ERROR_WRITE(trim(err_string)) - write(err_string,'(a)') ' but clobber_mode is set to ''never_modify''.' - STREAM_ERROR_WRITE(trim(err_string)) - ierr = MPAS_STREAM_MGR_ERR_CLOBBER_FILE - else - ierr = MPAS_STREAM_MGR_ERROR - end if - stream % valid = .false. - return - end if + stream % filename = temp_filename - ! File exists on disk, prior to creating stream. Need to seek the record to ensure we're writing to the correct place. - if ( recordSeek ) then - STREAM_DEBUG_WRITE(' -- File exists on disk: ' COMMA trim(stream % filename)) - now_time = mpas_get_clock_time(manager % streamClock, MPAS_NOW, ierr=local_ierr) - call mpas_get_time(now_time, dateTimeString=now_string) - - ! Look for exact record (in the case of overwriting) - ! This also gets the number of records in the file. 
- stream % nRecords = MPAS_seekStream(stream % stream, now_string, MPAS_STREAM_EXACT_TIME, actualWhen, maxRecords, local_ierr) - STREAM_DEBUG_WRITE(' -- Seeked record is: ' COMMA stream % nRecords COMMA ' with current records equal to ' COMMA maxRecords COMMA ' and an error of ' COMMA local_ierr) - - if ( stream % nRecords == 0 ) then - ! If we didn't find an exact time, set record to point to the end of the file. - ! This might result in non-monotonic timestamps in the output file. - stream % nRecords = maxRecords + 1 - STREAM_DEBUG_WRITE(' -- No exact time match found for ' COMMA trim(now_string) COMMA ' appending record instead.') - STREAM_DEBUG_WRITE(' -- Setting record to: ' COMMA stream % nRecords) - end if - end if + ! + ! Close existing stream + ! + call MPAS_closeStream(stream % stream, ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + ierr = MPAS_STREAM_MGR_ERROR + return + end if - call build_stream(stream, MPAS_STREAM_OUTPUT, manager % allFields, manager % allPackages, timeLevel, mgLevel, local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - ierr = MPAS_STREAM_MGR_ERROR - return - end if - stream % timeLevel = timeLevel - else - stream % nRecords = stream % nRecords + 1 - if ( stream % clobber_mode == MPAS_STREAM_CLOBBER_OVERWRITE .or. stream % clobber_mode == MPAS_STREAM_CLOBBER_APPEND ) then - now_time = mpas_get_clock_time(manager % streamClock, MPAS_NOW, ierr=local_ierr) - call mpas_get_time(now_time, dateTimeString=now_string) - - ! Look for exact record (in the case of overwriting) - ! This also gets the number of records in the file. - tempRecord = MPAS_seekStream(stream % stream, now_string, MPAS_STREAM_EXACT_TIME, actualWhen, maxRecords, local_ierr) - STREAM_DEBUG_WRITE(' -- Seeked record is: ' COMMA tempRecord COMMA ' with current records equal to ' COMMA maxRecords COMMA ' and an error of ' COMMA local_ierr) - - if ( tempRecord /= 0 .and. stream % nRecords < maxRecords ) then - ! If we found an exact result - ! 
This might result in non-monotonic timestamps in the output file. - swapRecords = .true. - maxRecords = stream % nRecords - stream % nRecords = tempRecord - tempRecord = maxRecords - STREAM_DEBUG_WRITE(' -- Exact time match found for ' COMMA trim(now_string) ) - STREAM_DEBUG_WRITE(' -- Setting record to: ' COMMA stream % nRecords) - else if ( tempRecord == 0 .and. stream % nRecords < maxRecords ) then - ! If we didn't find an exact time, set record to point to the end of the file. - ! This might result in non-monotonic timestamps in the output file. - stream % nRecords = maxRecords + 1 - STREAM_DEBUG_WRITE(' -- No exact time match found for ' COMMA trim(now_string) COMMA ' appending record instead.') - STREAM_DEBUG_WRITE(' -- Setting record to: ' COMMA stream % nRecords) - end if - end if - end if - end if + recordSeek = .false. + ! Based on clobber_mode, determine if it matters if the file exists or not. + if ( stream % clobber_mode == MPAS_STREAM_CLOBBER_OVERWRITE .or. stream % clobber_mode == MPAS_STREAM_CLOBBER_APPEND ) then + STREAM_DEBUG_WRITE(' -- Cobber mode is overwrite or append...') + + ! Check if the file exists + inquire(file=trim(stream % filename), exist=recordSeek) + end if + + stream % nRecords = 1 + + ! + ! Build new stream from pools of fields and attributes + ! + call MPAS_createStream(stream % stream, manager % ioContext, stream % filename, stream % io_type, MPAS_IO_WRITE, & + precision=stream % precision, clobberRecords=clobberRecords, & + clobberFiles=clobberFiles, truncateFiles=truncateFiles, ierr=local_ierr) + stream % stream % blockWrite = stream % blockWrite + if (local_ierr /= MPAS_STREAM_NOERR) then + if (local_ierr == MPAS_STREAM_CLOBBER_FILE) then + ! + ! We should have only reached this point if clobber_mode = never_modify + ! 
+ write(err_string,'(a)') 'Writing to stream '''//trim(stream % name)//''' would clobber file '''//& + trim(stream % filename)//''',' + STREAM_ERROR_WRITE(trim(err_string)) + write(err_string,'(a)') ' but clobber_mode is set to ''never_modify''.' + STREAM_ERROR_WRITE(trim(err_string)) + ierr = MPAS_STREAM_MGR_ERR_CLOBBER_FILE + else + ierr = MPAS_STREAM_MGR_ERROR + end if + stream % valid = .false. + return + end if + + ! File exists on disk, prior to creating stream. Need to seek the record to ensure we're writing to the correct place. + if ( recordSeek ) then + STREAM_DEBUG_WRITE(' -- File exists on disk: ' COMMA trim(stream % filename)) + call mpas_get_time(writeTime, dateTimeString=now_string) + + ! Look for exact record (in the case of overwriting) + ! This also gets the number of records in the file. + stream % nRecords = MPAS_seekStream(stream % stream, now_string, MPAS_STREAM_EXACT_TIME, actualWhen, maxRecords, local_ierr) + STREAM_DEBUG_WRITE(' -- Seeked record is: ' COMMA stream % nRecords COMMA ' with current records equal to ' COMMA maxRecords COMMA ' and an error of ' COMMA local_ierr) + + if ( stream % nRecords == 0 ) then + ! If we didn't find an exact time, set record to point to the end of the file. + ! This might result in non-monotonic timestamps in the output file. + stream % nRecords = maxRecords + 1 + STREAM_DEBUG_WRITE(' -- No exact time match found for ' COMMA trim(now_string) COMMA ' appending record instead.') + STREAM_DEBUG_WRITE(' -- Setting record to: ' COMMA stream % nRecords) + end if + end if + + call build_stream(stream, MPAS_STREAM_OUTPUT, manager % allFields, manager % allPackages, timeLevel, mgLevel, local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + ierr = MPAS_STREAM_MGR_ERROR + return + end if + stream % timeLevel = timeLevel + else + stream % nRecords = stream % nRecords + 1 + if ( stream % clobber_mode == MPAS_STREAM_CLOBBER_OVERWRITE .or. 
stream % clobber_mode == MPAS_STREAM_CLOBBER_APPEND ) then + call mpas_get_time(writeTime, dateTimeString=now_string) + + ! Look for exact record (in the case of overwriting) + ! This also gets the number of records in the file. + tempRecord = MPAS_seekStream(stream % stream, now_string, MPAS_STREAM_EXACT_TIME, actualWhen, maxRecords, local_ierr) + STREAM_DEBUG_WRITE(' -- Seeked record is: ' COMMA tempRecord COMMA ' with current records equal to ' COMMA maxRecords COMMA ' and an error of ' COMMA local_ierr) + + if ( tempRecord /= 0 .and. stream % nRecords < maxRecords ) then + ! If we found an exact result + ! This might result in non-monotonic timestamps in the output file. + swapRecords = .true. + maxRecords = stream % nRecords + stream % nRecords = tempRecord + tempRecord = maxRecords + STREAM_DEBUG_WRITE(' -- Exact time match found for ' COMMA trim(now_string) ) + STREAM_DEBUG_WRITE(' -- Setting record to: ' COMMA stream % nRecords) + else if ( tempRecord == 0 .and. stream % nRecords < maxRecords ) then + ! If we didn't find an exact time, set record to point to the end of the file. + ! This might result in non-monotonic timestamps in the output file. + stream % nRecords = maxRecords + 1 + STREAM_DEBUG_WRITE(' -- No exact time match found for ' COMMA trim(now_string) COMMA ' appending record instead.') + STREAM_DEBUG_WRITE(' -- Setting record to: ' COMMA stream % nRecords) + end if + end if + end if + end if - if (timeLevel /= stream % timeLevel) then + if (timeLevel /= stream % timeLevel) then - call update_stream(stream, manager % allFields, timeLevel, mgLevel, local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - ierr = MPAS_STREAM_MGR_ERROR - return - end if - stream % timeLevel = timeLevel - end if + call update_stream(stream, manager % allFields, timeLevel, mgLevel, local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + ierr = MPAS_STREAM_MGR_ERROR + return + end if + stream % timeLevel = timeLevel + end if - ! - ! 
For any connectivity arrays in the stream, temporarily convert local indices to global indices - ! - call prewrite_reindex(manager % allFields, stream % field_pool) + ! + ! For any connectivity arrays in the stream, temporarily convert local indices to global indices + ! unless this stream is using blockWrite + ! + if ( .not. stream % blockWrite ) then + STREAM_DEBUG_WRITE(' -- Prewrite reindex for stream ' COMMA trim(stream % name)) + call prewrite_reindex(manager % allFields, stream % field_pool) + end if - ! - ! Write the stream - ! - STREAM_DEBUG_WRITE(' -- Writing stream ' COMMA trim(stream % name)) - call MPAS_writeStream(stream % stream, stream % nRecords, ierr=local_ierr) + ! + ! Write the stream + ! + STREAM_DEBUG_WRITE(' -- Writing stream ' COMMA trim(stream % name)) + call MPAS_writeStream(stream % stream, stream % nRecords, ierr=local_ierr) - ! - ! Regardless of the error code from MPAS_writeStream, we need to reset global indices in the stream back to local indices - ! - call postwrite_reindex(manager % allFields, stream % field_pool) + ! + ! Regardless of the error code from MPAS_writeStream, we need to reset global indices in the stream back to local indices + ! unless this stream is using blockWrite + ! + if ( .not. stream % blockWrite ) then + STREAM_DEBUG_WRITE(' -- Postwrite reindex for stream ' COMMA trim(stream % name)) + call postwrite_reindex(manager % allFields, stream % field_pool) + end if - if (local_ierr /= MPAS_STREAM_NOERR) then - if (local_ierr == MPAS_STREAM_CLOBBER_RECORD) then - ! - ! We should have only reached this point if clobber_mode = append - ! - write(err_string,'(a,i4,a)') 'Writing to stream '''//trim(stream % name)//''' would overwrite record ', & - stream % nRecords, ' in file '''//trim(stream % filename)//''',' - STREAM_ERROR_WRITE(trim(err_string)) - write(err_string,'(a)') ' but clobber_mode is set to ''append''.' 
- STREAM_ERROR_WRITE(trim(err_string)) - ierr = MPAS_STREAM_MGR_ERR_CLOBBER_REC - else - ierr = MPAS_STREAM_MGR_ERROR - end if + if (local_ierr /= MPAS_STREAM_NOERR) then + if (local_ierr == MPAS_STREAM_CLOBBER_RECORD) then + ! + ! We should have only reached this point if clobber_mode = append + ! + write(err_string,'(a,i4,a)') 'Writing to stream '''//trim(stream % name)//''' would overwrite record ', & + stream % nRecords, ' in file '''//trim(stream % filename)//''',' + STREAM_ERROR_WRITE(trim(err_string)) + write(err_string,'(a)') ' but clobber_mode is set to ''append''.' + STREAM_ERROR_WRITE(trim(err_string)) + ierr = MPAS_STREAM_MGR_ERR_CLOBBER_REC + else + ierr = MPAS_STREAM_MGR_ERROR + end if - if ( swapRecords ) then - stream % nRecords = tempRecord - end if - return - end if + if ( swapRecords ) then + stream % nRecords = tempRecord + end if + return + end if - if ( swapRecords ) then - stream % nRecords = tempRecord + if ( swapRecords ) then + stream % nRecords = tempRecord + end if end if end subroutine write_stream !}}} @@ -2704,7 +3193,7 @@ end subroutine write_stream !}}} !> \date 13 June 2014 !> \details !> With no optional arguments, reads all streams whose alarms are ringing. - !> The "streamID" argument optionally specifies the ID of a particular stream + !> The "streamID" argument optionally specifies a regular expression of streams !> to be read; if no other optional arguments are given, the specified stream !> is only read if any of its alarms are ringing. 
!> The "timeLevel" argument optionally specifies, for fields with multiple @@ -2748,7 +3237,10 @@ subroutine MPAS_stream_mgr_read(manager, streamID, timeLevel, mgLevel, rightNow, integer :: local_ierr integer :: temp_ierr type (MPAS_Time_type) :: now_time + integer :: threadNum + logical :: readStreams + threadNum = mpas_threading_get_thread_num() STREAM_DEBUG_WRITE('-- Called MPAS_stream_mgr_read()') @@ -2790,51 +3282,53 @@ subroutine MPAS_stream_mgr_read(manager, streamID, timeLevel, mgLevel, rightNow, end if - ! - ! If a stream is specified, we process just that stream; otherwise, - ! process all streams - ! - if (present(streamID)) then - nullify(stream_cursor) - if (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=ierr)) then - STREAM_DEBUG_WRITE('-- Handling read of stream '//trim(stream_cursor % name)) - - ! Verify that the stream is an input stream - if (stream_cursor % direction /= MPAS_STREAM_INPUT .and. stream_cursor % direction /= MPAS_STREAM_INPUT_OUTPUT) then - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' is not an input stream.') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if - - call read_stream(manager, stream_cursor, local_timeLevel, local_mgLevel, local_rightNow, local_when, local_whence, & - actualWhen, local_ierr) - else - STREAM_ERROR_WRITE('Stream '//trim(streamID)//' does not exist in call to MPAS_stream_mgr_read().') - if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR - return - end if - else - nullify(stream_cursor) - stream_cursor => manager % streams % head - do while (associated(stream_cursor)) - STREAM_DEBUG_WRITE('-- Handling read of stream '//trim(stream_cursor % name)) - - ! Verify that the stream is an input stream - if (stream_cursor % direction == MPAS_STREAM_INPUT .or. & - stream_cursor % direction /= MPAS_STREAM_INPUT_OUTPUT) then - - ! - ! What should be the meaning of actualWhen if we read multiple streams in this call? - ! 
- call read_stream(manager, stream_cursor, local_timeLevel, local_mgLevel, local_rightNow, & - local_when, local_whence, actualWhen, temp_ierr) - if (temp_ierr /= MPAS_STREAM_MGR_NOERR) then - local_ierr = MPAS_STREAM_MGR_ERROR - end if - end if - - stream_cursor => stream_cursor % next - end do + if ( threadNum == 0 ) then + ! + ! If a stream is specified, we process just that stream; otherwise, + ! process all streams + ! + if (present(streamID)) then + readStreams = .false. + nullify(stream_cursor) + do while (MPAS_stream_list_query(manager % streams, streamID, stream_cursor, ierr=ierr)) + STREAM_DEBUG_WRITE('-- Handling read of stream '//trim(stream_cursor % name)) + + ! Verify that the stream is an input stream + if (stream_cursor % direction == MPAS_STREAM_INPUT .or. stream_cursor % direction == MPAS_STREAM_INPUT_OUTPUT) then + readStreams = .true. + call read_stream(manager, stream_cursor, local_timeLevel, local_mgLevel, local_rightNow, local_when, & + local_whence, actualWhen, local_ierr) + end if + end do + + if ( .not. readStreams ) then + STREAM_ERROR_WRITE('No input stream matching '//trim(streamID)//' exists in call to MPAS_stream_mgr_read().') + if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR + return + end if + else + nullify(stream_cursor) + stream_cursor => manager % streams % head + do while (associated(stream_cursor)) + STREAM_DEBUG_WRITE('-- Handling read of stream '//trim(stream_cursor % name)) + + ! Verify that the stream is an input stream + if (stream_cursor % direction == MPAS_STREAM_INPUT .or. & + stream_cursor % direction == MPAS_STREAM_INPUT_OUTPUT) then + + ! + ! What should be the meaning of actualWhen if we read multiple streams in this call? + ! 
+ call read_stream(manager, stream_cursor, local_timeLevel, local_mgLevel, local_rightNow, & + local_when, local_whence, actualWhen, temp_ierr) + if (temp_ierr /= MPAS_STREAM_MGR_NOERR) then + local_ierr = MPAS_STREAM_MGR_ERROR + end if + end if + + stream_cursor => stream_cursor % next + end do + end if end if if (present(ierr)) ierr = local_ierr @@ -2866,6 +3360,7 @@ subroutine read_stream(manager, stream, timeLevel, mgLevel, forceReadNow, when, character (len=*), intent(out), optional :: actualWhen integer, intent(out) :: ierr + integer :: blockID_local character (len=StrKIND) :: err_string type (MPAS_stream_list_type), pointer :: alarm_cursor @@ -2889,359 +3384,370 @@ subroutine read_stream(manager, stream, timeLevel, mgLevel, forceReadNow, when, character (len=StrKIND) :: test_actualWhen integer :: test_record, test_maxRecords logical :: retestFile, rebuildStream + integer :: threadNum + threadNum = mpas_threading_get_thread_num() - ierr = MPAS_STREAM_MGR_NOERR - rebuildStream = .false. - - ! - ! Check whether this stream is active - ! - if (.not. stream % active_stream) then - STREAM_DEBUG_WRITE('-- Stream '//trim(stream % name)//' is not currently active and will not be read.') - return - end if - - ! - ! Check whether all packages for this stream are inactive - ! Note: if the stream has no packages, it is assumed to be active - ! - if (.not. stream_active_pkg_check(stream)) then - STREAM_DEBUG_WRITE('-- Stream '//trim(stream % name)//' has only inactive packages and will not be read.') - return - end if - - ! - ! Check whether any of the input alarms for the stream are ringing - ! - ringing_alarm = .false. - alarm_cursor => stream % alarmList_in % head - do while (associated(alarm_cursor)) - if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then - ringing_alarm = .true. - exit - end if - alarm_cursor => alarm_cursor % next - end do - - if ((.not. ringing_alarm) .and. (.not. forceReadNow)) then - return - end if - - ! 
- ! First we need to build the filename for the current read time. - ! - if ( stream % filename_interval /= 'none' ) then - call mpas_set_time(now_time, dateTimeString=when, ierr=local_ierr) - call mpas_set_timeInterval(filename_interval, timeString=stream % filename_interval) - - call mpas_build_stream_filename(stream % referenceTime, now_time, filename_interval, stream % filename_template, temp_filename, ierr=local_ierr) - else - call mpas_expand_string(when, stream % filename_template, temp_filename) - end if + ! For now, reading only supports global reads. + ! In the future, if reading is supposed to support local reads as well, + ! blockID can become an input argument and then blockID_local can be + ! set to blockID + blockID_local = -1 - STREAM_DEBUG_WRITE(' -- Stream filename is: ' COMMA trim(temp_filename) ) + ierr = MPAS_STREAM_MGR_NOERR - ! - ! If the stream is not valid, assume that we have not yet written this - ! stream, in which case we create the stream from scratch - ! - if (.not. stream % valid) then - stream % filename = temp_filename + if ( threadNum == 0 ) then + rebuildStream = .false. - ! - ! Build stream from pools of fields and attributes - ! - allocate(stream % stream) - call MPAS_createStream(stream % stream, stream % filename, stream % io_type, MPAS_IO_READ, & - precision=stream % precision, ierr=local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - if (local_ierr == MPAS_IO_ERR_NOEXIST_READ) then - write(err_string,'(a)') 'Stream '''//trim(stream % name)//''' attempted to read non-existent file '''//trim(stream % filename)//'''' - STREAM_ERROR_WRITE(trim(err_string)) - ierr = MPAS_STREAM_MGR_ERROR - else - ierr = MPAS_STREAM_MGR_ERROR - end if - return - end if + ! + ! Check whether this stream is active + ! + if (.not. 
stream % active_stream) then + STREAM_DEBUG_WRITE('-- Stream '//trim(stream % name)//' is not currently active and will not be read.') + return + end if - call build_stream(stream, MPAS_STREAM_INPUT, manager % allFields, manager % allPackages, timeLevel, mgLevel, local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - ierr = MPAS_STREAM_MGR_ERROR - return - end if - stream % timeLevel = timeLevel + ! + ! Check whether all packages for this stream are inactive + ! Note: if the stream has no packages, it is assumed to be active + ! + if (.not. stream_active_pkg_check(stream)) then + STREAM_DEBUG_WRITE('-- Stream '//trim(stream % name)//' has only inactive packages and will not be read.') + return + end if - stream % valid = .true. - else if (temp_filename /= stream % filename) then - STREAM_DEBUG_WRITE('-- Changing filename from '//trim(stream % filename)//' to '//trim(temp_filename)) + ! + ! Check whether any of the input alarms for the stream are ringing + ! + ringing_alarm = .false. + alarm_cursor => stream % alarmList_in % head + do while (associated(alarm_cursor)) + if (mpas_is_alarm_ringing(manager % streamClock, alarm_cursor % name, ierr=local_ierr)) then + ringing_alarm = .true. + exit + end if + alarm_cursor => alarm_cursor % next + end do - stream % filename = temp_filename + if ((.not. ringing_alarm) .and. (.not. forceReadNow)) then + return + end if ! - ! Close existing stream + ! First we need to build the filename for the current read time. ! 
- call MPAS_closeStream(stream % stream, ierr=local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - ierr = MPAS_STREAM_MGR_ERROR - return + if ( stream % filename_interval /= 'none' ) then + call mpas_set_time(now_time, dateTimeString=when, ierr=local_ierr) + call mpas_set_timeInterval(filename_interval, timeString=stream % filename_interval) + + call mpas_build_stream_filename(stream % referenceTime, now_time, filename_interval, stream % filename_template, blockID_local, temp_filename, ierr=local_ierr) + else + call mpas_expand_string(when, blockID_local, stream % filename_template, temp_filename) end if + STREAM_DEBUG_WRITE(' -- Stream filename is: ' COMMA trim(temp_filename) ) + ! - ! Build new stream from pools of fields and attributes + ! If the stream is not valid, assume that we have not yet written this + ! stream, in which case we create the stream from scratch ! - call MPAS_createStream(stream % stream, stream % filename, stream % io_type, MPAS_IO_READ, precision=stream % precision, ierr=local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - if (local_ierr == MPAS_IO_ERR_NOEXIST_READ) then - write(err_string,'(a)') 'Stream '''//trim(stream % name)//''' attempted to read non-existent file '''//trim(stream % filename)//'''' - STREAM_ERROR_WRITE(trim(err_string)) - ierr = MPAS_STREAM_MGR_ERROR - else + if (.not. stream % valid) then + stream % filename = temp_filename + + ! + ! Build stream from pools of fields and attributes + ! 
+ allocate(stream % stream) + call MPAS_createStream(stream % stream, manager % ioContext, stream % filename, stream % io_type, MPAS_IO_READ, & + precision=stream % precision, ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + if (local_ierr == MPAS_IO_ERR_NOEXIST_READ) then + write(err_string,'(a)') 'Stream '''//trim(stream % name)//''' attempted to read non-existent file '''//trim(stream % filename)//'''' + STREAM_ERROR_WRITE(trim(err_string)) + ierr = MPAS_STREAM_MGR_ERROR + else + ierr = MPAS_STREAM_MGR_ERROR + end if + return + end if + + call build_stream(stream, MPAS_STREAM_INPUT, manager % allFields, manager % allPackages, timeLevel, mgLevel, local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then ierr = MPAS_STREAM_MGR_ERROR + return end if - return - end if + stream % timeLevel = timeLevel - call build_stream(stream, MPAS_STREAM_INPUT, manager % allFields, manager % allPackages, timeLevel, mgLevel, local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - ierr = MPAS_STREAM_MGR_ERROR - return - end if - stream % timeLevel = timeLevel - end if + stream % valid = .true. + else if (temp_filename /= stream % filename) then + STREAM_DEBUG_WRITE('-- Changing filename from '//trim(stream % filename)//' to '//trim(temp_filename)) - STREAM_DEBUG_WRITE(' Seeking time of ' COMMA trim(when)) + stream % filename = temp_filename - ! - ! With multiple times per file, we need to get the record number from MPAS_seekStream. - ! - stream % nRecords = MPAS_seekStream(stream % stream, when, whence, temp_actualWhen, maxRecords=temp_maxRecords, ierr=local_ierr) - - if ( stream % nRecords == 0 .and. temp_maxRecords == 0 ) then - stream % nRecords = 1 - STREAM_WARNING_WRITE('File ' COMMA trim(stream % filename) COMMA ' does not contain a seekable xtime variable. 
Forcing a read of the first time record.') - else if (stream % nRecords /= 0) then - STREAM_DEBUG_WRITE(' Seeked record is: ' COMMA stream % nRecords COMMA ' out of ' COMMA temp_maxRecords COMMA ' with a time stamp of ' COMMA trim(temp_actualWhen) COMMA ' filename was ' COMMA trim(stream % filename)) - else if (temp_maxRecords /= 0 .and. whence == MPAS_STREAM_EXACT_TIME) then - STREAM_ERROR_WRITE('File ' COMMA trim(stream % filename) COMMA ' does not contain the time ' COMMA trim(when)) - ierr = MPAS_STREAM_MGR_ERROR - return - end if + ! + ! Close existing stream + ! + call MPAS_closeStream(stream % stream, ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + ierr = MPAS_STREAM_MGR_ERROR + return + end if + + ! + ! Build new stream from pools of fields and attributes + ! + call MPAS_createStream(stream % stream, manager % ioContext, stream % filename, stream % io_type, MPAS_IO_READ, precision=stream % precision, ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + if (local_ierr == MPAS_IO_ERR_NOEXIST_READ) then + write(err_string,'(a)') 'Stream '''//trim(stream % name)//''' attempted to read non-existent file '''//trim(stream % filename)//'''' + STREAM_ERROR_WRITE(trim(err_string)) + ierr = MPAS_STREAM_MGR_ERROR + else + ierr = MPAS_STREAM_MGR_ERROR + end if + return + end if - retestFile = .false. - if ( trim(stream % filename_interval) /= 'none' .and. whence /= MPAS_STREAM_EXACT_TIME ) then - currentTime = mpas_get_clock_time(manager % streamClock, MPAS_NOW, ierr=local_ierr) - call mpas_set_timeInterval(filenameInterval, timeString=stream % filename_interval, ierr=local_ierr) + call build_stream(stream, MPAS_STREAM_INPUT, manager % allFields, manager % allPackages, timeLevel, mgLevel, local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + ierr = MPAS_STREAM_MGR_ERROR + return + end if + stream % timeLevel = timeLevel + end if + + STREAM_DEBUG_WRITE(' Seeking time of ' COMMA trim(when)) - ! 
Need to handle the case where the time requested was not found. ! - ! Things that need to be handled here are when we're at the beginning - ! or end of a file and we're looking for the next or previous time - ! record. + ! With multiple times per file, we need to get the record number from MPAS_seekStream. ! - ! Currently this only checks one file each direction (forward or - ! backward). It will fail finding a file more than one interval away - ! from when. - if ( stream % nRecords == 0) then - if ( ( whence == MPAS_STREAM_LATEST_BEFORE .and. temp_actualWhen /= when ) .or. whence == MPAS_STREAM_LATEST_STRICTLY_BEFORE ) then - ! Subtract filename_interval from when, build new filename, and - ! check for a time latest before in that file. - filenameTime = currentTime - filenameInterval - retestFile = .true. - STREAM_DEBUG_WRITE(' Retest latest before...') - else if ( ( whence == MPAS_STREAM_EARLIEST_AFTER .and. temp_actualWhen /= when ).or. whence == MPAS_STREAM_EARLIEST_STRICTLY_AFTER ) then - ! Add filename_interval from when, build new filename, and - ! check for a time latest before in that file. - filenameTime = currentTime + filenameInterval - retestFile = .true. - STREAM_DEBUG_WRITE(' Retest earliest after...') - end if - else - ! If time was found, and we were looking for nearest need to make sure nearest isn't in previous or next file. - ! - ! This only needs to be checked if we found the first or last time slice in the file. - if ( whence == MPAS_STREAM_NEAREST ) then - if ( stream % nRecords == 1 .and. stream % nRecords == temp_maxRecords ) then - call mpas_set_time(temp_time, dateTimeString=temp_actualWhen) + stream % nRecords = MPAS_seekStream(stream % stream, when, whence, temp_actualWhen, maxRecords=temp_maxRecords, ierr=local_ierr) + + if ( stream % nRecords == 0 .and. temp_maxRecords == 0 ) then + stream % nRecords = 1 + STREAM_WARNING_WRITE('File ' COMMA trim(stream % filename) COMMA ' does not contain a seekable xtime variable. 
Forcing a read of the first time record.') + else if (stream % nRecords /= 0) then + STREAM_DEBUG_WRITE(' Seeked record is: ' COMMA stream % nRecords COMMA ' out of ' COMMA temp_maxRecords COMMA ' with a time stamp of ' COMMA trim(temp_actualWhen) COMMA ' filename was ' COMMA trim(stream % filename)) + else if (temp_maxRecords /= 0 .and. whence == MPAS_STREAM_EXACT_TIME) then + STREAM_ERROR_WRITE('File ' COMMA trim(stream % filename) COMMA ' does not contain the time ' COMMA trim(when)) + ierr = MPAS_STREAM_MGR_ERROR + return + end if - ! If an exact time was found, read that one, and don't bother re-testing. - if ( currentTime == temp_time ) then - retestFile = .false. + retestFile = .false. + if ( trim(stream % filename_interval) /= 'none' .and. whence /= MPAS_STREAM_EXACT_TIME ) then + currentTime = mpas_get_clock_time(manager % streamClock, MPAS_NOW, ierr=local_ierr) + call mpas_set_timeInterval(filenameInterval, timeString=stream % filename_interval, ierr=local_ierr) - ! If current time is before the time that was read, re-test using the previous file - else if ( currentTime < temp_time ) then + ! Need to handle the case where the time requested was not found. + ! + ! Things that need to be handled here are when we're at the beginning + ! or end of a file and we're looking for the next or previous time + ! record. + ! + ! Currently this only checks one file each direction (forward or + ! backward). It will fail finding a file more than one interval away + ! from when. + if ( stream % nRecords == 0) then + if ( ( whence == MPAS_STREAM_LATEST_BEFORE .and. temp_actualWhen /= when ) .or. whence == MPAS_STREAM_LATEST_STRICTLY_BEFORE ) then + ! Subtract filename_interval from when, build new filename, and + ! check for a time latest before in that file. + filenameTime = currentTime - filenameInterval + retestFile = .true. + STREAM_DEBUG_WRITE(' Retest latest before...') + else if ( ( whence == MPAS_STREAM_EARLIEST_AFTER .and. temp_actualWhen /= when ).or. 
whence == MPAS_STREAM_EARLIEST_STRICTLY_AFTER ) then + ! Add filename_interval from when, build new filename, and + ! check for a time latest before in that file. + filenameTime = currentTime + filenameInterval + retestFile = .true. + STREAM_DEBUG_WRITE(' Retest earliest after...') + end if + else + ! If time was found, and we were looking for nearest need to make sure nearest isn't in previous or next file. + ! + ! This only needs to be checked if we found the first or last time slice in the file. + if ( whence == MPAS_STREAM_NEAREST ) then + if ( stream % nRecords == 1 .and. stream % nRecords == temp_maxRecords ) then + call mpas_set_time(temp_time, dateTimeString=temp_actualWhen) + + ! If an exact time was found, read that one, and don't bother re-testing. + if ( currentTime == temp_time ) then + retestFile = .false. + + ! If current time is before the time that was read, re-test using the previous file + else if ( currentTime < temp_time ) then + filenameTime = currentTime - filenameInterval + retestFile = .true. + STREAM_DEBUG_WRITE('Retest nearest prev file') + + ! If current time is before the time that was read, re-test using the next file + else if ( currentTime > temp_time ) then + filenameTime = currentTime + filenameInterval + retestFile = .true. + STREAM_DEBUG_WRITE('Retest nearest next file') + end if + else if ( stream % nRecords == 1 ) then + ! Subtract filename_interval from when, build new filename, and check for nearest time in that file. + ! Compare the two, and keep the one closest to when. filenameTime = currentTime - filenameInterval retestFile = .true. - STREAM_DEBUG_WRITE('Retest nearest prev file') - - ! If current time is before the time that was read, re-test using the next file - else if ( currentTime > temp_time ) then + STREAM_DEBUG_WRITE('Retest nearest beginning') + else if ( stream % nRecords == temp_maxRecords ) then + ! Add filename_interval from when, build new filename, and check for nearest time in that file. + ! 
Compare the two, and keep the one closest to when. filenameTime = currentTime + filenameInterval retestFile = .true. - STREAM_DEBUG_WRITE('Retest nearest next file') - end if - else if ( stream % nRecords == 1 ) then - ! Subtract filename_interval from when, build new filename, and check for nearest time in that file. - ! Compare the two, and keep the one closest to when. - filenameTime = currentTime - filenameInterval - retestFile = .true. - STREAM_DEBUG_WRITE('Retest nearest beginning') - else if ( stream % nRecords == temp_maxRecords ) then - ! Add filename_interval from when, build new filename, and check for nearest time in that file. - ! Compare the two, and keep the one closest to when. - filenameTime = currentTime + filenameInterval - retestFile = .true. - STREAM_DEBUG_WRITE('Retest nearest end') + STREAM_DEBUG_WRITE('Retest nearest end') + end if end if end if end if - end if - if ( retestFile ) then - STREAM_DEBUG_WRITE(' --- Retesting file... ') - call mpas_get_time(filenameTime, dateTimeString=test_when) + if ( retestFile ) then + STREAM_DEBUG_WRITE(' --- Retesting file... ') + call mpas_get_time(filenameTime, dateTimeString=test_when) - call mpas_set_timeInterval(filename_interval, timeString=stream % filename_interval) - - call mpas_build_stream_filename(stream % referenceTime, filenameTime, filename_interval, stream % filename_template, test_filename, ierr=local_ierr) + call mpas_set_timeInterval(filename_interval, timeString=stream % filename_interval) + + call mpas_build_stream_filename(stream % referenceTime, filenameTime, filename_interval, stream % filename_template, blockID_local, test_filename, ierr=local_ierr) - STREAM_DEBUG_WRITE(' --- Retesting filename is ' COMMA trim(test_filename)) + STREAM_DEBUG_WRITE(' --- Retesting filename is ' COMMA trim(test_filename)) - inquire(file=trim(test_filename), exist=retestFile) + inquire(file=trim(test_filename), exist=retestFile) - ! If file exists, the testing stream needs to be built. 
- if ( retestFile ) then - call mpas_createStream(testStream, test_filename, stream % io_type, MPAS_IO_READ, precision=stream % precision, ierr=local_ierr) - else - STREAM_DEBUG_WRITE(' Filename: ' COMMA trim(test_filename) COMMA ' does not exist.') + ! If file exists, the testing stream needs to be built. + if ( retestFile ) then + call mpas_createStream(testStream, manager % ioContext, test_filename, stream % io_type, MPAS_IO_READ, precision=stream % precision, ierr=local_ierr) + else + STREAM_DEBUG_WRITE(' Filename: ' COMMA trim(test_filename) COMMA ' does not exist.') + end if end if - end if - ! Only continue testing file it if was found. - if ( retestFile ) then - test_record = MPAS_seekStream(testStream, when, whence, test_actualWhen, maxRecords=test_maxRecords, ierr=local_ierr) + ! Only continue testing file it if was found. + if ( retestFile ) then + test_record = MPAS_seekStream(testStream, when, whence, test_actualWhen, maxRecords=test_maxRecords, ierr=local_ierr) - STREAM_DEBUG_WRITE(' -- Test record is ' COMMA test_record COMMA ' out of ' COMMA test_maxRecords COMMA ' with a time of ' COMMA trim(test_actualWhen)) + STREAM_DEBUG_WRITE(' -- Test record is ' COMMA test_record COMMA ' out of ' COMMA test_maxRecords COMMA ' with a time of ' COMMA trim(test_actualWhen)) - if ( test_record /= 0 ) then - if ( whence == MPAS_STREAM_NEAREST ) then - call mpas_set_time(whenTime, dateTimeString=when) - call mpas_set_time(firstTime, dateTimeString=temp_actualWhen) - call mpas_set_time(secondTime, dateTimeString=test_actualWhen) + if ( test_record /= 0 ) then + if ( whence == MPAS_STREAM_NEAREST ) then + call mpas_set_time(whenTime, dateTimeString=when) + call mpas_set_time(firstTime, dateTimeString=temp_actualWhen) + call mpas_set_time(secondTime, dateTimeString=test_actualWhen) - ! Build first diff - if ( firstTime > whenTime ) then - firstDiff = firstTime - whenTime - else - firstDiff = whenTime - firstTime - end if + ! 
Build first diff + if ( firstTime > whenTime ) then + firstDiff = firstTime - whenTime + else + firstDiff = whenTime - firstTime + end if - ! Build second diff - if ( secondTime > whenTime ) then - secondDiff = secondTime - whenTime - else - secondDiff = whenTime - secondTime - end if + ! Build second diff + if ( secondTime > whenTime ) then + secondDiff = secondTime - whenTime + else + secondDiff = whenTime - secondTime + end if - ! Compare first and second diff, keeping the closest one to when. - ! Only need to rebuild stream if the second* ones are closer. - if ( secondDiff == firstDiff ) then + ! Compare first and second diff, keeping the closest one to when. + ! Only need to rebuild stream if the second* ones are closer. + if ( secondDiff == firstDiff ) then - ! If times are equidistance, take the later of the two. - if ( firstTime > secondTime ) then - rebuildStream = .false. - else + ! If times are equidistance, take the later of the two. + if ( firstTime > secondTime ) then + rebuildStream = .false. + else + rebuildStream = .true. + end if + else if ( secondDiff < firstDiff ) then rebuildStream = .true. + STREAM_DEBUG_WRITE(' --- New time is closer than old time') + else + STREAM_DEBUG_WRITE(' --- Old time is closer than test time') end if - else if ( secondDiff < firstDiff ) then + else if ( stream % nRecords == 0 ) then rebuildStream = .true. - STREAM_DEBUG_WRITE(' --- New time is closer than old time') - else - STREAM_DEBUG_WRITE(' --- Old time is closer than test time') - end if - else if ( stream % nRecords == 0 ) then - rebuildStream = .true. + end if + else + rebuildStream = .false. end if - else - rebuildStream = .false. + call MPAS_closeStream(testStream, ierr=local_ierr) end if - call MPAS_closeStream(testStream, ierr=local_ierr) - end if - ! Rebuild stream if we need to, because a different file has a closer time. - if ( rebuildStream ) then - STREAM_DEBUG_WRITE(' --- rebuilding stream...') - stream % filename = test_filename + ! 
Rebuild stream if we need to, because a different file has a closer time. + if ( rebuildStream ) then + STREAM_DEBUG_WRITE(' --- rebuilding stream...') + stream % filename = test_filename - ! - ! Close existing stream - ! - call MPAS_closeStream(stream % stream, ierr=local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - ierr = MPAS_STREAM_MGR_ERROR - return + ! + ! Close existing stream + ! + call MPAS_closeStream(stream % stream, ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + ierr = MPAS_STREAM_MGR_ERROR + return + end if + + ! + ! Build new stream from pools of fields and attributes + ! + call MPAS_createStream(stream % stream, manager % ioContext, stream % filename, stream % io_type, MPAS_IO_READ, precision=stream % precision, ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + ierr = MPAS_STREAM_MGR_ERROR + return + end if + + call build_stream(stream, MPAS_STREAM_INPUT, manager % allFields, manager % allPackages, timeLevel, mgLevel, local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + ierr = MPAS_STREAM_MGR_ERROR + return + end if + stream % timeLevel = timeLevel + + ! Set record number based on test_record from the read we just did. + stream % nRecords = test_record end if - ! - ! Build new stream from pools of fields and attributes - ! - call MPAS_createStream(stream % stream, stream % filename, stream % io_type, MPAS_IO_READ, precision=stream % precision, ierr=local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - ierr = MPAS_STREAM_MGR_ERROR - return + if (timeLevel /= stream % timeLevel) then + + call update_stream(stream, manager % allFields, timeLevel, mgLevel, local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then + ierr = MPAS_STREAM_MGR_ERROR + return + end if + stream % timeLevel = timeLevel end if - call build_stream(stream, MPAS_STREAM_INPUT, manager % allFields, manager % allPackages, timeLevel, mgLevel, local_ierr) + ! + ! Read the stream + ! 
+ call MPAS_readStream(stream % stream, stream % nRecords, ierr=local_ierr) if (local_ierr /= MPAS_STREAM_NOERR) then ierr = MPAS_STREAM_MGR_ERROR return end if - stream % timeLevel = timeLevel - - ! Set record number based on test_record from the read we just did. - stream % nRecords = test_record - end if - - if (timeLevel /= stream % timeLevel) then - - call update_stream(stream, manager % allFields, timeLevel, mgLevel, local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - ierr = MPAS_STREAM_MGR_ERROR - return - end if - stream % timeLevel = timeLevel - end if - - ! - ! Read the stream - ! - call MPAS_readStream(stream % stream, stream % nRecords, ierr=local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then - ierr = MPAS_STREAM_MGR_ERROR - return - end if - if (present(actualWhen)) then - call MPAS_streamTime(stream % stream, stream % nRecords, actualWhen, ierr=local_ierr) - if (local_ierr /= MPAS_STREAM_NOERR) then + if (present(actualWhen)) then + call MPAS_streamTime(stream % stream, stream % nRecords, actualWhen, ierr=local_ierr) + if (local_ierr /= MPAS_STREAM_NOERR) then ! ! TODO: Add debug prints for all error conditions ! - ierr = MPAS_STREAM_MGR_ERROR - return - end if - end if + ierr = MPAS_STREAM_MGR_ERROR + return + end if + end if - ! - ! Exchange halos for all decomposed fields in this stream - ! - call exch_all_halos(manager % allFields, stream % field_pool, stream % timeLevel, local_ierr) + ! + ! Exchange halos for all decomposed fields in this stream + ! + call exch_all_halos(manager % allFields, stream % field_pool, stream % timeLevel, local_ierr) - ! - ! For any connectivity arrays in this stream, convert global indices to local indices - ! - call postread_reindex(manager % allFields, stream % field_pool) + ! + ! For any connectivity arrays in this stream, convert global indices to local indices + ! 
+ call postread_reindex(manager % allFields, stream % field_pool) + end if end subroutine read_stream !}}} @@ -3287,20 +3793,23 @@ end subroutine MPAS_stream_mesg!}}} !> !> Return error codes: !> 0 no error + !> + !> NOTE: This routine does not support streamID regular expressions !----------------------------------------------------------------------- - subroutine mpas_get_stream_filename(manager, streamID, when, filename, ierr) !{{{ + subroutine mpas_get_stream_filename(manager, streamID, when, blockID, filename, ierr) !{{{ implicit none type (MPAS_streamManager_type), intent(in) :: manager !< Input: Stream manager to get stream from character (len=*), intent(in) :: streamID !< Input: Stream name to use for building the filename character (len=*), intent(in), optional :: when !< Optional Input: Time file should contain + integer, intent(in), optional :: blockID !< Input: ID of the block that will be written character (len=StrKIND), intent(out) :: filename !< Output: Name of file containing time integer, intent(out), optional :: ierr !< Optional Output: Error code type (mpas_stream_list_type), pointer :: streamCursor - integer :: err_local + integer :: err_local, blockID_local character(len=StrKIND) :: when_string type (MPAS_TimeInterval_type) :: filename_interval @@ -3308,6 +3817,13 @@ subroutine mpas_get_stream_filename(manager, streamID, when, filename, ierr) !{{ ierr = 0 + if ( present(blockID) ) then + blockID_local = blockID + else + blockID_local = -1 + end if + + nullify(streamCursor) if ( mpas_stream_list_query(manager % streams, streamID, streamCursor, err_local) ) then if ( present(when) ) then call mpas_set_time(now_time, dateTimeString=when, ierr=err_local) @@ -3320,10 +3836,10 @@ subroutine mpas_get_stream_filename(manager, streamID, when, filename, ierr) !{{ ! 
if ( streamCursor % filename_interval /= 'none' ) then call mpas_set_timeInterval(filename_interval, timeString=streamCursor % filename_interval) - call mpas_build_stream_filename(streamCursor % referenceTime, now_time, filename_interval, streamCursor % filename_template, filename, ierr=err_local) + call mpas_build_stream_filename(streamCursor % referenceTime, now_time, filename_interval, streamCursor % filename_template, blockID_local, filename, ierr=err_local) else call mpas_get_time(now_time, dateTimeString=when_string, ierr=err_local) - call mpas_expand_string(when_string, streamCursor % filename_template, filename) + call mpas_expand_string(when_string, blockID_local, streamCursor % filename_template, filename) end if else @@ -3357,7 +3873,7 @@ end subroutine mpas_get_stream_filename !}}} !> Return error codes: !> 0 no error !----------------------------------------------------------------------- - subroutine mpas_build_stream_filename(ref_time, when, filename_interval, filename_template, filename, ierr) !{{{ + subroutine mpas_build_stream_filename(ref_time, when, filename_interval, filename_template, blockID, filename, ierr) !{{{ implicit none @@ -3365,6 +3881,7 @@ subroutine mpas_build_stream_filename(ref_time, when, filename_interval, filenam type (MPAS_Time_type), intent(in) :: when type (MPAS_TimeInterval_type), intent(in) :: filename_interval character(len=*), intent(in) :: filename_template + integer, intent(in) :: blockID character(len=*), intent(out) :: filename integer, intent(out) :: ierr @@ -3418,7 +3935,7 @@ subroutine mpas_build_stream_filename(ref_time, when, filename_interval, filenam call mpas_get_time(filetime, dateTimeString=when_string) STREAM_DEBUG_WRITE(' ** filetime start is: ' COMMA trim(when_string)) - call mpas_expand_string(when_string, filename_template, filename) + call mpas_expand_string(when_string, blockID, filename_template, filename) end subroutine mpas_build_stream_filename !}}} @@ -3489,19 +4006,19 @@ subroutine 
build_stream(stream, direction, allFields, allPackages, timeLevelIn, if ( itr % memberType == MPAS_POOL_CONFIG) then if ( itr % dataType == MPAS_POOL_REAL ) then call mpas_pool_get_config(stream % att_pool, itr % memberName, realAtt) - call mpas_writeStreamAtt(stream % stream, itr % memberName, realAtt, local_ierr) + call mpas_writeStreamAtt(stream % stream, itr % memberName, realAtt, syncVal=.false., ierr=local_ierr) else if ( itr % dataType == MPAS_POOL_INTEGER ) then call mpas_pool_get_config(stream % att_pool, itr % memberName, intAtt) - call mpas_writeStreamAtt(stream % stream, itr % memberName, intAtt, local_ierr) + call mpas_writeStreamAtt(stream % stream, itr % memberName, intAtt, syncVal=.false., ierr=local_ierr) else if ( itr % dataType == MPAS_POOL_CHARACTER ) then call mpas_pool_get_config(stream % att_pool, itr % memberName, charAtt) - call mpas_writeStreamAtt(stream % stream, itr % memberName, charAtt, local_ierr) + call mpas_writeStreamAtt(stream % stream, itr % memberName, charAtt, syncVal=.false., ierr=local_ierr) else if ( itr % dataType == MPAS_POOL_LOGICAL ) then call mpas_pool_get_config(stream % att_pool, itr % memberName, logAtt) if (logAtt) then - call mpas_writeStreamAtt(stream % stream, itr % memberName, 'YES', local_ierr) + call mpas_writeStreamAtt(stream % stream, itr % memberName, 'YES', syncVal=.false., ierr=local_ierr) else - call mpas_writeStreamAtt(stream % stream, itr % memberName, 'NO', local_ierr) + call mpas_writeStreamAtt(stream % stream, itr % memberName, 'NO', syncVal=.false., ierr=local_ierr) end if end if @@ -3516,7 +4033,7 @@ subroutine build_stream(stream, direction, allFields, allPackages, timeLevelIn, ! Generate file_id and write to stream ! 
call gen_random(idLength, file_id) - call mpas_writeStreamAtt(stream % stream, 'file_id', file_id, local_ierr) + call mpas_writeStreamAtt(stream % stream, 'file_id', file_id, syncVal=.true., ierr=local_ierr) if (local_ierr /= MPAS_STREAM_NOERR) then ierr = MPAS_STREAM_MGR_ERROR return @@ -3604,9 +4121,8 @@ subroutine build_stream(stream, direction, allFields, allPackages, timeLevelIn, call mpas_pool_get_field(allFields, itr % memberName, char0d, timeLevel) call MPAS_streamAddField(stream % stream, char0d) case (1) -! call mpas_pool_get_field(allFields, itr % memberName, char1d, timeLevel) -! call MPAS_streamAddField(stream % stream, char1d) - write(stderrUnit,*) 'Error: In build_stream, unsupported type field1DChar.' + call mpas_pool_get_field(allFields, itr % memberName, char1d, timeLevel) + call MPAS_streamAddField(stream % stream, char1d) end select end select @@ -3927,7 +4443,7 @@ subroutine exch_all_halos(allFields, streamFields, timeLevel, ierr) !{{{ if ( is_decomposed_dim(real1DField % dimNames(1))) then STREAM_DEBUG_WRITE(' -- Exchange halo for '//trim(fieldItr % memberName)) - call mpas_dmpar_exch_halo_field(real1DField) + call mpas_dmpar_field_halo_exch(real1DField % block % domain, fieldItr % memberName, timeLevel=timeLevel) end if else if ( fieldInfo % fieldType == MPAS_POOL_INTEGER ) then if ( timeLevel <= fieldInfo % nTimeLevels ) then @@ -3937,7 +4453,7 @@ subroutine exch_all_halos(allFields, streamFields, timeLevel, ierr) !{{{ end if if ( is_decomposed_dim(int1DField % dimNames(1))) then STREAM_DEBUG_WRITE(' -- Exchange halo for '//trim(fieldItr % memberName)) - call mpas_dmpar_exch_halo_field(int1DField) + call mpas_dmpar_field_halo_exch(int1DField % block % domain, fieldItr % memberName, timeLevel=timeLevel) end if end if @@ -3950,7 +4466,7 @@ subroutine exch_all_halos(allFields, streamFields, timeLevel, ierr) !{{{ end if if ( is_decomposed_dim(real2DField % dimNames(2))) then STREAM_DEBUG_WRITE(' -- Exchange halo for '//trim(fieldItr % 
memberName)) - call mpas_dmpar_exch_halo_field(real2DField) + call mpas_dmpar_field_halo_exch(real2DField % block % domain, fieldItr % memberName, timeLevel=timeLevel) end if else if ( fieldInfo % fieldType == MPAS_POOL_INTEGER ) then if ( timeLevel <= fieldInfo % nTimeLevels ) then @@ -3960,7 +4476,7 @@ subroutine exch_all_halos(allFields, streamFields, timeLevel, ierr) !{{{ end if if ( is_decomposed_dim(int2DField % dimNames(2))) then STREAM_DEBUG_WRITE(' -- Exchange halo for '//trim(fieldItr % memberName)) - call mpas_dmpar_exch_halo_field(int2DField) + call mpas_dmpar_field_halo_exch(int2DField % block % domain, fieldItr % memberName, timeLevel=timeLevel) end if end if @@ -3973,7 +4489,7 @@ subroutine exch_all_halos(allFields, streamFields, timeLevel, ierr) !{{{ end if if ( is_decomposed_dim(real3DField % dimNames(3))) then STREAM_DEBUG_WRITE(' -- Exchange halo for '//trim(fieldItr % memberName)) - call mpas_dmpar_exch_halo_field(real3DField) + call mpas_dmpar_field_halo_exch(real3DField % block % domain, fieldItr % memberName, timeLevel=timeLevel) end if else if ( fieldInfo % fieldType == MPAS_POOL_INTEGER ) then if ( timeLevel <= fieldInfo % nTimeLevels ) then @@ -3983,7 +4499,7 @@ subroutine exch_all_halos(allFields, streamFields, timeLevel, ierr) !{{{ end if if ( is_decomposed_dim(int3DField % dimNames(3))) then STREAM_DEBUG_WRITE(' -- Exchange halo for '//trim(fieldItr % memberName)) - call mpas_dmpar_exch_halo_field(int3DField) + call mpas_dmpar_field_halo_exch(int3DField % block % domain, fieldItr % memberName, timeLevel=timeLevel) end if end if @@ -3996,7 +4512,7 @@ subroutine exch_all_halos(allFields, streamFields, timeLevel, ierr) !{{{ end if if ( is_decomposed_dim(real4DField % dimNames(4))) then STREAM_DEBUG_WRITE(' -- Exchange halo for '//trim(fieldItr % memberName)) - call mpas_dmpar_exch_halo_field(real4DField) + call mpas_dmpar_field_halo_exch(real4DField % block % domain, fieldItr % memberName, timeLevel=timeLevel) end if end if @@ -4009,7 
+4525,7 @@ subroutine exch_all_halos(allFields, streamFields, timeLevel, ierr) !{{{ end if if ( is_decomposed_dim(real5DField % dimNames(5))) then STREAM_DEBUG_WRITE(' -- Exchange halo for '//trim(fieldItr % memberName)) - call mpas_dmpar_exch_halo_field(real5DField) + call mpas_dmpar_field_halo_exch(real5DField % block % domain, fieldItr % memberName, timeLevel=timeLevel) end if end if end if @@ -4097,252 +4613,255 @@ subroutine prewrite_reindex(allFields, streamFields) !{{{ logical :: handle_cellsOnCell, handle_edgesOnCell, handle_verticesOnCell, handle_cellsOnEdge, handle_verticesOnEdge, & handle_edgesOnEdge, handle_cellsOnVertex, handle_edgesOnVertex - integer :: i, j - - - nullify(cellsOnCell_save) - nullify(edgesOnCell_save) - nullify(verticesOnCell_save) - nullify(cellsOnEdge_save) - nullify(verticesOnEdge_save) - nullify(edgesOnEdge_save) - nullify(cellsOnVertex_save) - nullify(edgesOnVertex_save) - - nullify(cellsOnCell) - nullify(edgesOnCell) - nullify(verticesOnCell) - nullify(cellsOnEdge) - nullify(verticesOnEdge) - nullify(edgesOnEdge) - nullify(cellsOnVertex) - nullify(edgesOnVertex) - - ! - ! Determine which connectivity fields exist in this stream - ! - call mpas_pool_begin_iteration(streamFields) - do while ( mpas_pool_get_next_member(streamFields, fieldItr) ) - - ! 
Note: in a stream's field_pool, the names of fields are stored as configs - if ( fieldItr % memberType == MPAS_POOL_CONFIG ) then - call mpas_pool_get_field_info(allFields, fieldItr % memberName, fieldInfo) - - if (trim(fieldItr % memberName) == 'cellsOnCell') then - allocate(cellsOnCell_save) - cellsOnCell_ptr => cellsOnCell_save - call mpas_pool_get_field(allFields, 'cellsOnCell', cellsOnCell) - else if (trim(fieldItr % memberName) == 'edgesOnCell') then - allocate(edgesOnCell_save) - edgesOnCell_ptr => edgesOnCell_save - call mpas_pool_get_field(allFields, 'edgesOnCell', edgesOnCell) - else if (trim(fieldItr % memberName) == 'verticesOnCell') then - allocate(verticesOnCell_save) - verticesOnCell_ptr => verticesOnCell_save - call mpas_pool_get_field(allFields, 'verticesOnCell', verticesOnCell) - else if (trim(fieldItr % memberName) == 'cellsOnEdge') then - allocate(cellsOnEdge_save) - cellsOnEdge_ptr => cellsOnEdge_save - call mpas_pool_get_field(allFields, 'cellsOnEdge', cellsOnEdge) - else if (trim(fieldItr % memberName) == 'verticesOnEdge') then - allocate(verticesOnEdge_save) - verticesOnEdge_ptr => verticesOnEdge_save - call mpas_pool_get_field(allFields, 'verticesOnEdge', verticesOnEdge) - else if (trim(fieldItr % memberName) == 'edgesOnEdge') then - allocate(edgesOnEdge_save) - edgesOnEdge_ptr => edgesOnEdge_save - call mpas_pool_get_field(allFields, 'edgesOnEdge', edgesOnEdge) - else if (trim(fieldItr % memberName) == 'cellsOnVertex') then - allocate(cellsOnVertex_save) - cellsOnVertex_ptr => cellsOnVertex_save - call mpas_pool_get_field(allFields, 'cellsOnVertex', cellsOnVertex) - else if (trim(fieldItr % memberName) == 'edgesOnVertex') then - allocate(edgesOnVertex_save) - edgesOnVertex_ptr => edgesOnVertex_save - call mpas_pool_get_field(allFields, 'edgesOnVertex', edgesOnVertex) - end if - end if - - end do - - ! - ! Reindex connectivity from local to global index space - ! 
- call mpas_pool_get_field(allFields, 'nEdgesOnCell', nEdgesOnCell) - call mpas_pool_get_field(allFields, 'nEdgesOnEdge', nEdgesOnEdge) - call mpas_pool_get_field(allFields, 'indexToCellID', indexToCellID) - call mpas_pool_get_field(allFields, 'indexToEdgeID', indexToEdgeID) - call mpas_pool_get_field(allFields, 'indexToVertexID', indexToVertexID) - - do while (associated(indexToCellID)) - - call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nCells', nCells) - call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nEdges', nEdges) - call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nVertices', nVertices) - call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nCellsSolve', nCellsSolve) - call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nEdgesSolve', nEdgesSolve) - call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nVerticesSolve', nVerticesSolve) - call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'maxEdges', maxEdges) - call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'maxEdges2', maxEdges2) - call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'vertexDegree', vertexDegree) - - if (associated(cellsOnCell)) then - cellsOnCell_ptr % array => cellsOnCell % array - allocate(cellsOnCell % array(maxEdges, nCells+1)) - - do i = 1, nCellsSolve - do j = 1, nEdgesOnCell % array(i) - cellsOnCell % array(j,i) = indexToCellID % array(cellsOnCell_ptr % array(j,i)) - end do - - cellsOnCell % array(nEdgesOnCell%array(i)+1:maxEdges,i) = nCells+1 - end do - - cellsOnCell => cellsOnCell % next - if (associated(cellsOnCell)) then - allocate(cellsOnCell_ptr % next) - cellsOnCell_ptr => cellsOnCell_ptr % next - end if - nullify(cellsOnCell_ptr % next) - end if - - if (associated(edgesOnCell)) then - edgesOnCell_ptr % array => edgesOnCell % array - allocate(edgesOnCell % array(maxEdges, nCells+1)) - - do i = 1, nCellsSolve - do j = 1, nEdgesOnCell % array(i) 
- edgesOnCell % array(j,i) = indexToEdgeID % array(edgesOnCell_ptr % array(j,i)) - end do + integer :: i, j, threadNum + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + nullify(cellsOnCell_save) + nullify(edgesOnCell_save) + nullify(verticesOnCell_save) + nullify(cellsOnEdge_save) + nullify(verticesOnEdge_save) + nullify(edgesOnEdge_save) + nullify(cellsOnVertex_save) + nullify(edgesOnVertex_save) + + nullify(cellsOnCell) + nullify(edgesOnCell) + nullify(verticesOnCell) + nullify(cellsOnEdge) + nullify(verticesOnEdge) + nullify(edgesOnEdge) + nullify(cellsOnVertex) + nullify(edgesOnVertex) - edgesOnCell % array(nEdgesOnCell%array(i)+1:maxEdges,i) = nEdges+1 - end do - - edgesOnCell => edgesOnCell % next - if (associated(edgesOnCell)) then - allocate(edgesOnCell_ptr % next) - edgesOnCell_ptr => edgesOnCell_ptr % next - end if - nullify(edgesOnCell_ptr % next) - end if - - if (associated(verticesOnCell)) then - verticesOnCell_ptr % array => verticesOnCell % array - allocate(verticesOnCell % array(maxEdges, nCells+1)) - - do i = 1, nCellsSolve - do j = 1, nEdgesOnCell % array(i) - verticesOnCell % array(j,i) = indexToVertexID % array(verticesOnCell_ptr % array(j,i)) - end do + ! + ! Determine which connectivity fields exist in this stream + ! + call mpas_pool_begin_iteration(streamFields) + do while ( mpas_pool_get_next_member(streamFields, fieldItr) ) + + ! 
Note: in a stream's field_pool, the names of fields are stored as configs + if ( fieldItr % memberType == MPAS_POOL_CONFIG ) then + call mpas_pool_get_field_info(allFields, fieldItr % memberName, fieldInfo) + + if (trim(fieldItr % memberName) == 'cellsOnCell') then + allocate(cellsOnCell_save) + cellsOnCell_ptr => cellsOnCell_save + call mpas_pool_get_field(allFields, 'cellsOnCell', cellsOnCell) + else if (trim(fieldItr % memberName) == 'edgesOnCell') then + allocate(edgesOnCell_save) + edgesOnCell_ptr => edgesOnCell_save + call mpas_pool_get_field(allFields, 'edgesOnCell', edgesOnCell) + else if (trim(fieldItr % memberName) == 'verticesOnCell') then + allocate(verticesOnCell_save) + verticesOnCell_ptr => verticesOnCell_save + call mpas_pool_get_field(allFields, 'verticesOnCell', verticesOnCell) + else if (trim(fieldItr % memberName) == 'cellsOnEdge') then + allocate(cellsOnEdge_save) + cellsOnEdge_ptr => cellsOnEdge_save + call mpas_pool_get_field(allFields, 'cellsOnEdge', cellsOnEdge) + else if (trim(fieldItr % memberName) == 'verticesOnEdge') then + allocate(verticesOnEdge_save) + verticesOnEdge_ptr => verticesOnEdge_save + call mpas_pool_get_field(allFields, 'verticesOnEdge', verticesOnEdge) + else if (trim(fieldItr % memberName) == 'edgesOnEdge') then + allocate(edgesOnEdge_save) + edgesOnEdge_ptr => edgesOnEdge_save + call mpas_pool_get_field(allFields, 'edgesOnEdge', edgesOnEdge) + else if (trim(fieldItr % memberName) == 'cellsOnVertex') then + allocate(cellsOnVertex_save) + cellsOnVertex_ptr => cellsOnVertex_save + call mpas_pool_get_field(allFields, 'cellsOnVertex', cellsOnVertex) + else if (trim(fieldItr % memberName) == 'edgesOnVertex') then + allocate(edgesOnVertex_save) + edgesOnVertex_ptr => edgesOnVertex_save + call mpas_pool_get_field(allFields, 'edgesOnVertex', edgesOnVertex) + end if + end if - verticesOnCell % array(nEdgesOnCell%array(i)+1:maxEdges,i) = nVertices+1 - end do + end do - verticesOnCell => verticesOnCell % next - if 
(associated(verticesOnCell)) then - allocate(verticesOnCell_ptr % next) - verticesOnCell_ptr => verticesOnCell_ptr % next - end if - nullify(verticesOnCell_ptr % next) - end if + ! + ! Reindex connectivity from local to global index space + ! + call mpas_pool_get_field(allFields, 'nEdgesOnCell', nEdgesOnCell) + call mpas_pool_get_field(allFields, 'nEdgesOnEdge', nEdgesOnEdge) + call mpas_pool_get_field(allFields, 'indexToCellID', indexToCellID) + call mpas_pool_get_field(allFields, 'indexToEdgeID', indexToEdgeID) + call mpas_pool_get_field(allFields, 'indexToVertexID', indexToVertexID) + + do while (associated(indexToCellID)) + + call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nCells', nCells) + call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nEdges', nEdges) + call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nVertices', nVertices) + call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nEdgesSolve', nEdgesSolve) + call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'nVerticesSolve', nVerticesSolve) + call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'maxEdges', maxEdges) + call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'maxEdges2', maxEdges2) + call mpas_pool_get_dimension(indexToCellID % block % dimensions, 'vertexDegree', vertexDegree) + + if (associated(cellsOnCell)) then + cellsOnCell_ptr % array => cellsOnCell % array + allocate(cellsOnCell % array(maxEdges, nCells+1)) + + do i = 1, nCellsSolve + do j = 1, nEdgesOnCell % array(i) + cellsOnCell % array(j,i) = indexToCellID % array(cellsOnCell_ptr % array(j,i)) + end do + + cellsOnCell % array(nEdgesOnCell%array(i)+1:maxEdges,i) = nCells+1 + end do + + cellsOnCell => cellsOnCell % next + if (associated(cellsOnCell)) then + allocate(cellsOnCell_ptr % next) + cellsOnCell_ptr => cellsOnCell_ptr % next + end if + 
nullify(cellsOnCell_ptr % next) + end if - if (associated(cellsOnEdge)) then - cellsOnEdge_ptr % array => cellsOnEdge % array - allocate(cellsOnEdge % array(2, nEdges+1)) + if (associated(edgesOnCell)) then + edgesOnCell_ptr % array => edgesOnCell % array + allocate(edgesOnCell % array(maxEdges, nCells+1)) - do i = 1, nEdgesSolve - cellsOnEdge % array(1,i) = indexToCellID % array(cellsOnEdge_ptr % array(1,i)) - cellsOnEdge % array(2,i) = indexToCellID % array(cellsOnEdge_ptr % array(2,i)) - end do + do i = 1, nCellsSolve + do j = 1, nEdgesOnCell % array(i) + edgesOnCell % array(j,i) = indexToEdgeID % array(edgesOnCell_ptr % array(j,i)) + end do - cellsOnEdge => cellsOnEdge % next - if (associated(cellsOnEdge)) then - allocate(cellsOnEdge_ptr % next) - cellsOnEdge_ptr => cellsOnEdge_ptr % next - end if - nullify(cellsOnEdge_ptr % next) - end if + edgesOnCell % array(nEdgesOnCell%array(i)+1:maxEdges,i) = nEdges+1 + end do - if (associated(verticesOnEdge)) then - verticesOnEdge_ptr % array => verticesOnEdge % array - allocate(verticesOnEdge % array(2, nEdges+1)) + edgesOnCell => edgesOnCell % next + if (associated(edgesOnCell)) then + allocate(edgesOnCell_ptr % next) + edgesOnCell_ptr => edgesOnCell_ptr % next + end if + nullify(edgesOnCell_ptr % next) + end if - do i = 1, nEdgesSolve - verticesOnEdge % array(1,i) = indexToVertexID % array(verticesOnEdge_ptr % array(1,i)) - verticesOnEdge % array(2,i) = indexToVertexID % array(verticesOnEdge_ptr % array(2,i)) - end do + if (associated(verticesOnCell)) then + verticesOnCell_ptr % array => verticesOnCell % array + allocate(verticesOnCell % array(maxEdges, nCells+1)) - verticesOnEdge => verticesOnEdge % next - if (associated(verticesOnEdge)) then - allocate(verticesOnEdge_ptr % next) - verticesOnEdge_ptr => verticesOnEdge_ptr % next - end if - nullify(verticesOnEdge_ptr % next) - end if + do i = 1, nCellsSolve + do j = 1, nEdgesOnCell % array(i) + verticesOnCell % array(j,i) = indexToVertexID % array(verticesOnCell_ptr % 
array(j,i)) + end do - if (associated(edgesOnEdge)) then - edgesOnEdge_ptr % array => edgesOnEdge % array - allocate(edgesOnEdge % array(maxEdges2, nEdges+1)) + verticesOnCell % array(nEdgesOnCell%array(i)+1:maxEdges,i) = nVertices+1 + end do - do i = 1, nEdgesSolve - do j = 1, nEdgesOnEdge % array(i) - edgesOnEdge % array(j,i) = indexToEdgeID % array(edgesOnEdge_ptr % array(j,i)) - end do + verticesOnCell => verticesOnCell % next + if (associated(verticesOnCell)) then + allocate(verticesOnCell_ptr % next) + verticesOnCell_ptr => verticesOnCell_ptr % next + end if + nullify(verticesOnCell_ptr % next) + end if - edgesOnEdge % array(nEdgesOnEdge%array(i)+1:maxEdges2,i) = nEdges+1 - end do + if (associated(cellsOnEdge)) then + cellsOnEdge_ptr % array => cellsOnEdge % array + allocate(cellsOnEdge % array(2, nEdges+1)) + + do i = 1, nEdgesSolve + cellsOnEdge % array(1,i) = indexToCellID % array(cellsOnEdge_ptr % array(1,i)) + cellsOnEdge % array(2,i) = indexToCellID % array(cellsOnEdge_ptr % array(2,i)) + end do + + cellsOnEdge => cellsOnEdge % next + if (associated(cellsOnEdge)) then + allocate(cellsOnEdge_ptr % next) + cellsOnEdge_ptr => cellsOnEdge_ptr % next + end if + nullify(cellsOnEdge_ptr % next) + end if - edgesOnEdge => edgesOnEdge % next - if (associated(edgesOnEdge)) then - allocate(edgesOnEdge_ptr % next) - edgesOnEdge_ptr => edgesOnEdge_ptr % next - end if - nullify(edgesOnEdge_ptr % next) - end if + if (associated(verticesOnEdge)) then + verticesOnEdge_ptr % array => verticesOnEdge % array + allocate(verticesOnEdge % array(2, nEdges+1)) + + do i = 1, nEdgesSolve + verticesOnEdge % array(1,i) = indexToVertexID % array(verticesOnEdge_ptr % array(1,i)) + verticesOnEdge % array(2,i) = indexToVertexID % array(verticesOnEdge_ptr % array(2,i)) + end do + + verticesOnEdge => verticesOnEdge % next + if (associated(verticesOnEdge)) then + allocate(verticesOnEdge_ptr % next) + verticesOnEdge_ptr => verticesOnEdge_ptr % next + end if + nullify(verticesOnEdge_ptr % 
next) + end if - if (associated(cellsOnVertex)) then - cellsOnVertex_ptr % array => cellsOnVertex % array - allocate(cellsOnVertex % array(vertexDegree, nVertices+1)) + if (associated(edgesOnEdge)) then + edgesOnEdge_ptr % array => edgesOnEdge % array + allocate(edgesOnEdge % array(maxEdges2, nEdges+1)) - do i = 1, nVerticesSolve - do j = 1, vertexDegree - cellsOnVertex % array(j,i) = indexToCellID % array(cellsOnVertex_ptr % array(j,i)) - end do - end do + do i = 1, nEdgesSolve + do j = 1, nEdgesOnEdge % array(i) + edgesOnEdge % array(j,i) = indexToEdgeID % array(edgesOnEdge_ptr % array(j,i)) + end do - cellsOnVertex => cellsOnVertex % next - if (associated(cellsOnVertex)) then - allocate(cellsOnVertex_ptr % next) - cellsOnVertex_ptr => cellsOnVertex_ptr % next - end if - nullify(cellsOnVertex_ptr % next) - end if + edgesOnEdge % array(nEdgesOnEdge%array(i)+1:maxEdges2,i) = nEdges+1 + end do - if (associated(edgesOnVertex)) then - edgesOnVertex_ptr % array => edgesOnVertex % array - allocate(edgesOnVertex % array(vertexDegree, nVertices+1)) + edgesOnEdge => edgesOnEdge % next + if (associated(edgesOnEdge)) then + allocate(edgesOnEdge_ptr % next) + edgesOnEdge_ptr => edgesOnEdge_ptr % next + end if + nullify(edgesOnEdge_ptr % next) + end if - do i = 1, nVerticesSolve - do j = 1, vertexDegree - edgesOnVertex % array(j,i) = indexToEdgeID % array(edgesOnVertex_ptr % array(j,i)) - end do - end do + if (associated(cellsOnVertex)) then + cellsOnVertex_ptr % array => cellsOnVertex % array + allocate(cellsOnVertex % array(vertexDegree, nVertices+1)) + + do i = 1, nVerticesSolve + do j = 1, vertexDegree + cellsOnVertex % array(j,i) = indexToCellID % array(cellsOnVertex_ptr % array(j,i)) + end do + end do + + cellsOnVertex => cellsOnVertex % next + if (associated(cellsOnVertex)) then + allocate(cellsOnVertex_ptr % next) + cellsOnVertex_ptr => cellsOnVertex_ptr % next + end if + nullify(cellsOnVertex_ptr % next) + end if - edgesOnVertex => edgesOnVertex % next - if 
(associated(edgesOnVertex)) then - allocate(edgesOnVertex_ptr % next) - edgesOnVertex_ptr => edgesOnVertex_ptr % next - end if - nullify(edgesOnVertex_ptr % next) - end if + if (associated(edgesOnVertex)) then + edgesOnVertex_ptr % array => edgesOnVertex % array + allocate(edgesOnVertex % array(vertexDegree, nVertices+1)) + + do i = 1, nVerticesSolve + do j = 1, vertexDegree + edgesOnVertex % array(j,i) = indexToEdgeID % array(edgesOnVertex_ptr % array(j,i)) + end do + end do + + edgesOnVertex => edgesOnVertex % next + if (associated(edgesOnVertex)) then + allocate(edgesOnVertex_ptr % next) + edgesOnVertex_ptr => edgesOnVertex_ptr % next + end if + nullify(edgesOnVertex_ptr % next) + end if - nEdgesOnCell => nEdgesOnCell % next - nEdgesOnEdge => nEdgesOnEdge % next - indexToCellID => indexToCellID % next - indexToEdgeID => indexToEdgeID % next - indexToVertexID => indexToVertexID % next + nEdgesOnCell => nEdgesOnCell % next + nEdgesOnEdge => nEdgesOnEdge % next + indexToCellID => indexToCellID % next + indexToEdgeID => indexToEdgeID % next + indexToVertexID => indexToVertexID % next - end do + end do + end if end subroutine prewrite_reindex !}}} @@ -4380,141 +4899,144 @@ subroutine postwrite_reindex(allFields, streamFields) !{{{ cellsOnEdge, verticesOnEdge, edgesOnEdge, & cellsOnVertex, edgesOnVertex - integer :: i, j - - - nullify(cellsOnCell) - nullify(edgesOnCell) - nullify(verticesOnCell) - nullify(cellsOnEdge) - nullify(verticesOnEdge) - nullify(edgesOnEdge) - nullify(cellsOnVertex) - nullify(edgesOnVertex) - - if (associated(cellsOnCell_save)) then - cellsOnCell_ptr => cellsOnCell_save - call mpas_pool_get_field(allFields, 'cellsOnCell', cellsOnCell) - end if - if (associated(edgesOnCell_save)) then - edgesOnCell_ptr => edgesOnCell_save - call mpas_pool_get_field(allFields, 'edgesOnCell', edgesOnCell) - end if - if (associated(verticesOnCell_save)) then - verticesOnCell_ptr => verticesOnCell_save - call mpas_pool_get_field(allFields, 'verticesOnCell', 
verticesOnCell) - end if - if (associated(cellsOnEdge_save)) then - cellsOnEdge_ptr => cellsOnEdge_save - call mpas_pool_get_field(allFields, 'cellsOnEdge', cellsOnEdge) - end if - if (associated(verticesOnEdge_save)) then - verticesOnEdge_ptr => verticesOnEdge_save - call mpas_pool_get_field(allFields, 'verticesOnEdge', verticesOnEdge) - end if - if (associated(edgesOnEdge_save)) then - edgesOnEdge_ptr => edgesOnEdge_save - call mpas_pool_get_field(allFields, 'edgesOnEdge', edgesOnEdge) - end if - if (associated(cellsOnVertex_save)) then - cellsOnVertex_ptr => cellsOnVertex_save - call mpas_pool_get_field(allFields, 'cellsOnVertex', cellsOnVertex) - end if - if (associated(edgesOnVertex_save)) then - edgesOnVertex_ptr => edgesOnVertex_save - call mpas_pool_get_field(allFields, 'edgesOnVertex', edgesOnVertex) - end if + integer :: i, j, threadNum + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + nullify(cellsOnCell) + nullify(edgesOnCell) + nullify(verticesOnCell) + nullify(cellsOnEdge) + nullify(verticesOnEdge) + nullify(edgesOnEdge) + nullify(cellsOnVertex) + nullify(edgesOnVertex) + + if (associated(cellsOnCell_save)) then + cellsOnCell_ptr => cellsOnCell_save + call mpas_pool_get_field(allFields, 'cellsOnCell', cellsOnCell) + end if + if (associated(edgesOnCell_save)) then + edgesOnCell_ptr => edgesOnCell_save + call mpas_pool_get_field(allFields, 'edgesOnCell', edgesOnCell) + end if + if (associated(verticesOnCell_save)) then + verticesOnCell_ptr => verticesOnCell_save + call mpas_pool_get_field(allFields, 'verticesOnCell', verticesOnCell) + end if + if (associated(cellsOnEdge_save)) then + cellsOnEdge_ptr => cellsOnEdge_save + call mpas_pool_get_field(allFields, 'cellsOnEdge', cellsOnEdge) + end if + if (associated(verticesOnEdge_save)) then + verticesOnEdge_ptr => verticesOnEdge_save + call mpas_pool_get_field(allFields, 'verticesOnEdge', verticesOnEdge) + end if + if (associated(edgesOnEdge_save)) then + edgesOnEdge_ptr => 
edgesOnEdge_save + call mpas_pool_get_field(allFields, 'edgesOnEdge', edgesOnEdge) + end if + if (associated(cellsOnVertex_save)) then + cellsOnVertex_ptr => cellsOnVertex_save + call mpas_pool_get_field(allFields, 'cellsOnVertex', cellsOnVertex) + end if + if (associated(edgesOnVertex_save)) then + edgesOnVertex_ptr => edgesOnVertex_save + call mpas_pool_get_field(allFields, 'edgesOnVertex', edgesOnVertex) + end if - ! - ! Reset indices for connectivity arrays from global to local index space - ! - call mpas_pool_get_field(allFields, 'indexToCellID', indexToCellID) - do while (associated(indexToCellID)) - - if (associated(cellsOnCell)) then - deallocate(cellsOnCell % array) - cellsOnCell % array => cellsOnCell_ptr % array - nullify(cellsOnCell_ptr % array) - cellsOnCell_ptr => cellsOnCell_ptr % next - cellsOnCell => cellsOnCell % next - end if + ! + ! Reset indices for connectivity arrays from global to local index space + ! + call mpas_pool_get_field(allFields, 'indexToCellID', indexToCellID) + do while (associated(indexToCellID)) + + if (associated(cellsOnCell)) then + deallocate(cellsOnCell % array) + cellsOnCell % array => cellsOnCell_ptr % array + nullify(cellsOnCell_ptr % array) + cellsOnCell_ptr => cellsOnCell_ptr % next + cellsOnCell => cellsOnCell % next + end if - if (associated(edgesOnCell)) then - deallocate(edgesOnCell % array) - edgesOnCell % array => edgesOnCell_ptr % array - nullify(edgesOnCell_ptr % array) - edgesOnCell_ptr => edgesOnCell_ptr % next - edgesOnCell => edgesOnCell % next - end if + if (associated(edgesOnCell)) then + deallocate(edgesOnCell % array) + edgesOnCell % array => edgesOnCell_ptr % array + nullify(edgesOnCell_ptr % array) + edgesOnCell_ptr => edgesOnCell_ptr % next + edgesOnCell => edgesOnCell % next + end if - if (associated(verticesOnCell)) then - deallocate(verticesOnCell % array) - verticesOnCell % array => verticesOnCell_ptr % array - nullify(verticesOnCell_ptr % array) - verticesOnCell_ptr => verticesOnCell_ptr % next 
- verticesOnCell => verticesOnCell % next - end if + if (associated(verticesOnCell)) then + deallocate(verticesOnCell % array) + verticesOnCell % array => verticesOnCell_ptr % array + nullify(verticesOnCell_ptr % array) + verticesOnCell_ptr => verticesOnCell_ptr % next + verticesOnCell => verticesOnCell % next + end if - if (associated(cellsOnEdge)) then - deallocate(cellsOnEdge % array) - cellsOnEdge % array => cellsOnEdge_ptr % array - nullify(cellsOnEdge_ptr % array) - cellsOnEdge_ptr => cellsOnEdge_ptr % next - cellsOnEdge => cellsOnEdge % next - end if + if (associated(cellsOnEdge)) then + deallocate(cellsOnEdge % array) + cellsOnEdge % array => cellsOnEdge_ptr % array + nullify(cellsOnEdge_ptr % array) + cellsOnEdge_ptr => cellsOnEdge_ptr % next + cellsOnEdge => cellsOnEdge % next + end if - if (associated(verticesOnEdge)) then - deallocate(verticesOnEdge % array) - verticesOnEdge % array => verticesOnEdge_ptr % array - nullify(verticesOnEdge_ptr % array) - verticesOnEdge_ptr => verticesOnEdge_ptr % next - verticesOnEdge => verticesOnEdge % next - end if + if (associated(verticesOnEdge)) then + deallocate(verticesOnEdge % array) + verticesOnEdge % array => verticesOnEdge_ptr % array + nullify(verticesOnEdge_ptr % array) + verticesOnEdge_ptr => verticesOnEdge_ptr % next + verticesOnEdge => verticesOnEdge % next + end if - if (associated(edgesOnEdge)) then - deallocate(edgesOnEdge % array) - edgesOnEdge % array => edgesOnEdge_ptr % array - nullify(edgesOnEdge_ptr % array) - edgesOnEdge_ptr => edgesOnEdge_ptr % next - edgesOnEdge => edgesOnEdge % next - end if + if (associated(edgesOnEdge)) then + deallocate(edgesOnEdge % array) + edgesOnEdge % array => edgesOnEdge_ptr % array + nullify(edgesOnEdge_ptr % array) + edgesOnEdge_ptr => edgesOnEdge_ptr % next + edgesOnEdge => edgesOnEdge % next + end if - if (associated(cellsOnVertex)) then - deallocate(cellsOnVertex % array) - cellsOnVertex % array => cellsOnVertex_ptr % array - nullify(cellsOnVertex_ptr % array) - 
cellsOnVertex_ptr => cellsOnVertex_ptr % next - cellsOnVertex => cellsOnVertex % next - end if + if (associated(cellsOnVertex)) then + deallocate(cellsOnVertex % array) + cellsOnVertex % array => cellsOnVertex_ptr % array + nullify(cellsOnVertex_ptr % array) + cellsOnVertex_ptr => cellsOnVertex_ptr % next + cellsOnVertex => cellsOnVertex % next + end if - if (associated(edgesOnVertex)) then - deallocate(edgesOnVertex % array) - edgesOnVertex % array => edgesOnVertex_ptr % array - nullify(edgesOnVertex_ptr % array) - edgesOnVertex_ptr => edgesOnVertex_ptr % next - edgesOnVertex => edgesOnVertex % next - end if + if (associated(edgesOnVertex)) then + deallocate(edgesOnVertex % array) + edgesOnVertex % array => edgesOnVertex_ptr % array + nullify(edgesOnVertex_ptr % array) + edgesOnVertex_ptr => edgesOnVertex_ptr % next + edgesOnVertex => edgesOnVertex % next + end if - indexToCellID => indexToCellID % next - end do + indexToCellID => indexToCellID % next + end do + + if (associated(cellsOnCell_save)) call mpas_deallocate_field(cellsOnCell_save) + if (associated(edgesOnCell_save)) call mpas_deallocate_field(edgesOnCell_save) + if (associated(verticesOnCell_save)) call mpas_deallocate_field(verticesOnCell_save) + if (associated(cellsOnEdge_save)) call mpas_deallocate_field(cellsOnEdge_save) + if (associated(verticesOnEdge_save)) call mpas_deallocate_field(verticesOnEdge_save) + if (associated(edgesOnEdge_save)) call mpas_deallocate_field(edgesOnEdge_save) + if (associated(cellsOnVertex_save)) call mpas_deallocate_field(cellsOnVertex_save) + if (associated(edgesOnVertex_save)) call mpas_deallocate_field(edgesOnVertex_save) - if (associated(cellsOnCell_save)) call mpas_deallocate_field(cellsOnCell_save) - if (associated(edgesOnCell_save)) call mpas_deallocate_field(edgesOnCell_save) - if (associated(verticesOnCell_save)) call mpas_deallocate_field(verticesOnCell_save) - if (associated(cellsOnEdge_save)) call mpas_deallocate_field(cellsOnEdge_save) - if 
(associated(verticesOnEdge_save)) call mpas_deallocate_field(verticesOnEdge_save) - if (associated(edgesOnEdge_save)) call mpas_deallocate_field(edgesOnEdge_save) - if (associated(cellsOnVertex_save)) call mpas_deallocate_field(cellsOnVertex_save) - if (associated(edgesOnVertex_save)) call mpas_deallocate_field(edgesOnVertex_save) - - nullify(cellsOnCell_save) - nullify(edgesOnCell_save) - nullify(verticesOnCell_save) - nullify(cellsOnEdge_save) - nullify(verticesOnEdge_save) - nullify(edgesOnEdge_save) - nullify(cellsOnVertex_save) - nullify(edgesOnVertex_save) + nullify(cellsOnCell_save) + nullify(edgesOnCell_save) + nullify(verticesOnCell_save) + nullify(cellsOnEdge_save) + nullify(verticesOnEdge_save) + nullify(edgesOnEdge_save) + nullify(cellsOnVertex_save) + nullify(edgesOnVertex_save) + end if end subroutine postwrite_reindex !}}} @@ -4761,6 +5283,8 @@ end subroutine postread_reindex !}}} !> routine will reset the iterator for fields within the specified stream, !> which may subsequently iterated over using the !> MPAS_stream_mgr_get_next_field() routine. + !> + !> NOTE: This routine does not support regular expressions for StreamID ! !----------------------------------------------------------------------- subroutine MPAS_stream_mgr_begin_iteration(manager, streamID, ierr) !{{{ @@ -4772,18 +5296,23 @@ subroutine MPAS_stream_mgr_begin_iteration(manager, streamID, ierr) !{{{ integer, intent(out), optional :: ierr !< Output: Return error code type (MPAS_stream_list_type), pointer :: stream - integer :: err_local + integer :: err_local, threadNum + + threadNum = mpas_threading_get_thread_num() if (.not. present(streamID)) then - nullify(manager % currentStream) + if ( threadNum == 0 ) then + nullify(manager % currentStream) + end if else ! ! Check that stream exists ! + nullify(stream) if (.not. 
MPAS_stream_list_query(manager % streams, streamID, stream, ierr=err_local)) then STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' does not exist in stream manager') if (present(ierr)) ierr = MPAS_STREAM_MGR_ERROR @@ -4794,6 +5323,8 @@ subroutine MPAS_stream_mgr_begin_iteration(manager, streamID, ierr) !{{{ end if + !$omp barrier + end subroutine MPAS_stream_mgr_begin_iteration !}}} @@ -4827,20 +5358,32 @@ logical function MPAS_stream_mgr_get_next_stream(manager, streamID, directionPro character (len=StrKIND), intent(out), optional :: filenameIntervalProperty !< Output: String containing the filename interval for the stream integer, intent(out), optional :: clobberProperty !< Output: Interger describing the clobber mode of the stream + integer :: threadNum - if ( associated(manager % currentStream) .and. .not. associated(manager % currentStream % next) ) then - validStream = .false. - return - end if + threadNum = mpas_threading_get_thread_num() - if ( .not. associated(manager % currentStream) ) then + if ( associated(manager % currentStream) ) then + if (.not. associated(manager % currentStream % next) ) then + validStream = .false. + return + else + validStream = .true. + if ( threadNum == 0 ) then + manager % currentStream => manager % currentStream % next + end if + end if + else if ( associated(manager % streams % head) ) then validStream = .true. - manager % currentStream => manager % streams % head + if ( threadNum == 0 ) then + manager % currentStream => manager % streams % head + end if else - validStream = .true. - manager % currentStream => manager % currentStream % next + validStream = .false. + return end if + !$omp barrier + if ( present(streamID) ) then streamID = manager % currentStream % name end if @@ -4902,6 +5445,7 @@ end function MPAS_stream_mgr_get_next_stream !}}} !> provides the name of this field, and .FALSE. otherwise. 
If a field name !> is returned, the optional logical argument isActive may be used to !> determine whether the field is currently active in the stream. + !> NOTE: This routine does not support regular expressions for StreamID ! !----------------------------------------------------------------------- logical function MPAS_stream_mgr_get_next_field(manager, streamID, fieldName, isActive) result(validField) !{{{ @@ -4925,6 +5469,7 @@ logical function MPAS_stream_mgr_get_next_field(manager, streamID, fieldName, is ! ! Check that stream exists ! + nullify(stream) if (.not. MPAS_stream_list_query(manager % streams, streamID, stream, ierr=err_local)) then STREAM_ERROR_WRITE('Requested stream '//trim(streamID)//' does not exist in stream manager') return @@ -4957,6 +5502,37 @@ logical function MPAS_stream_mgr_get_next_field(manager, streamID, fieldName, is end if end function MPAS_stream_mgr_get_next_field !}}} + + + !----------------------------------------------------------------------- + ! logical function MPAS_stream_mgr_stream_exists + ! + !> \brief Determine if a stream exists in a stream manager + !> \author Doug Jacobsen + !> \date 07/29/2015 + !> \details + !> This function takes a stream manager and the name of a stream, and + !> returns a logical describing if the stream exists within the manager or + !> not. It gives no information about if the stream will be "handled" at + !> any point in time, only if it exists within the manager queried. + !> NOTE: This routine does not support regular expressions for StreamID + ! 
+ !----------------------------------------------------------------------- + logical function MPAS_stream_mgr_stream_exists(manager, streamID) result(validStream)!{{{ + + implicit none + + type (MPAS_streamManager_type), intent(inout) :: manager + character (len=*), intent(in) :: streamID + + type (mpas_stream_list_type), pointer :: streamPtr + + nullify(streamPtr) + validStream = mpas_stream_list_query(manager % streams, streamID, streamPtr) + + return + + end function MPAS_stream_mgr_stream_exists!}}} end module mpas_stream_manager diff --git a/src/framework/mpas_stream_manager_types.inc b/src/framework/mpas_stream_manager_types.inc index 7738f884f4..dd53601e34 100644 --- a/src/framework/mpas_stream_manager_types.inc +++ b/src/framework/mpas_stream_manager_types.inc @@ -32,6 +32,7 @@ integer :: numStreams = 0 integer :: errorLevel + type (MPAS_IO_context_type), pointer :: ioContext type (MPAS_Clock_type), pointer :: streamClock type (MPAS_Pool_type), pointer :: allFields type (MPAS_Pool_type), pointer :: allPackages diff --git a/src/framework/mpas_threading.F b/src/framework/mpas_threading.F new file mode 100644 index 0000000000..7ec85a7a99 --- /dev/null +++ b/src/framework/mpas_threading.F @@ -0,0 +1,187 @@ +! Copyright (c) 2013, Los Alamos National Security, LLC (LANS) +! and the University Corporation for Atmospheric Research (UCAR). +! +! Unless noted otherwise source code is licensed under the BSD license. +! Additional copyright and license information can be found in the LICENSE file +! distributed with this code, or at http://mpas-dev.github.com/license.html +! +!----------------------------------------------------------------------- +! mpas_threading +! +!> \brief MPAS Threading Support +!> \author Doug Jacobsen +!> \date 09/09/2015 +!> \details +!> This module will provide interfaces to support functions / routines for OpenMP threading. +! 
+!----------------------------------------------------------------------- + +#define COMMA , +#define THREADING_DEBUG_WRITE(M) ! write(stderrUnit, *) M +#define THREADING_WARN_WRITE(M) write(stderrUnit, *) 'WARNING: '//M +#define THREADING_ERROR_WRITE(M) write(stderrUnit, *) 'ERROR: '//M + +module mpas_threading + + use mpas_kind_types + use mpas_io_units + + implicit none + private + + public :: mpas_threading_get_num_threads, mpas_threading_set_num_threads, mpas_threading_in_parallel + public :: mpas_threading_get_thread_num, mpas_threading_barrier, mpas_threading_get_max_threads + public :: mpas_threading_get_thread_limit + + contains + +!----------------------------------------------------------------------- +! function mpas_threading_get_num_threads +! +!> \brief MPAS Threading number of threads function +!> \author Doug Jacobsen +!> \date 09/09/2015 +!> \details +!> This function returns the number of threads currently available. +! +!----------------------------------------------------------------------- + function mpas_threading_get_num_threads() result(numThreads)!{{{ + integer :: numThreads + integer :: omp_get_num_threads + + numThreads = 1 + +#ifdef MPAS_OPENMP + numThreads = omp_get_num_threads() +#endif + + end function mpas_threading_get_num_threads!}}} + + +!----------------------------------------------------------------------- +! routine mpas_threading_set_num_threads +! +!> \brief MPAS Threading set number of threads routine +!> \author Doug Jacobsen +!> \date 09/09/2015 +!> \details +!> This routine sets the number of threads for the next parallel region. +! +!----------------------------------------------------------------------- + subroutine mpas_threading_set_num_threads(numThreads)!{{{ + integer, intent(in) :: numThreads + +#ifdef MPAS_OPENMP + call omp_set_num_threads(numThreads) +#endif + + end subroutine mpas_threading_set_num_threads!}}} + +!----------------------------------------------------------------------- +! 
function mpas_threading_in_parallel +! +!> \brief MPAS Threading in parallel function +!> \author Doug Jacobsen +!> \date 09/09/2015 +!> \details +!> This function returns a logical where true means it was called within a +!> parallel region, and false means it was not. +! +!----------------------------------------------------------------------- + function mpas_threading_in_parallel() result(parallelRegion)!{{{ + logical :: parallelRegion + logical :: omp_in_parallel + + parallelRegion = .false. + +#ifdef MPAS_OPENMP + parallelRegion = omp_in_parallel() +#endif + + end function mpas_threading_in_parallel!}}} + +!----------------------------------------------------------------------- +! function mpas_threading_get_thread_num +! +!> \brief MPAS Threading get thread number function +!> \author Doug Jacobsen +!> \date 09/09/2015 +!> \details +!> This function returns current thread's number +! +!----------------------------------------------------------------------- + function mpas_threading_get_thread_num() result(threadNum)!{{{ + integer :: threadNum + integer :: omp_get_thread_num + + threadNum = 0 + +#ifdef MPAS_OPENMP + threadNum = omp_get_thread_num() +#endif + + end function mpas_threading_get_thread_num!}}} + +!----------------------------------------------------------------------- +! routine mpas_threading_barrier +! +!> \brief MPAS Threading barrier routine +!> \author Doug Jacobsen +!> \date 10/15/2015 +!> \details +!> This routine implements an OpenMP barrier to synchronize all threads. +! +!----------------------------------------------------------------------- + subroutine mpas_threading_barrier()!{{{ + +#ifdef MPAS_OPENMP + !$omp barrier +#endif + + end subroutine mpas_threading_barrier!}}} + +!----------------------------------------------------------------------- +! function mpas_threading_get_max_threads +! 
+!> \brief MPAS Threading maximum number of threads function +!> \author Doug Jacobsen +!> \date 09/09/2015 +!> \details +!> This function returns maximum number of threads a single MPI process can use. +! +!----------------------------------------------------------------------- + function mpas_threading_get_max_threads() result(maxThreads)!{{{ + integer :: maxThreads + integer :: omp_get_max_threads + + maxThreads = 1 + +#ifdef MPAS_OPENMP + maxThreads = omp_get_max_threads() +#endif + + end function mpas_threading_get_max_threads!}}} + +!----------------------------------------------------------------------- +! function mpas_threading_get_thread_limit +! +!> \brief MPAS Threading thread limit function +!> \author Doug Jacobsen +!> \date 09/09/2015 +!> \details +!> This function returns limit on the total number of threads. +! +!----------------------------------------------------------------------- + function mpas_threading_get_thread_limit() result(threadLimit)!{{{ + integer :: threadLimit + integer :: omp_get_thread_limit + + threadLimit = 1 + +#ifdef MPAS_OPENMP + threadLimit = omp_get_thread_limit() +#endif + + end function mpas_threading_get_thread_limit!}}} + +end module mpas_threading diff --git a/src/framework/mpas_timekeeping.F b/src/framework/mpas_timekeeping.F index dacd62623b..1a3ec956ee 100644 --- a/src/framework/mpas_timekeeping.F +++ b/src/framework/mpas_timekeeping.F @@ -10,6 +10,9 @@ module mpas_timekeeping use mpas_kind_types use mpas_derived_types use mpas_io_units + use mpas_dmpar + use mpas_threading + use mpas_abort, only : mpas_dmpar_global_abort use ESMF use ESMF_BaseMod @@ -166,31 +169,38 @@ subroutine mpas_create_clock(clock, startTime, timeStep, stopTime, runDuration, integer, intent(out), optional :: ierr type (MPAS_Time_type) :: stop_time - - if (present(runDuration)) then - stop_time = startTime + runDuration - if (present(stopTime)) then - if (stopTime /= stop_time) then - if (present(ierr)) ierr = 1 ! 
stopTime and runDuration are inconsistent - write(stderrUnit,*) 'ERROR: MPAS_createClock: stopTime and runDuration are inconsistent' - return + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if (present(runDuration)) then + stop_time = startTime + runDuration + if (present(stopTime)) then + if (stopTime /= stop_time) then + if (present(ierr)) ierr = 1 ! stopTime and runDuration are inconsistent + write(stderrUnit,*) 'ERROR: MPAS_createClock: stopTime and runDuration are inconsistent' + return + end if end if + else if (present(stopTime)) then + stop_time = stopTime + else + if (present(ierr)) ierr = 1 ! neither stopTime nor runDuration are specified + write(stderrUnit,*) 'ERROR: MPAS_createClock: neither stopTime nor runDuration are specified' + return end if - else if (present(stopTime)) then - stop_time = stopTime - else - if (present(ierr)) ierr = 1 ! neither stopTime nor runDuration are specified - write(stderrUnit,*) 'ERROR: MPAS_createClock: neither stopTime nor runDuration are specified' - return - end if - clock % c = ESMF_ClockCreate(TimeStep=timeStep%ti, StartTime=startTime%t, StopTime=stop_time%t, rc=ierr) - if (present(ierr)) then - if (ierr == ESMF_SUCCESS) ierr = 0 + clock % c = ESMF_ClockCreate(TimeStep=timeStep%ti, StartTime=startTime%t, StopTime=stop_time%t, rc=ierr) + if (present(ierr)) then + if (ierr == ESMF_SUCCESS) ierr = 0 + end if + clock % direction = MPAS_FORWARD + clock % nAlarms = 0 + nullify(clock % alarmListHead) end if - clock % direction = MPAS_FORWARD - clock % nAlarms = 0 - nullify(clock % alarmListHead) + + !$omp barrier end subroutine mpas_create_clock @@ -203,19 +213,26 @@ subroutine mpas_destroy_clock(clock, ierr) integer, intent(out), optional :: ierr type (MPAS_Alarm_type), pointer :: alarmPtr + integer :: threadNum - alarmPtr => clock % alarmListHead - do while (associated(alarmPtr)) - clock % alarmListHead => alarmPtr % next - deallocate(alarmPtr) + threadNum = 
mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then alarmPtr => clock % alarmListHead - end do + do while (associated(alarmPtr)) + clock % alarmListHead => alarmPtr % next + deallocate(alarmPtr) + alarmPtr => clock % alarmListHead + end do - call ESMF_ClockDestroy(clock % c, rc=ierr) - if (present(ierr)) then - if (ierr == ESMF_SUCCESS) ierr = 0 + call ESMF_ClockDestroy(clock % c, rc=ierr) + if (present(ierr)) then + if (ierr == ESMF_SUCCESS) ierr = 0 + end if end if + !$omp barrier + end subroutine mpas_destroy_clock @@ -278,22 +295,29 @@ subroutine mpas_set_clock_direction(clock, direction, ierr) integer, intent(out), optional :: ierr type (MPAS_TimeInterval_type) :: timeStep + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() if (direction == MPAS_FORWARD .and. clock % direction == MPAS_FORWARD) return if (direction == MPAS_BACKWARD .and. clock % direction == MPAS_BACKWARD) return - clock % direction = direction - call ESMF_ClockGet(clock % c, TimeStep=timeStep%ti, rc=ierr) - timeStep = neg_ti(timeStep) - call ESMF_ClockSet(clock % c, TimeStep=timeStep%ti, rc=ierr) + if ( threadNum == 0 ) then + clock % direction = direction + call ESMF_ClockGet(clock % c, TimeStep=timeStep%ti, rc=ierr) + timeStep = neg_ti(timeStep) + call ESMF_ClockSet(clock % c, TimeStep=timeStep%ti, rc=ierr) - ! specify a valid previousRingTime for each alarm - call mpas_calibrate_alarms(clock, ierr); + ! 
specify a valid previousRingTime for each alarm + call mpas_calibrate_alarms(clock, ierr); - if (present(ierr)) then - if (ierr == ESMF_SUCCESS) ierr = 0 + if (present(ierr)) then + if (ierr == ESMF_SUCCESS) ierr = 0 + end if end if + !$omp barrier + end subroutine mpas_set_clock_direction @@ -319,12 +343,19 @@ subroutine mpas_set_clock_timestep(clock, timeStep, ierr) type (MPAS_Clock_type), intent(inout) :: clock type (MPAS_TimeInterval_type), intent(in) :: timeStep integer, intent(out), optional :: ierr + integer :: threadNum - call ESMF_ClockSet(clock % c, TimeStep=timeStep%ti, rc=ierr) - if (present(ierr)) then - if (ierr == ESMF_SUCCESS) ierr = 0 + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + call ESMF_ClockSet(clock % c, TimeStep=timeStep%ti, rc=ierr) + if (present(ierr)) then + if (ierr == ESMF_SUCCESS) ierr = 0 + end if end if + !$omp barrier + end subroutine mpas_set_clock_timestep @@ -356,19 +387,26 @@ subroutine mpas_advance_clock(clock, timeStep, ierr) integer, intent(out), optional :: ierr type (ESMF_TimeInterval) :: time_step + integer :: threadNum - if (present(timeStep)) then - call ESMF_ClockGet(clock % c, TimeStep=time_step, rc=ierr) - call ESMF_ClockSet(clock % c, TimeStep=timeStep % ti, rc=ierr) - call ESMF_ClockAdvance(clock % c, rc=ierr) - call ESMF_ClockSet(clock % c, TimeStep=time_step, rc=ierr) - else - call ESMF_ClockAdvance(clock % c, rc=ierr) - end if - if (present(ierr)) then - if (ierr == ESMF_SUCCESS) ierr = 0 + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if (present(timeStep)) then + call ESMF_ClockGet(clock % c, TimeStep=time_step, rc=ierr) + call ESMF_ClockSet(clock % c, TimeStep=timeStep % ti, rc=ierr) + call ESMF_ClockAdvance(clock % c, rc=ierr) + call ESMF_ClockSet(clock % c, TimeStep=time_step, rc=ierr) + else + call ESMF_ClockAdvance(clock % c, rc=ierr) + end if + if (present(ierr)) then + if (ierr == ESMF_SUCCESS) ierr = 0 + end if end if + !$omp barrier + end 
subroutine mpas_advance_clock @@ -380,21 +418,28 @@ subroutine mpas_set_clock_time(clock, clock_time, whichTime, ierr) type (MPAS_Time_type), intent(in) :: clock_time integer, intent(in) :: whichTime integer, intent(out), optional :: ierr - - if (whichTime == MPAS_NOW) then - call ESMF_ClockSet(clock % c, CurrTime=clock_time%t, rc=ierr) - call mpas_calibrate_alarms(clock, ierr); - else if (whichTime == MPAS_START_TIME) then - call ESMF_ClockSet(clock % c, StartTime=clock_time%t, rc=ierr) - else if (whichTime == MPAS_STOP_TIME) then - call ESMF_ClockSet(clock % c, StopTime=clock_time%t, rc=ierr) - else if (present(ierr)) then - ierr = 1 - end if - if (present(ierr)) then - if (ierr == ESMF_SUCCESS) ierr = 0 + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() + + if ( threadNum == 0 ) then + if (whichTime == MPAS_NOW) then + call ESMF_ClockSet(clock % c, CurrTime=clock_time%t, rc=ierr) + call mpas_calibrate_alarms(clock, ierr); + else if (whichTime == MPAS_START_TIME) then + call ESMF_ClockSet(clock % c, StartTime=clock_time%t, rc=ierr) + else if (whichTime == MPAS_STOP_TIME) then + call ESMF_ClockSet(clock % c, StopTime=clock_time%t, rc=ierr) + else if (present(ierr)) then + ierr = 1 + end if + if (present(ierr)) then + if (ierr == ESMF_SUCCESS) ierr = 0 + end if end if + !$omp barrier + end subroutine mpas_set_clock_time @@ -438,56 +483,70 @@ subroutine mpas_add_clock_alarm(clock, alarmID, alarmTime, alarmTimeInterval, ie integer, intent(out), optional :: ierr type (MPAS_Alarm_type), pointer :: alarmPtr + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() + + if ( len_trim(alarmID) > ShortStrKIND ) then + if ( threadNum == 0 ) then + write(stderrUnit, *) 'ERROR: Length of alarmID ''' // trim(alarmID) // ''' is ', len_trim(alarmID), ' which is longer than the allowable length of ', ShortStrKIND + end if + call mpas_dmpar_global_abort('ERROR: alarmID length is too long') + end if ! 
Add a new entry to the linked list of alarms for this clock - if (.not. associated(clock % alarmListHead)) then - allocate(clock % alarmListHead) - nullify(clock % alarmListHead % next) - alarmPtr => clock % alarmListHead - else - alarmPtr => clock % alarmListHead - do while (associated(alarmPtr % next)) - if (trim(alarmPtr % alarmID) == trim(alarmID)) then - write(stderrUnit,*) 'OOPS -- we have a duplicate alarmID', trim(alarmID) - if (present(ierr)) ierr = 1 - return - end if + if ( threadNum == 0 ) then + if (.not. associated(clock % alarmListHead)) then + allocate(clock % alarmListHead) + nullify(clock % alarmListHead % next) + alarmPtr => clock % alarmListHead + else + alarmPtr => clock % alarmListHead + do while (associated(alarmPtr % next)) + if (trim(alarmPtr % alarmID) == trim(alarmID)) then + write(stderrUnit,*) 'OOPS -- we have a duplicate alarmID', trim(alarmID) + if (present(ierr)) ierr = 1 + return + end if + alarmPtr => alarmPtr % next + end do + if (trim(alarmPtr % alarmID) == trim(alarmID)) then + write(stderrUnit,*) 'OOPS -- we have a duplicate alarmID', trim(alarmID) + if (present(ierr)) ierr = 1 + return + end if + allocate(alarmPtr % next) alarmPtr => alarmPtr % next - end do - if (trim(alarmPtr % alarmID) == trim(alarmID)) then - write(stderrUnit,*) 'OOPS -- we have a duplicate alarmID', trim(alarmID) - if (present(ierr)) ierr = 1 - return - end if - allocate(alarmPtr % next) - alarmPtr => alarmPtr % next - nullify(alarmPtr % next) - end if + nullify(alarmPtr % next) + end if - alarmPtr % alarmID = trim(alarmID) + alarmPtr % alarmID = trim(alarmID) - clock % nAlarms = clock % nAlarms + 1 + clock % nAlarms = clock % nAlarms + 1 - alarmPtr % isSet = .true. - alarmPtr % ringTime = alarmTime - + alarmPtr % isSet = .true. + alarmPtr % ringTime = alarmTime + - if (present(alarmTimeInterval)) then - alarmPtr % isRecurring = .true. 
- alarmPtr % ringTimeInterval = alarmTimeInterval - if(clock % direction == MPAS_FORWARD) then - alarmPtr % prevRingTime = alarmTime - alarmTimeInterval + if (present(alarmTimeInterval)) then + alarmPtr % isRecurring = .true. + alarmPtr % ringTimeInterval = alarmTimeInterval + if(clock % direction == MPAS_FORWARD) then + alarmPtr % prevRingTime = alarmTime - alarmTimeInterval + else + alarmPtr % prevRingTime = alarmTime + alarmTimeInterval + end if else - alarmPtr % prevRingTime = alarmTime + alarmTimeInterval + alarmPtr % isRecurring = .false. + alarmPtr % prevRingTime = alarmTime + end if + if (present(ierr)) then + if (ierr == ESMF_SUCCESS) ierr = 0 end if - else - alarmPtr % isRecurring = .false. - alarmPtr % prevRingTime = alarmTime - end if - if (present(ierr)) then - if (ierr == ESMF_SUCCESS) ierr = 0 end if + !$omp barrier + end subroutine mpas_add_clock_alarm @@ -501,24 +560,31 @@ subroutine mpas_remove_clock_alarm(clock, alarmID, ierr) type (MPAS_Alarm_type), pointer :: alarmPtr type (MPAS_Alarm_type), pointer :: alarmParentPtr + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() if (present(ierr)) ierr = 0 - alarmPtr => clock % alarmListHead - alarmParentPtr => alarmPtr - do while (associated(alarmPtr)) - if (trim(alarmPtr % alarmID) == trim(alarmID)) then - if (trim(alarmPtr % alarmID) == trim(clock % alarmListHead % alarmID)) then - clock % alarmListHead => alarmPtr % next - else - alarmParentPtr % next => alarmPtr % next - end if - deallocate(alarmPtr) - exit - end if + if ( threadNum == 0 ) then + alarmPtr => clock % alarmListHead alarmParentPtr => alarmPtr - alarmPtr => alarmPtr % next - end do + do while (associated(alarmPtr)) + if (trim(alarmPtr % alarmID) == trim(alarmID)) then + if (trim(alarmPtr % alarmID) == trim(clock % alarmListHead % alarmID)) then + clock % alarmListHead => alarmPtr % next + else + alarmParentPtr % next => alarmPtr % next + end if + deallocate(alarmPtr) + exit + end if + alarmParentPtr => alarmPtr + 
alarmPtr => alarmPtr % next + end do + end if + + !$omp barrier end subroutine mpas_remove_clock_alarm @@ -647,31 +713,37 @@ subroutine mpas_print_alarm(clock, alarmID, ierr) type (MPAS_Alarm_type), pointer :: alarmPtr character (len=StrKIND) :: printString + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() ierr = 0 - alarmPtr => clock % alarmListHead - do while (associated(alarmPtr)) - if (trim(alarmPtr % alarmID) == trim(alarmID)) then - write(stderrUnit,*) 'ALARM ', trim(alarmID) + if ( threadNum == 0 ) then + alarmPtr => clock % alarmListHead + do while (associated(alarmPtr)) + if (trim(alarmPtr % alarmID) == trim(alarmID)) then + write(stderrUnit,*) 'ALARM ', trim(alarmID) - write(stderrUnit,*) 'isRecurring', alarmPtr % isRecurring - - write(stderrUnit,*) 'isSet', alarmPtr % isSet + write(stderrUnit,*) 'isRecurring', alarmPtr % isRecurring + + write(stderrUnit,*) 'isSet', alarmPtr % isSet - call mpas_get_time(alarmPtr % ringTime, dateTimeString=printString, ierr=ierr) - write(stderrUnit,*) 'ringTime', printString + call mpas_get_time(alarmPtr % ringTime, dateTimeString=printString, ierr=ierr) + write(stderrUnit,*) 'ringTime', printString - call mpas_get_time(alarmPtr % prevRingTime, dateTimeString=printString, ierr=ierr) - write(stderrUnit,*) 'prevRingTime', printString + call mpas_get_time(alarmPtr % prevRingTime, dateTimeString=printString, ierr=ierr) + write(stderrUnit,*) 'prevRingTime', printString - call mpas_get_timeInterval(alarmPtr % ringTimeInterval, timeString=printString, ierr=ierr) - write(stderrUnit,*) 'ringTimeInterval', printString - - exit - end if - alarmPtr => alarmPtr % next - end do + call mpas_get_timeInterval(alarmPtr % ringTimeInterval, timeString=printString, ierr=ierr) + write(stderrUnit,*) 'ringTimeInterval', printString + + exit + end if + alarmPtr => alarmPtr % next + end do + end if + !$omp barrier end subroutine mpas_print_alarm @@ -802,44 +874,51 @@ subroutine mpas_reset_clock_alarm(clock, alarmID, interval, 
ierr) type (MPAS_TimeInterval_type) :: nowInterval, nowRemainder integer :: nDivs + integer :: threadNum - if (present(ierr)) ierr = 0 + threadNum = mpas_threading_get_thread_num() - alarmPtr => clock % alarmListHead - do while (associated(alarmPtr)) - - if (trim(alarmPtr % alarmID) == trim(alarmID)) then - - if (mpas_in_ringing_envelope(clock, alarmPtr, interval, ierr)) then + if (present(ierr)) ierr = 0 - if (.not. alarmPtr % isRecurring) then - alarmPtr % isSet = .false. - else - alarmNow = mpas_get_clock_time(clock, MPAS_NOW, ierr) + if ( threadNum == 0 ) then + alarmPtr => clock % alarmListHead + do while (associated(alarmPtr)) + + if (trim(alarmPtr % alarmID) == trim(alarmID)) then - if(clock % direction == MPAS_FORWARD) then - if (present(interval)) then - alarmNow = alarmNow + interval - end if + if (mpas_in_ringing_envelope(clock, alarmPtr, interval, ierr)) then - nowInterval = alarmNow - alarmPtr % prevRingTime - call mpas_interval_division(alarmPtr % prevRingTime, nowInterval, alarmPtr % ringTimeInterval, nDivs, nowRemainder) - alarmPtr % prevRingTime = alarmNow - nowRemainder + if (.not. alarmPtr % isRecurring) then + alarmPtr % isSet = .false. 
else - if (present(interval)) then - alarmNow = alarmNow - interval + alarmNow = mpas_get_clock_time(clock, MPAS_NOW, ierr) + + if(clock % direction == MPAS_FORWARD) then + if (present(interval)) then + alarmNow = alarmNow + interval + end if + + nowInterval = alarmNow - alarmPtr % prevRingTime + call mpas_interval_division(alarmPtr % prevRingTime, nowInterval, alarmPtr % ringTimeInterval, nDivs, nowRemainder) + alarmPtr % prevRingTime = alarmNow - nowRemainder + else + if (present(interval)) then + alarmNow = alarmNow - interval + end if + + nowInterval = alarmPtr % prevRingTime - alarmNow + call mpas_interval_division(alarmPtr % prevRingTime, nowInterval, alarmPtr % ringTimeInterval, nDivs, nowRemainder) + alarmPtr % prevRingTime = alarmNow + nowRemainder end if - - nowInterval = alarmPtr % prevRingTime - alarmNow - call mpas_interval_division(alarmPtr % prevRingTime, nowInterval, alarmPtr % ringTimeInterval, nDivs, nowRemainder) - alarmPtr % prevRingTime = alarmNow + nowRemainder end if end if + exit end if - exit - end if - alarmPtr => alarmPtr % next - end do + alarmPtr => alarmPtr % next + end do + end if + + !$omp barrier end subroutine mpas_reset_clock_alarm @@ -858,59 +937,66 @@ subroutine mpas_calibrate_alarms(clock, ierr) type (MPAS_Time_type) :: negativeNeighborRingTime type (MPAS_Time_type) :: positiveNeighborRingTime type (MPAS_Alarm_type), pointer :: alarmPtr + integer :: threadNum + + threadNum = mpas_threading_get_thread_num() now = mpas_get_clock_time(clock, MPAS_NOW, ierr) - alarmPtr => clock % alarmListHead - do while (associated(alarmPtr)) - - if (.not. alarmPtr % isRecurring) then - alarmPtr % isSet = .true. 
- else - - previousRingTime = alarmPtr % prevRingTime - - if (previousRingTime <= now) then - - do while(previousRingTime <= now) - previousRingTime = previousRingTime + alarmPtr % ringTimeInterval - end do - positiveNeighborRingTime = previousRingTime - - do while(previousRingTime >= now) - previousRingTime = previousRingTime - alarmPtr % ringTimeInterval - end do - negativeNeighborRingTime = previousRingTime + if ( threadNum == 0 ) then + alarmPtr => clock % alarmListHead + do while (associated(alarmPtr)) + if (.not. alarmPtr % isRecurring) then + alarmPtr % isSet = .true. else + + previousRingTime = alarmPtr % prevRingTime - do while(previousRingTime >= now) - previousRingTime = previousRingTime - alarmPtr % ringTimeInterval - end do - negativeNeighborRingTime = previousRingTime + if (previousRingTime <= now) then + + do while(previousRingTime <= now) + previousRingTime = previousRingTime + alarmPtr % ringTimeInterval + end do + positiveNeighborRingTime = previousRingTime + + do while(previousRingTime >= now) + previousRingTime = previousRingTime - alarmPtr % ringTimeInterval + end do + negativeNeighborRingTime = previousRingTime + + else - do while(previousRingTime <= now) - previousRingTime = previousRingTime + alarmPtr % ringTimeInterval - end do - positiveNeighborRingTime = previousRingTime - - end if + do while(previousRingTime >= now) + previousRingTime = previousRingTime - alarmPtr % ringTimeInterval + end do + negativeNeighborRingTime = previousRingTime - if (clock % direction == MPAS_FORWARD) then - alarmPtr % prevRingTime = negativeNeighborRingTime - else - alarmPtr % prevRingTime = positiveNeighborRingTime - end if + do while(previousRingTime <= now) + previousRingTime = previousRingTime + alarmPtr % ringTimeInterval + end do + positiveNeighborRingTime = previousRingTime + + end if - end if + if (clock % direction == MPAS_FORWARD) then + alarmPtr % prevRingTime = negativeNeighborRingTime + else + alarmPtr % prevRingTime = positiveNeighborRingTime + 
end if + + end if - alarmPtr => alarmPtr % next - - end do + alarmPtr => alarmPtr % next + + end do - if (present(ierr)) then - if (ierr == ESMF_SUCCESS) ierr = 0 + if (present(ierr)) then + if (ierr == ESMF_SUCCESS) ierr = 0 + end if end if + + !$omp barrier end subroutine mpas_calibrate_alarms @@ -1230,6 +1316,8 @@ subroutine mpas_set_timeInterval(interval, YY, MM, DD, H, M, S, S_n, S_d, S_i8, if (present(ierr)) ierr = 1 write(stderrUnit,*) 'ERROR: Invalid TimeInterval string ', trim(timeString) return + else ! no second decimals are present - do nothing here + deallocate(subStrings) end if call mpas_split_string(timeString_, "_", subStrings) @@ -1492,14 +1580,20 @@ type (MPAS_TimeInterval_type) function div_ti_n(ti, n) end function div_ti_n + !----------------------------------------------------------------------- ! routine mpas_interval_division ! !> \brief This routine computes the number intervals that fit into another interval. - !> \author Michael Duda, Doug Jacobsen - !> \date 10/02/2014 - !> \details This routine is a wrapper to two different methods of computing - !> the number of intervals that fit into another interval. + !> \author Michael Duda, Doug Jacobsen, Daniel Henderson + !> \date 4 October 2016 + !> \details + !> This routine performs time-interval division on any intervals with cost + !> that is proportional to the log of the result, n. + !> It works by first performing a forward search, doubling intervals, until + !> it finds the first time larger than the target time. Once this time is + !> found, intervals are added (halving the interval) while the time is still + !> less than the target time. 
!> !----------------------------------------------------------------------- subroutine mpas_interval_division(ref_time, num, den, n, rem) @@ -1512,191 +1606,80 @@ subroutine mpas_interval_division(ref_time, num, den, n, rem) integer, intent(out) :: n type (MPAS_TimeInterval_type), intent(out) :: rem - type (MPAS_TimeInterval_type) :: newNum, newDen - integer :: days, secondsNum, secondsDen - integer (kind=I8KIND) :: seconds - - if ( num % ti % YR == 0 .and. num % ti % MM == 0 .and. den % ti % YR == 0 .and. den % ti % MM == 0 ) then - call mpas_interval_division_log(num, den, n, rem) - else - call mpas_interval_division_linear(ref_time, num, den, n, rem) - end if - - end subroutine mpas_interval_division - - !----------------------------------------------------------------------- - ! routine mpas_interval_division_log - ! - !> \brief This routine computes the number intervals that fit into another interval using a log search. - !> \author Michael Duda, Doug Jacobsen - !> \date 10/02/2014 - !> \details This routine computes the number of intervals that fit into - !> another time interval using a log search. It is preferred over the - !> _linear alternative, but only works when the intervals are in terms of days - !> or smaller. - !> - !----------------------------------------------------------------------- - subroutine mpas_interval_division_log(num, den, n, rem) - - implicit none - - type (MPAS_TimeInterval_type), intent(in) :: num - type (MPAS_TimeInterval_type), intent(in) :: den - integer, intent(out) :: n - type (MPAS_TimeInterval_type), intent(out) :: rem + integer :: m + type (MPAS_Time_type) :: target_time + type (MPAS_Time_type) :: updated_time type (MPAS_TimeInterval_type) :: temp type (MPAS_TimeInterval_type) :: zero - integer :: nn - - call mpas_set_timeInterval(zero, S=0) - ! - ! If the numerator is smaller than the denominator, just return the numerator as the remainder - ! 
- if (num < den) then - n = 0 - rem = num - return - end if + call mpas_set_timeInterval(zero, S=0) ! ! Avoid division by zero ! if (den == zero) then - write(stderrUnit,*) 'Error: Attempting to divide by zero.\n' + write(stderrUnit,*) 'Error: Attempting to divide by zero.' n = 0 rem = zero return end if - ! - ! Begin by finding the smallest multiple of the denominator that is at least as large as the numerator and is also a power of two - ! - temp = den - nn = 1 - do while (temp <= num) - temp = temp * 2 - nn = nn * 2 - end do + target_time = ref_time + num + updated_time = ref_time + den ! - ! Dividing by two, we're guaranteed that temp is at most the value of the numerator + ! numerator == denominator ! - temp = temp / 2 - nn = nn / 2 + if (target_time == updated_time) then + n = 1 + rem = zero + return + end if + ! - ! Work backwards to zero + ! denominator > numerator ! - n = 0 - rem = num - do while (nn > 0) - if (temp <= rem) then - rem = rem - temp - n = n + nn - end if - nn = nn / 2 - temp = temp / 2 - end do - - end subroutine mpas_interval_division_log - - - !----------------------------------------------------------------------- - ! routine mpas_interval_division_linear - ! - !> \brief This routine computes the number intervals that fit into another interval using a linear search. - !> \author Michael Duda, Doug Jacobsen - !> \date 10/02/2014 - !> \details This routine computes the number of intervals that fit into - !> another time interval using a linear search. It is slower than the _log - !> alternative, but works when intervals contain months or longer interval - !> sections. 
- !> - !----------------------------------------------------------------------- - subroutine mpas_interval_division_linear(ref_time, num, den, n, rem) - - implicit none - - type (MPAS_Time_type), intent(in) :: ref_time - type (MPAS_TimeInterval_type), intent(in) :: num - type (MPAS_TimeInterval_type), intent(in) :: den - integer, intent(out) :: n - type (MPAS_TimeInterval_type), intent(out) :: rem - - integer :: m - - type (MPAS_Time_type) :: target_time - type (MPAS_Time_type) :: updated_time, mid_time - - type (MPAS_TimeInterval_type) :: temp, mid_int - type (MPAS_TimeInterval_type) :: zero - - target_time = ref_time + num - - updated_time = ref_time + den - - n = 0 - - ! If the denominator is larger than the numerator, return 0 intervals, - ! and the numerator as the remainder if ( target_time < updated_time ) then + n = 0 rem = num return end if ! One interval of den already fits into num - n = n + 1 - temp = den + n = 1 - ! Search forward, doubling the interval each time. + ! Search forward, doubling the interval each time do while (target_time > updated_time) n = n * 2 temp = den * n updated_time = ref_time + temp end do - ! Setup midpoint of search - ! The last value of n puts updated_time after target_time, need to back off and find the final time. n = n / 2 m = n - mid_int = den * n - temp = mid_int - updated_time = ref_time + mid_int + temp + temp = den * n + updated_time = ref_time + temp - ! Seach backward, halving the interval each time. - do while (target_time < updated_time) - m = m / 2 - temp = den * m - updated_time = ref_time + mid_int + temp - end do - ! Final number of interavls is n + m - n = n + m + ! Search backward, halving the interval each time + do while ( m > 0 ) + temp = den * m - ! Do a final linear search, just to ensure we aren't missing any divisions. 
- temp = den * n - updated_time = ref_time + temp + if ( updated_time + temp <= target_time ) then + updated_time = updated_time + temp + n = n + m + end if - do while (target_time > updated_time) - n = n + 1 - updated_time = updated_time + den + m = m / 2 end do - ! Here, if updated_time is larger than target time. Need to subtract den once, and compute remainder - if ( updated_time > target_time ) then - updated_time = updated_time - den - n = n - 1 - rem = target_time - updated_time - else - call mpas_set_timeInterval(rem, S=0) - end if + rem = target_time - updated_time - return - end subroutine mpas_interval_division_linear + end subroutine mpas_interval_division logical function eq_t_t(t1, t2) @@ -2021,12 +2004,14 @@ end function isLeapYear !> $m -> minute !> $s -> second !> $g -> multi-grid level + !> $B -> block ID, if blockID == -1, $B -> 'all' !----------------------------------------------------------------------- - subroutine mpas_expand_string(timeStamp, inString, outString)!{{{ + subroutine mpas_expand_string(timeStamp, blockID, inString, outString)!{{{ implicit none character (len=*), intent(in) :: timeStamp + integer, intent(in) :: blockID character (len=*), intent(in) :: inString character (len=StrKIND), intent(out) :: outString @@ -2084,8 +2069,36 @@ subroutine mpas_expand_string(timeStamp, inString, outString)!{{{ call mpas_get_time(curTime, S=second) write(timePart, '(i0.2)') second outString = trim(outString) // trim(timePart) + case ('S') + call mpas_get_time(curTime, H=hour) + call mpas_get_time(curTime, M=minute) + call mpas_get_time(curTime, S=second) + second = second + 60 * minute + 3600 * hour + write(timePart, '(i0.5)') second + outString = trim(outString) // trim(timePart) ! case ('G') ! 
Expands to multi-grid level + case ('B') + if ( blockID == -1 ) then + write(timePart, '(a)') 'all' + else if ( blockID < 10 ) then + write(timePart, '(i1)') blockID + else if ( blockID < 100 ) then + write(timePart, '(i2)') blockID + else if ( blockID < 1000 ) then + write(timePart, '(i3)') blockID + else if ( blockID < 10000 ) then + write(timePart, '(i4)') blockID + else if ( blockID < 100000 ) then + write(timePart, '(i5)') blockID + else if ( blockID < 1000000 ) then + write(timePart, '(i6)') blockID + else if ( blockID < 10000000 ) then + write(timePart, '(i7)') blockID + else + call mpas_dmpar_global_abort('ERROR: Block expansion does not yet support block IDs with more than 7 digits') + end if + outString = trim(outString) // trim(timePart) case default write(stderrUnit, *) 'ERROR: mpas_expand_string option $', inString(i:i), ' is not a valid expansion character.' call mpas_dmpar_global_abort('ERROR: mpas_timekeeping') @@ -2111,6 +2124,8 @@ end module mpas_timekeeping subroutine wrf_error_fatal(msg) + use mpas_abort, only : mpas_dmpar_global_abort + implicit none character (len=*) :: msg diff --git a/src/framework/mpas_timer.F b/src/framework/mpas_timer.F index b4ac3c1af9..05d2e18041 100644 --- a/src/framework/mpas_timer.F +++ b/src/framework/mpas_timer.F @@ -10,12 +10,13 @@ ! mpas_timer ! !> \brief MPAS Timer module -!> \author Michael Duda, Doug Jacobsen -!> \date 03/27/13 -!> \details -!> This module provides developers with internal timer routines. These can be -!> use to profile various parts of code within MPAS. Calls to TAU happen in this module as well -!> to provide more detailed profiling. +!> \author Doug Jacobsen +!> \date 12/22/2015 +!> \details +!> This module provides developers with internal timer routines. +!> Additionally it provides standard interfaces to additional timer libarries, +!> such as tau or gptl. +!> Timers are stored as trees of lists of timers. ! 
!----------------------------------------------------------------------- module mpas_timer @@ -23,6 +24,17 @@ module mpas_timer use mpas_kind_types use mpas_derived_types use mpas_io_units + use mpas_dmpar + use mpas_threading + use mpas_abort, only : mpas_dmpar_global_abort + +#ifdef MPAS_PERF_MOD_TIMERS + use perf_mod +#endif + +#ifdef MPAS_GPTL_TIMERS + use gptl +#endif implicit none save @@ -31,346 +43,441 @@ module mpas_timer include 'f90papi.h' #endif - type (timer_node), pointer :: all_timers - integer :: levels, synced - - type (dm_info), pointer :: domain_info + type (mpas_timer_root), pointer :: timer_root => null() - public :: mpas_timer_start, & + public :: mpas_timer_set_context, & + mpas_timer_start, & mpas_timer_stop, & mpas_timer_write, & - mpas_timer_init + mpas_timer_init, & + mpas_timer_finalize contains +!*********************************************************************** +! +! routine mpas_timer_set_context +! +!> \brief MPAS Timer set context routine +!> \author Doug Jacobsen +!> \date 12/22/2015 +!> \details +!> This routine sets the timer_root for the timer infrastructure to point to a specific domain's timer_root. +!> This allows multiple cores to have MPAS timers, and keep their timing information separate. +! +!----------------------------------------------------------------------- + subroutine mpas_timer_set_context(domain)!{{{ + + type (domain_type), intent(in) :: domain + + timer_root => domain % timer_root + + end subroutine mpas_timer_set_context!}}} + !*********************************************************************** ! ! routine mpas_timer_start ! !> \brief MPAS Timer start routine !> \author Doug Jacobsen -!> \date 03/27/13 -!> \details -!> This routine starts a timer. By default, timer_name is searched for in the linked list of timers. -!> If timer_ptr is provided, the search doesn't happen and the pointer to the timer is used (or allocated if not created yet). 
+!> \date 12/22/2015 +!> \details +!> This routine starts a timer named 'timer_name' as a child of whatever the +!> most recently started timer that is still running is. ! !----------------------------------------------------------------------- - subroutine mpas_timer_start(timer_name, clear_timer, timer_ptr)!{{{ -# ifdef _MPI - include 'mpif.h' -# endif + subroutine mpas_timer_start(timer_name, clear_timer_in)!{{{ character (len=*), intent (in) :: timer_name !< Input: name of timer, stored as name of timer - logical, optional, intent(in) :: clear_timer !< Input: flag to clear timer - type (timer_node), optional, pointer :: timer_ptr !< Output: pointer to store timer in module + logical, optional, intent(in) :: clear_timer_in !< Input: flag to clear timer + + logical :: setup_timer, timer_found, clear_timer, check_flag + + type (mpas_timer_node), pointer :: current_timer, prev_timer + + character (len=len_trim(timer_name)) :: trimmed_name + + integer :: usecs, nlen, iErr, threadNum, numThreads + + numThreads = mpas_threading_get_max_threads() + threadNum = mpas_threading_get_thread_num() - character (len=len(timer_name)) :: trimed_name - logical :: timer_added, timer_found, string_equal, check_flag - type (timer_node), pointer :: current, temp + trimmed_name = trim(timer_name) + nlen = len(trimmed_name) - integer :: clock, hz, usecs, nlen - trimed_name = trim(timer_name) - nlen = len(trimed_name) + clear_timer = .false. -#ifdef MPAS_TAU - call tau_start(trimed_name) + if ( present(clear_timer_in) ) then + clear_timer = clear_timer_in + end if + +#ifdef MPAS_TAU_TIMERS + call tau_start(trimmed_name) #endif - timer_added = .false. - timer_found = .false. - - if(.not.associated(all_timers)) then - timer_added = .true. 
- allocate(all_timers) - allocate(all_timers%next) - levels = 0 - - all_timers%timer_name = '' - all_timers%nlen = 0 - current => all_timers%next - nullify(current%next) - else - current => all_timers%next - timer_search: do while ((.not.timer_found) .and. associated(current)) - string_equal = (current%timer_name(1:current%nlen) == trimed_name) - if(string_equal) then - timer_found = .true. - else - current => current%next - endif - end do timer_search - endif - - if(present(timer_ptr)) then - timer_found = .true. - if(.not.associated(timer_ptr)) then - current => all_timers - find_end_ptr: do while((.not.timer_added) .and. (associated(current%next))) - current => current%next - end do find_end_ptr - - allocate(timer_ptr) - - current%next => timer_ptr - current => timer_ptr - nullify(timer_ptr%next) - current%levels = levels - current%timer_name = trimed_name - current%nlen = nlen - current%running = .false. - current%total_time = 0.0 - current%max_time = 0.0 - current%min_time = 100000000.0 - current%avg_time = 0.0 - current%calls = 0 - current%efficiency = 0.0 - else - current => timer_ptr - endif - endif - - if(.not.timer_found) then - current => all_timers - find_end: do while((.not.timer_added) .and. (associated(current%next))) - current => current%next - end do find_end - - allocate(current%next) - current => current%next - - nullify(current%next) - timer_added = .true. - endif - - if(timer_added .and. (.not.timer_found)) then - current%levels = levels - current%timer_name = trimed_name - current%nlen = nlen - current%running = .false. - current%total_time = 0.0 - current%max_time = 0.0 - current%min_time = 100000000.0 - current%avg_time = 0.0 - current%calls = 0 - current%efficiency = 0.0 - endif - - if((timer_added .or. timer_found) .and. (.not.current%running)) then - current%running = .true. 
- levels = levels + 1 +#ifdef MPAS_PERF_MOD_TIMERS + call t_startf(trimmed_name) +#endif + +#ifdef MPAS_GPTL_TIMERS + iErr = gptlstart(trimmed_name) +#endif +#ifdef MPAS_NATIVE_TIMERS + if ( threadNum == 0 ) then + setup_timer = .false. + + ! If the root_timer in timer_root has not been set, create it and use it for the current timer + if ( .not. associated(timer_root % root_timer) ) then + allocate(timer_root % root_timer) + current_timer => timer_root % root_timer + setup_timer = .true. + ! If root_timer is set, current_timer should be as well. So look for this timer under current_timer, or + ! create it as a child. + else + ! If current_timer is not set, set it to root_timer and search... + if ( .not. associated(timer_root % current_timer) ) then + timer_root % current_timer => timer_root % root_timer + end if + + ! If current_timer doesn't have any children, make a new child that will be this timer + if ( .not. associated(timer_root % current_timer % child) ) then + allocate(timer_root % current_timer % child) + current_timer => timer_root % current_timer % child + current_timer % parent => timer_root % current_timer + setup_timer = .true. + else ! Search through current_timer's children for this timer, or create it as a new child + timer_found = .false. + current_timer => timer_root % current_timer % child + do while (associated(current_timer) .and. .not. timer_found) + if ( current_timer % nlen == nlen) then + if ( current_timer % timer_name(1:current_timer % nlen) == trimmed_name(1:nlen) ) then + timer_found = .true. + end if + end if + + if ( .not. timer_found ) then + prev_timer => current_timer + current_timer => current_timer % next + end if + end do + + if (.not. timer_found) then + allocate(prev_timer % next) + current_timer => prev_timer % next + current_timer % parent => timer_root % current_timer + setup_timer = .true. + end if + end if + end if + + ! 
Setup timer if needed + if ( setup_timer ) then + current_timer % timer_name = trimmed_name + current_timer % nlen = nlen + current_timer % printed = .false. + allocate(current_timer % running(numThreads)) + allocate(current_timer % start_time(numThreads)) + allocate(current_timer % end_time(numThreads)) + allocate(current_timer % total_time(numThreads)) + allocate(current_timer % max_time(numThreads)) + allocate(current_timer % min_time(numThreads)) + allocate(current_timer % avg_time(numThreads)) + current_timer % running(:) = .false. + current_timer % start_time(:) = 0.0_R8KIND + current_timer % end_time(:) = 0.0_R8KIND + current_timer % total_time(:) = 0.0_R8KIND + current_timer % max_time(:) = -huge(RKIND) + current_timer % min_time(:) = huge(RKIND) + current_timer % avg_time(:) = 0.0_RKIND + current_timer % calls = 0 + end if + + ! Set current timer to be current timer (so timers started after will be children of this timer instance) + timer_root % current_timer => current_timer + end if + call mpas_threading_barrier() + + current_timer => timer_root % current_timer + + if ( clear_timer) then + if ( threadNum == 0 ) then + current_timer % calls = 0 + end if + current_timer % running(threadNum + 1) = .false. + current_timer % start_time(threadNum + 1) = 0.0_R8KIND + current_timer % end_time(threadNum + 1) = 0.0_R8KIND + current_timer % total_time(threadNum + 1) = 0.0_R8KIND + current_timer % max_time(threadNum + 1) = -huge(RKIND) + current_timer % min_time(threadNum + 1) = huge(RKIND) + current_timer % avg_time(threadNum + 1) = 0.0_RKIND + end if + + ! Set start time + ! TODO after making sure the timer structures work as expected... + if ( threadNum == 0 ) then + current_timer % calls = current_timer % calls + 1 + end if + current_timer % running(threadNum + 1) = .true. 
#ifdef _PAPI - call PAPIF_get_real_usec(usecs, check_flag) - current%start_time = usecs/1.0e6 -#elif _MPI - current%start_time = MPI_Wtime() + call PAPIF_get_real_usec(usecs, check_flag) + current_timer % start_time(threadNum + 1) = usecs/1.0e6 #else - call system_clock (count=clock) - call system_clock (count_rate=hz) - current%start_time = real(clock,kind=R8KIND)/real(hz,kind=R8KIND) + call mpas_dmpar_get_time(current_timer % start_time(threadNum + 1)) #endif - endif - - if(present(clear_timer)) then - if(clear_timer) then - current%start_time = 0.0 - current%end_time = 0.0 - current%total_time = 0.0 - current%max_time = 0.0 - current%min_time = 0.0 - current%avg_time = 0.0 - current%calls = 0 - current%running = .false. - endif - endif - - if(present(timer_ptr)) then - timer_ptr => current - endif - +#endif + end subroutine mpas_timer_start!}}} - + !*********************************************************************** ! ! routine mpas_timer_stop ! !> \brief MPAS Timer stop routine !> \author Doug Jacobsen -!> \date 03/27/13 -!> \details -!> This routine stops a timer. By default, timer_name is searched for in the linked list of timers. -!> If timer_ptr is provided, the search doesn't happen and the pointer to the timer is used. +!> \date 12/22/2015 +!> \details +!> This routine stops a timer named 'timer_name'. It should be the most +!> recently started timer, so an error is thrown if it is not. ! 
!----------------------------------------------------------------------- - subroutine mpas_timer_stop(timer_name, timer_ptr)!{{{ -# ifdef _MPI - include 'mpif.h' -# endif + subroutine mpas_timer_stop(timer_name)!{{{ character (len=*), intent(in) :: timer_name !< Input: name of timer to stop - type (timer_node), pointer, optional :: timer_ptr !< Input: pointer to timer, for stopping - character (len=len(timer_name)) :: trimed_name !< Trimed timer name - - type (timer_node), pointer :: current - - real (kind=R8KIND) :: time_temp - logical :: timer_found, string_equal, check_flag - integer :: clock, hz, usecs, nlen - trimed_name = trim(timer_name) - nlen = len(trimed_name) - -#ifdef MPAS_TAU - call tau_stop(trimed_name) -#endif - - timer_found = .false. - - if(present(timer_ptr)) then - timer_found = .true. - current => timer_ptr - endif - - if(.not.associated(all_timers)) then - print *,' timer_stop :: timer_stop called with no timers initialized' - else if(.not. timer_found) then - current => all_timers - timer_find: do while(.not.timer_found .and. associated(current)) - string_equal = (current%timer_name(1:current%nlen) == trimed_name) - - if(string_equal) then - timer_found = .true. - else - current => current%next - endif - end do timer_find - endif - if(.not.timer_found) then - print *,' timer_stop :: timer_stop called with timer_name =', timer_name,' when timer has not been started.' - call mpas_dmpar_global_abort('ERROR: in subroutine mpas_timer_stop()') - endif + character (len=len_trim(timer_name)) :: trimmed_name !< Trimmed timer name - if(current%running) then - current%running = .false. 
- levels = levels - 1 + real (kind=R8KIND) :: temp_time -#ifdef _PAPI - call PAPIF_get_real_usec(usecs, check_flag) - current%end_time = usecs/1.0e6 -#elif _MPI - current%end_time = MPI_Wtime() -#else - call system_clock(count=clock) - call system_clock(count_rate=hz) - current%end_time = real(clock,kind=R8KIND)/real(hz,kind=R8KIND) + integer :: usecs, nlen, iErr, threadNum + + threadNum = mpas_threading_get_thread_num() + + trimmed_name = trim(timer_name) + nlen = len(trimmed_name) + +#ifdef MPAS_TAU_TIMERS + call tau_stop(trimmed_name) #endif - - time_temp = current%end_time - current%start_time - current%total_time = current%total_time + time_temp - if(real(time_temp,RKIND) > current%max_time) then - current%max_time = real(time_temp,RKIND) - endif +#ifdef MPAS_PERF_MOD_TIMERS + call t_stopf(trimmed_name) +#endif - if(real(time_temp,RKIND) < current%min_time) then - current%min_time = real(time_temp,RKIND) - endif +#ifdef MPAS_GPTL_TIMERS + iErr = gptlstop(trimmed_name) +#endif - current%calls = current%calls + 1 - endif +#ifdef MPAS_NATIVE_TIMERS + ! Timer to stop should be timer_root % current_timer, since you should only be allowed to stop the most + ! recently started timer + if ( .not. associated(timer_root % current_timer) ) then + write(stderrUnit, *) 'ERROR: Trying to stop a timer when no timer has been started.' + call mpas_dmpar_global_abort('ERROR: Incorrect timer usage') + end if + + if ( .not. timer_root % current_timer % nlen == nlen ) then + if ( .not. timer_root % current_timer % timer_name(1:timer_root % current_timer % nlen) == trimmed_name(1:nlen) ) then + write(stderrUnit, * ) 'ERROR: Trying to stop timer ', trim(trimmed_name), ' when ', & + trim(timer_root % current_timer % timer_name), ' is the most recently started timer.' + call mpas_dmpar_global_abort('ERROR: Incorrect timer usage') + end if + end if + + ! Set stop time + ! TODO: Compute timer statistics... + timer_root % current_timer % running(threadNum + 1) = .false. 
+#ifdef _PAPI + call PAPIF_get_real_usec(usecs, check_flag) + current_timer % end_time(threadNum + 1) = usecs/1.0e6 +#else + call mpas_dmpar_get_time(timer_root % current_timer % end_time(threadNum + 1)) +#endif + temp_time = timer_root % current_timer % end_time(threadNum + 1) & + - timer_root % current_timer % start_time(threadNum + 1) + timer_root % current_timer % total_time(threadNum + 1) = timer_root % current_timer % total_time(threadNum + 1) & + + temp_time + timer_root % current_timer % max_time(threadNum + 1) = max(timer_root % current_timer % max_time(threadNum + 1), & + real(temp_time, kind=RKIND)) + timer_root % current_timer % min_time(threadNum + 1) = min(timer_root % current_timer % min_time(threadNum + 1), & + real(temp_time, kind=RKIND)) + + ! Set current_timer to the parent of current_timer + call mpas_threading_barrier() + if ( threadNum == 0 ) then + timer_root % current_timer => timer_root % current_timer % parent + end if + call mpas_threading_barrier() +#endif end subroutine mpas_timer_stop!}}} !*********************************************************************** ! -! recursive routine mpas_timer_write +! routine mpas_timer_write_header ! -!> \brief MPAS Timer write routine +!> \brief MPAS Timer write header routine !> \author Doug Jacobsen -!> \date 03/27/13 -!> \details -!> This routine writes all timer output to stdout. It recursively calls -!> itself until all timers have been written out. Prior to writing timers, -!> this routine calls mpas_timer_sync. +!> \date 12/22/2015 +!> \details +!> This routine writes header information about what will be printed for each timer_root. ! !----------------------------------------------------------------------- - recursive subroutine mpas_timer_write(timer_ptr, total_ptr)!{{{ - type (timer_node), pointer, optional :: timer_ptr !< Input - Optional: Pointer to a specific timer to write out. - type (timer_node), pointer, optional :: total_ptr !< Input - Optional: Pointer to the total_time timer. 
- character (len=StrKIND) :: tname - - logical :: total_found, string_equals - type (timer_node), pointer :: current, total - real (kind=RKIND) :: percent - integer :: i, nlen - - total_found = .false. - -#ifdef MPAS_SYNC_TIMERS - if(associated(domain_info) .and. synced == 0) then - call mpas_timer_sync() - endif + subroutine mpas_timer_write_header()!{{{ + +#ifdef MPAS_NATIVE_TIMERS + write(stdoutUnit, *) '' + write(stdoutUnit, *) '' + write(stdoutUnit, *) ' Timer information: ' + write(stdoutUnit, *) ' Globals are computed across all threads and processors' + write(stdoutUnit, *) '' + write(stdoutUnit, *) ' Columns:' + write(stdoutUnit, *) ' total time: Global max of accumulated time spent in timer' + write(stdoutUnit, *) ' calls: Total number of times this timer was started / stopped.' + write(stdoutUnit, *) ' min: Global min of time spent in a single start / stop' + write(stdoutUnit, *) ' max: Global max of time spent in a single start / stop' + write(stdoutUnit, *) ' avg: Global max of average time spent in a single start / stop' + write(stdoutUnit, *) ' pct_tot: Percent of the timer at level 1' + write(stdoutUnit, *) ' pct_par: Percent of the parent timer (one level up)' + write(stdoutUnit, *) ' par_eff: Parallel efficiency, global average total time / global max total time' + write(stdoutUnit, *) '' + write(stdoutUnit, *) '' + + write(stdoutUnit,'(3x, a10, 34x, a15, a12, a11, a15, a15, a13, a10, a12)') 'timer_name', 'total', 'calls', 'min', 'max', & + 'avg', 'pct_tot', 'pct_par', 'par_eff' #endif - if(present(timer_ptr) .and. (.not.present(total_ptr))) then - print *,'timer_write :: timer_ptr valid, but total_ptr is not assigned.' 
- call mpas_dmpar_global_abort('ERROR: in subroutine mpas_timer_write()') - else if(present(timer_ptr)) then - tname = '' - do i=0,timer_ptr%levels+2 - tname = tname//' ' - end do - - if(timer_ptr%total_time == 0.0_R8KIND) then - timer_ptr%min_time = 0.0_RKIND - timer_ptr%max_time = 0.0_RKIND - timer_ptr%avg_time = 0.0_RKIND - percent = 0.0_RKIND - else - timer_ptr%avg_time = timer_ptr%total_time / real(timer_ptr%calls, kind=R8KIND) - percent = timer_ptr%total_time / total_ptr%total_time - endif - - write(stdoutUnit,'(i2, 1x, a35, f15.5, i10, 3f15.5, 2f8.2)') timer_ptr%levels, tname(1:timer_ptr%levels)// & - timer_ptr%timer_name, timer_ptr%total_time, timer_ptr%calls, timer_ptr%min_time, timer_ptr%max_time, & - timer_ptr%avg_time, percent, timer_ptr%efficiency - return - endif - - total => all_timers - - find_total: do while((.not.total_found) .and. associated(total)) - string_equals = (total%timer_name(1:total%nlen) == "total time") - if(string_equals) then - total_found = .true. - else - total => total%next - endif - end do find_total - - if(.not.total_found) then - print *,' timer_write :: no timer named "total time" found.' - call mpas_dmpar_global_abort('ERROR: in subroutine mpas_timer_write()') - end if - - write(stdoutUnit,'(3x, a10, 24x, a15, a10, a13, a15, a15, a12, a12)') 'timer_name', 'total', 'calls', 'min', 'max', & - 'avg', 'percent', 'efficiency' - write(stdoutUnit,'(i2, 1x, a35, f15.5, i10, 3f15.5)') total%levels, total%timer_name, total%total_time, total%calls, & - total%min_time, total%max_time, total%avg_time + end subroutine mpas_timer_write_header!}}} - current => all_timers - - print_timers: do while(associated(current)) - string_equals = (current%timer_name(1:current%nlen) == "total time") - string_equals = string_equals .or. (current%timer_name(1:current%nlen) == "") +!*********************************************************************** +! +! routine mpas_timer_write +! 
+!> \brief MPAS Timer write routine +!> \author Doug Jacobsen +!> \date 12/22/2015 +!> \details +!> This routine writes all timer output to stdout for the currently set +!> timer_root. It prints timer information in depth first format. +! +!----------------------------------------------------------------------- + subroutine mpas_timer_write()!{{{ + type (mpas_timer_node), pointer :: current_timer + character (len=StrKIND) :: indentation + logical :: next_timer_found + integer :: iErr, levels, i, numThreads + + real (kind=RKIND) :: percent_inc, percent_exc, efficiency + real (kind=RKIND) :: global_time, global_max_total, global_ave_total + +#ifdef MPAS_GPTL_TIMERS +#ifdef MPAS_DEBUG + iErr = gptlpr(timer_root % dminfo % my_proc_id) +#else + if ( timer_root % dminfo % my_proc_id == 0 ) then + iErr = gptlpr(timer_root % dminfo % my_proc_id) + end if +#endif +#endif - if(.not.string_equals) then - call mpas_timer_write(current, total) - current => current%next - else - current => current%next - endif - end do print_timers +#ifdef MPAS_NATIVE_TIMERS + numThreads = mpas_threading_get_max_threads() + current_timer => timer_root % root_timer + levels = 1 + + ! Initialize indentation with all spaces. + indentation = '' + do i = 1, StrKIND + indentation(i:i) = ' ' + end do + + do while ( associated(current_timer) ) + if ( current_timer % running(1) ) then + write(stderrUnit, *) 'ERROR: Timer ', trim(current_timer % timer_name), ' is still running.' + end if + + if ( .not. current_timer % printed ) then + ! Compute average timers for each thread + do i = 1, numThreads + current_timer % avg_time(i) = current_timer % total_time(i) / current_timer % calls + end do + + ! 
Synchronize timers across threads + global_ave_total = sum(current_timer % total_time(:)) + global_ave_total = global_ave_total / numThreads + current_timer % total_time(1) = maxval(current_timer % total_time(:)) + current_timer % max_time(1) = maxval(current_timer % max_time(:)) + current_timer % min_time(1) = minval(current_timer % min_time(:)) + current_timer % avg_time(1) = sum(current_timer % avg_time(:)) / numThreads + + ! Synchronize timers across procs + call mpas_dmpar_sum_real(timer_root % dminfo, global_ave_total, global_time) + global_ave_total = global_time / timer_root % dminfo % nprocs + call mpas_dmpar_max_real(timer_root % dminfo, real(current_timer % total_time(1), kind=RKIND), global_time) + global_max_total = global_time + current_timer % total_time(1) = global_time + call mpas_dmpar_max_real(timer_root % dminfo, current_timer % max_time(1), global_time) + current_timer % max_time(1) = global_time + call mpas_dmpar_min_real(timer_root % dminfo, current_timer % min_time(1), global_time) + current_timer % min_time(1) = global_time + call mpas_dmpar_sum_real(timer_root % dminfo, current_timer % avg_time(1), global_time) + current_timer % avg_time(1) = global_time / timer_root % dminfo % nprocs + + ! Compute percent of run time + if ( timer_root % root_timer % total_time(1) /= 0.0_RKIND ) then + percent_inc = current_timer % total_time(1) / timer_root % root_timer % total_time(1) + else + percent_inc = 0.0_RKIND + end if + + if ( associated(current_timer % parent) ) then + percent_exc = current_timer % total_time(1) / current_timer % parent % total_time(1) + else + percent_exc = 0.0_RKIND + end if + + ! Compute efficiency + if ( global_max_total /= 0.0_RKIND ) then + efficiency = global_ave_total / global_max_total + else + efficiency = 1.0_RKIND + end if + + ! 
Print current_timer + write(stdoutUnit,'(i2, 1x, a45, f15.5, i10, 3f15.5, 1x, f8.2, 3x, f8.2, 3x, f8.2)') levels, indentation(1:levels-1) // & + current_timer % timer_name, current_timer % total_time(1), current_timer % calls, & + current_timer % min_time(1), current_timer % max_time(1), current_timer % avg_time(1), & + percent_inc * 100.0_RKIND, percent_exc * 100.0_RKIND, efficiency + current_timer % printed = .true. + end if + + + ! If current_timer has children, move down a level (and increment levels) + ! If current_timer has no children, print siblings (without incrementing levels) + ! If current_timer has no children, move back to the parent timer, and decrement levels. + next_timer_found = .false. + if ( associated(current_timer % child) ) then + if ( .not. current_timer % child % printed ) then + current_timer => current_timer % child + levels = levels + 1 + next_timer_found = .true. + end if + end if + + if ( .not. next_timer_found ) then + if ( associated(current_timer % next) ) then + current_timer => current_timer % next + else + if ( associated(current_timer % parent) ) then + current_timer => current_timer % parent + levels = levels - 1 + else + nullify(current_timer) + end if + end if + end if + end do +#endif end subroutine mpas_timer_write!}}} @@ -380,89 +487,123 @@ end subroutine mpas_timer_write!}}} ! !> \brief MPAS Timer init routine !> \author Doug Jacobsen -!> \date 03/27/13 -!> \details -!> This routine initializes the mpas_timer setup. It needs to have access to the dminfo object in order to sync timers. +!> \date 12/22/2015 +!> \details +!> This routine initializes the timers within a given domain. +!> Additionally, it sets the context to the timers within the domain, which can +!> be changed later using the mpas_timer_set_context routine. ! 
!----------------------------------------------------------------------- subroutine mpas_timer_init(domain)!{{{ - type (domain_type), intent(in), optional :: domain !< Input - Optional: Domain structure + type (domain_type), intent(inout) :: domain !< Input/Output: Domain structure + + integer :: iErr - if( present(domain) ) then - domain_info => domain % dminfo - endif + allocate(domain % timer_root) + timer_root => domain % timer_root + timer_root % dminfo => domain % dminfo - synced = 0 +#ifdef MPAS_GPTL_TIMERS + iErr = gptlsetoption(gptloverhead, 0) + iErr = gptlsetoption(gptlpercent, 0) + iErr = gptlsetoption(gptlsync_mpi, 1) + + iErr = gptlinitialize() +#endif end subroutine mpas_timer_init!}}} !*********************************************************************** ! -! routine mpas_timer_sync +! routine mpas_timer_finalize ! -!> \brief MPAS Timer sync routine +!> \brief MPAS Timer finalize routine !> \author Doug Jacobsen -!> \date 03/27/13 -!> \details -!> This routine synchronizes timers across all processors in order to better represent -!> the entire run domain with the timer output. -!> -!> Note: To avoid the need for additional implementations of -!> mpas_dmpar_max_real() and mpas_dmpar_sum_real(), the total_time is -!> cast from R8KIND to RKIND in the process of computing all_total_time -!> and all_ave_time. This is not expected to have a practical impact on -!> the reported times. +!> \date 12/22/2015 +!> \details +!> This routine destroys all timers for the given domain. +!> Timers are destroyed in depth first format ! 
!----------------------------------------------------------------------- - subroutine mpas_timer_sync()!{{{ - use mpas_dmpar - - type (timer_node), pointer :: current - real (kind=RKIND) :: all_total_time, all_max_time, all_min_time, all_ave_time - - current => all_timers - - sync_timers: do while(associated(current)) - all_total_time = 0.0 - all_ave_time = 0.0 - all_max_time = 0.0 - all_min_time = 0.0 - - call mpas_dmpar_max_real(domain_info, real(current % total_time, kind=RKIND), all_total_time) - call mpas_dmpar_sum_real(domain_info, real(current % total_time, kind=RKIND), all_ave_time) - - all_ave_time = all_ave_time / domain_info % nprocs - - current % total_time = all_total_time - -#ifdef _MPI - if ( all_total_time > 0.0_RKIND ) then - current % efficiency = all_ave_time / all_total_time - else - current % efficiency = 1.0 - end if -#else - current % efficiency = 1.0 + subroutine mpas_timer_finalize(domain)!{{{ + type (domain_type), intent(inout) :: domain !< Input/Output: Domain structure + +#ifdef MPAS_NATIVE_TIMERS + ! Destroy all children of domain's root_timer + call mpas_timer_destroy_children(domain % timer_root % root_timer) + + ! 
Deallocate all arrays of the root_timer, and the timer itself + deallocate(domain % timer_root % root_timer % start_time) + deallocate(domain % timer_root % root_timer % end_time) + deallocate(domain % timer_root % root_timer % total_time) + deallocate(domain % timer_root % root_timer % max_time) + deallocate(domain % timer_root % root_timer % min_time) + deallocate(domain % timer_root % root_timer % avg_time) + deallocate(domain % timer_root % root_timer % running) + deallocate(domain % timer_root % root_timer) #endif - if ( current % calls > 0 ) then - current % avg_time = current % total_time / real(current % calls, kind=R8KIND) - else - current % avg_time = current % total_time - end if - - call mpas_dmpar_max_real(domain_info, current % max_time, all_max_time) - current % max_time = all_max_time - - call mpas_dmpar_min_real(domain_info, current % min_time, all_min_time) - current % min_time = all_min_time + deallocate(domain % timer_root) - current => current % next - end do sync_timers + end subroutine mpas_timer_finalize!}}} - synced = 1 +!*********************************************************************** +! +! recursive routine mpas_timer_destroy_chldren +! +!> \brief MPAS Timer Destroy Children Routine +!> \author Doug Jacobsen +!> \date 12/22/2015 +!> \details +!> This routine destroys all children timers of parent_timer. +!> It does not destroy parent_timer itself. +! +!----------------------------------------------------------------------- + recursive subroutine mpas_timer_destroy_children(parent_timer)!{{{ + type (mpas_timer_node), pointer :: parent_timer + + type (mpas_timer_node), pointer :: current_timer + + if (.not. associated(parent_timer % child)) then + return + end if + + ! 
First, destroy all children of any sibling in this generation + current_timer => parent_timer % child + do while ( associated(current_timer) ) + if ( associated(current_timer % child) ) then + call mpas_timer_destroy_children(current_timer) + end if + current_timer => current_timer % next + end do + + ! This generation should not have any children now, so destroy all siblings + do while ( associated(parent_timer % child) ) + current_timer => parent_timer % child + + if ( associated(current_timer) ) then + ! If there is more than one sibling, delete the first one, and set child to be it's sibling + if ( associated(current_timer % next) ) then + parent_timer % child => current_timer % next + else + nullify(parent_timer % child) + end if + + ! Deallocate all arrays, and the timer itself + deallocate(current_timer % start_time) + deallocate(current_timer % end_time) + deallocate(current_timer % total_time) + deallocate(current_timer % max_time) + deallocate(current_timer % min_time) + deallocate(current_timer % avg_time) + deallocate(current_timer % running) + deallocate(current_timer) + else + nullify(parent_timer % child) + end if + end do - end subroutine mpas_timer_sync!}}} + end subroutine mpas_timer_destroy_children!}}} end module mpas_timer diff --git a/src/framework/mpas_timer_types.inc b/src/framework/mpas_timer_types.inc index 7025db57ab..dfc96d3662 100644 --- a/src/framework/mpas_timer_types.inc +++ b/src/framework/mpas_timer_types.inc @@ -1,10 +1,18 @@ - type timer_node - character (len=StrKIND) :: timer_name - logical :: running, printable - integer :: levels, calls, nlen - real (kind=R8KIND) :: start_time, end_time, total_time - real (kind=RKIND) :: max_time, min_time, avg_time - real (kind=RKIND) :: efficiency - type (timer_node), pointer :: next - end type timer_node + type mpas_timer_root + type (mpas_timer_node), pointer :: root_timer => null() + type (mpas_timer_node), pointer :: current_timer => null() + type (dm_info), pointer :: dminfo => null() + 
end type mpas_timer_root + type mpas_timer_node + character (len=StrKIND) :: timer_name + logical :: printed + integer :: nlen + logical, pointer, dimension(:) :: running + integer :: calls + real (kind=R8KIND), pointer, dimension(:) :: start_time, end_time, total_time + real (kind=RKIND), pointer, dimension(:) :: max_time, min_time, avg_time + type (mpas_timer_node), pointer :: next => null() + type (mpas_timer_node), pointer :: child => null() + type (mpas_timer_node), pointer :: parent => null() + end type mpas_timer_node diff --git a/src/framework/regex_matching.c b/src/framework/regex_matching.c new file mode 100644 index 0000000000..d37a23436d --- /dev/null +++ b/src/framework/regex_matching.c @@ -0,0 +1,37 @@ +#include +#include +#include + +#define MAX_LEN 1024 + +void check_regex_match(const char * pattern, const char * str, int *imatch){ + regex_t regex; + char bracketed_pattern[MAX_LEN]; + int ierr, len; + + *imatch = 0; + len = snprintf(bracketed_pattern, 1024, "^%s$", pattern); + if ( len >= MAX_LEN ) { + *imatch = -1; + return; + } + + ierr = regcomp(®ex, bracketed_pattern, 0); + if ( ierr ) { + *imatch = -1; + return; + } + + ierr = regexec(®ex, str, 0, NULL, 0); + + regfree(®ex); + + if ( !ierr ) { + *imatch = 1; + } else if ( ierr == REG_NOMATCH ) { + *imatch = 0; + } else { + *imatch = -1; + } +} + diff --git a/src/framework/shift_time_levs_array.inc b/src/framework/shift_time_levs_array.inc index 59f00dbdd8..eabff4f1bc 100644 --- a/src/framework/shift_time_levs_array.inc +++ b/src/framework/shift_time_levs_array.inc @@ -5,36 +5,40 @@ ! of field pointers; these pointers exist in the field types as 'next' pointers !!!!!!!!!!!!!!!!!!!!!!! + threadNum = mpas_threading_get_thread_num() + nlevs = size(fldarr) allocate(fldarr_ptr(nlevs)) - ! - ! Initialize pointers to first block of all time levels - ! - do i=1,nlevs - fldarr_ptr(i) % next => fldarr(i) - end do - + if ( threadNum == 0 ) then + ! + ! 
Initialize pointers to first block of all time levels + ! + do i=1,nlevs + fldarr_ptr(i) % next => fldarr(i) + end do - ! - ! Loop over all blocks - ! - do while (associated(fldarr_ptr(1) % next)) ! - ! Shift time levels for this block + ! Loop over all blocks ! - arr_ptr => fldarr_ptr(1) % next % array - do i=1,nlevs-1 - fldarr_ptr(i) % next % array => fldarr_ptr(i+1) % next % array - end do - fldarr_ptr(nlevs) % next % array => arr_ptr + do while (associated(fldarr_ptr(1) % next)) - ! Advance pointers to next block - do i=1,nlevs - fldarr_ptr(i) % next => fldarr_ptr(i) % next % next + ! + ! Shift time levels for this block + ! + arr_ptr => fldarr_ptr(1) % next % array + do i=1,nlevs-1 + fldarr_ptr(i) % next % array => fldarr_ptr(i+1) % next % array + end do + fldarr_ptr(nlevs) % next % array => arr_ptr + + ! Advance pointers to next block + do i=1,nlevs + fldarr_ptr(i) % next => fldarr_ptr(i) % next % next + end do end do - end do - deallocate(fldarr_ptr) + deallocate(fldarr_ptr) + end if diff --git a/src/framework/shift_time_levs_scalar.inc b/src/framework/shift_time_levs_scalar.inc index f5a0bffc98..d1d3160cf2 100644 --- a/src/framework/shift_time_levs_scalar.inc +++ b/src/framework/shift_time_levs_scalar.inc @@ -1,40 +1,44 @@ - !!!!!!!!!!!!!!!!!!!!!!! - ! Implementation note: - ! - ! In this subroutine, we use an array of fields as a ready-made array - ! of field pointers; these pointers exist in the field types as 'next' pointers - !!!!!!!!!!!!!!!!!!!!!!! + threadNum = mpas_threading_get_thread_num() - - nlevs = size(fldarr) - allocate(fldarr_ptr(nlevs)) + if ( threadNum == 0 ) then + !!!!!!!!!!!!!!!!!!!!!!! + ! Implementation note: + ! + ! In this subroutine, we use an array of fields as a ready-made array + ! of field pointers; these pointers exist in the field types as 'next' pointers + !!!!!!!!!!!!!!!!!!!!!!! - ! - ! Initialize pointers to first block of all time levels - ! 
- do i=1,nlevs - fldarr_ptr(i) % next => fldarr(i) - end do + + nlevs = size(fldarr) + allocate(fldarr_ptr(nlevs)) + ! + ! Initialize pointers to first block of all time levels + ! + do i=1,nlevs + fldarr_ptr(i) % next => fldarr(i) + end do - ! - ! Loop over all blocks - ! - do while (associated(fldarr_ptr(1) % next)) ! - ! Shift time levels for this block + ! Loop over all blocks ! - scalar = fldarr_ptr(1) % next % scalar - do i=1,nlevs-1 - fldarr_ptr(i) % next % scalar = fldarr_ptr(i+1) % next % scalar - end do - fldarr_ptr(nlevs) % next % scalar = scalar + do while (associated(fldarr_ptr(1) % next)) - ! Advance pointers to next block - do i=1,nlevs - fldarr_ptr(i) % next => fldarr_ptr(i) % next % next + ! + ! Shift time levels for this block + ! + scalar = fldarr_ptr(1) % next % scalar + do i=1,nlevs-1 + fldarr_ptr(i) % next % scalar = fldarr_ptr(i+1) % next % scalar + end do + fldarr_ptr(nlevs) % next % scalar = scalar + + ! Advance pointers to next block + do i=1,nlevs + fldarr_ptr(i) % next => fldarr_ptr(i) % next % next + end do end do - end do - deallocate(fldarr_ptr) + deallocate(fldarr_ptr) + end if diff --git a/src/framework/xml_stream_parser.c b/src/framework/xml_stream_parser.c index 5f97043429..64b243faed 100644 --- a/src/framework/xml_stream_parser.c +++ b/src/framework/xml_stream_parser.c @@ -184,25 +184,55 @@ void parse_xml_tag_name(char *tag_buf, char *tag_name) size_t parse_xml_tag(char *xml_buf, size_t buf_len, char *tag, size_t *tag_len, int *line, int *start_line) { size_t i, j; + int found_end, block_comment; + - /* Look for beginning of tag */ i = 0; - while (i < buf_len && xml_buf[i] != '<') { - if (xml_buf[i] == '\n') - (*line)++; + do { + /* Look for beginning of tag */ + while (i < buf_len && xml_buf[i] != '<') { + if (xml_buf[i] == '\n') + (*line)++; + i++; + } + + /* Ran out of characters... 
*/ + if (i == buf_len) { + *tag_len = 0; + return 0; + } + + /* Move on to next character after opening '<' */ + *start_line = *line; i++; - } - /* Ran out of characters... */ - if (i == buf_len) { - *tag_len = 0; - return 0; - } + block_comment = 0; + /* Skip comment tags */ + if ( xml_buf[i] == '!' && xml_buf[i+1] == '-' && xml_buf[i+2] == '-' ) { + block_comment = 1; + + /* find end of the comment... */ + i = i+2; + found_end = 0; + while (i < buf_len && ! found_end) { + if ( xml_buf[i] == '-' && xml_buf[i+1] == '-' && xml_buf[i+2] == '>' ) { + found_end = 1; + i = i+2; + } else if ( xml_buf[i] == '\n' ) { + (*line)++; + } + i++; + } - /* Move on to next character after opening '<' */ - *start_line = *line; - i++; + + /* Ran out of characters... */ + if (i == buf_len) { + *tag_len = 0; + return 0; + } + } + } while (block_comment); /* Copy tag into string */ j = 0; @@ -397,7 +427,7 @@ int attribute_check(ezxml_t stream) nextchar = 0; for (i=(len-1); i>=0; nextchar=s_filename[i--]) { if (s_filename[i] == '$') { - if (strchr("YMDdhmsG",nextchar) == NULL) { + if (strchr("YMDdhmsGSB",nextchar) == NULL) { snprintf(msgbuf, MSGSIZE, "filename_template for stream \"%s\" contains unrecognized variable \"$%c\".", s_name, nextchar); fmt_err(msgbuf); return 1; @@ -861,6 +891,111 @@ int build_stream_path(const char *stream, const char *template, int *mpi_comm) } +/********************************************************************************* + * + * Function: extract_stream_interval + * + * Given an interval specification for a stream (interval) that references + * an interval in another stream (e.g., "stream:history:output_interval"), and + * an interval type (interval_type, either "input_interval" or "output_interval"), + * extracts the value of the interval from the other stream and returns it in + * the output argument interval2. 
+ * + * If the interval specification in the interval argument does not reference + * another stream, the contents of interval2 are unchanged upon return from + * this function. + * + * In case the input interval references an interval in another stream and this + * interval cannot be found, this function returns a value of 1; otherwise, this + * function returns 0. + * + *********************************************************************************/ +int extract_stream_interval(const char *interval, const char *interval_type, const char **interval2, const char *streamID, ezxml_t streams) +{ + int i; + int stream_found, copy_start, copy_from, copy_to; + char match_stream_name[256]; + char interval_name[256]; + const char *streamID2; + ezxml_t stream2_xml; + ezxml_t streammatch_xml; + + + if ( strncmp(interval, "stream:", 7) == 0 ) { + + /* Extract the name of the stream, and the name of the interval to use for interval */ + snprintf(match_stream_name, 256, "%s", (interval)+7); + copy_start = -1; + copy_from = -1; + copy_to = 0; + for ( i = 0; i < strlen(match_stream_name); i++ ) { + if ( match_stream_name[i] == ':' ) { + copy_start = i; + copy_from = copy_start+1; + } + + if ( copy_from == i ) { + interval_name[copy_to] = match_stream_name[copy_from]; + copy_from++; + copy_to++; + } + } + match_stream_name[copy_start] = '\0'; + interval_name[copy_to] = '\0'; + + if ( strcmp(match_stream_name, streamID) == 0 && strcmp(interval_name, interval_type) == 0 ) { + fprintf(stderr, "ERROR: Attribute '%s' of stream '%s' references itself.\n", interval_type, streamID); + return 1; + } + + if ( strcmp(interval_name, "input_interval") != 0 && strcmp(interval_name, "output_interval") != 0 ) { + fprintf(stderr, "ERROR: Attribute '%s' of stream '%s' references an invalid attribute: '%s'.\n", interval_type, streamID, interval_name); + fprintf(stderr, " Valid attributes are 'input_interval' and 'output_interval'.\n"); + return 1; + } + + stream_found = 0; + for ( stream2_xml = 
ezxml_child(streams, "immutable_stream"); stream2_xml && !stream_found; stream2_xml = stream2_xml->next ) { + streamID2 = ezxml_attr(stream2_xml, "name"); + + if ( strcmp(streamID2, match_stream_name) == 0 ){ + stream_found = 1; + streammatch_xml = stream2_xml; + } + } + + for ( stream2_xml = ezxml_child(streams, "stream"); stream2_xml && !stream_found; stream2_xml = stream2_xml->next ) { + streamID2 = ezxml_attr(stream2_xml, "name"); + + if ( strcmp(streamID2, match_stream_name) == 0 ) { + stream_found = 1; + streammatch_xml = stream2_xml; + } + } + + if ( stream_found == 1 ) { + *interval2 = ezxml_attr(streammatch_xml, interval_name); + } + else { + fprintf(stderr, "ERROR: The '%s' attribute of stream '%s' refers to an undefined stream named '%s'.\n", interval_type, streamID, match_stream_name); + return 1; + } + + + if ( *interval2 == NULL ) { + fprintf(stderr, "ERROR: The '%s' attribute of stream '%s' refers to an undefined attribute named '%s' of stream '%s'.\n", interval_type, streamID, interval_name, match_stream_name); + return 1; + } + else if ( strcmp(*interval2, "input_interval") == 0 || strcmp(*interval2, "output_interval") == 0 || strncmp(*interval2, "stream:", 7) == 0 ) { + fprintf(stderr, "ERROR: The '%s' attribute of stream '%s' contains an unexpandable value: '%s'.\n", interval_type, streamID, *interval2); + return 1; + } + } + + return 0; +} + + /********************************************************************************* * * Function: xml_stream_parser @@ -884,18 +1019,23 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) ezxml_t vararray_xml; ezxml_t varstruct_xml; ezxml_t substream_xml; - ezxml_t streamsmatch_xml, streammatch_xml; + ezxml_t stream2_xml; + ezxml_t streammatch_xml; const char *compstreamname_const, *structname_const; const char *streamID, *filename_template, *filename_interval, *direction, *varfile, *fieldname_const, *reference_time, *record_interval, *streamname_const, *precision; const char 
*interval_in, *interval_out, *packagelist; const char *clobber; const char *iotype; + const char *streamID2, *interval_in2, *interval_out2; + char interval_name[256]; + char match_stream_name[256]; char *packages, *package; char filename_interval_string[256]; char ref_time_local[256]; char rec_intv_local[256]; char fieldname[256]; char packages_local[256]; + char interval_type[32]; FILE *fd; char msgbuf[MSGSIZE]; int itype; @@ -903,6 +1043,8 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) int i_iotype; int iprec; int immutable; + int stream_found, copy_start, copy_from, copy_to; + int i; int err; @@ -936,7 +1078,9 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) filename_template = ezxml_attr(stream_xml, "filename_template"); filename_interval = ezxml_attr(stream_xml, "filename_interval"); interval_in = ezxml_attr(stream_xml, "input_interval"); + interval_in2 = ezxml_attr(stream_xml, "input_interval"); interval_out = ezxml_attr(stream_xml, "output_interval"); + interval_out2 = ezxml_attr(stream_xml, "output_interval"); reference_time = ezxml_attr(stream_xml, "reference_time"); record_interval = ezxml_attr(stream_xml, "record_interval"); precision = ezxml_attr(stream_xml, "precision"); @@ -944,6 +1088,24 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) clobber = ezxml_attr(stream_xml, "clobber_mode"); iotype = ezxml_attr(stream_xml, "io_type"); + /* Extract the input interval, if it refers to other streams */ + if ( interval_in ) { + sprintf(interval_type, "input_interval"); + *status = extract_stream_interval(interval_in, interval_type, &interval_in2, streamID, streams); + if ( *status != 0 ) { + return; + } + } + + /* Extract the output interval, if it refers to other streams */ + if ( interval_out ) { + sprintf(interval_type, "output_interval"); + *status = extract_stream_interval(interval_out, interval_type, &interval_out2, streamID, streams); + if ( *status != 0 ) 
{ + return; + } + } + /* Setup filename_interval correctly. * * If filename_interval is not explicitly set... @@ -959,22 +1121,22 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) /* If input interval is an interval (i.e. not initial_only or none) set filename_interval to the interval. */ if ( strstr(interval_in, "initial_only") == NULL && strstr(interval_in, "none") == NULL ){ - filename_interval = ezxml_attr(stream_xml, "input_interval"); + filename_interval = interval_in2; /* If output interval is an interval (i.e. not initial_only or none) set filename_interval to the interval. */ } else if ( strstr(interval_out, "initial_only") == NULL && strstr(interval_out, "none") == NULL ){ - filename_interval = ezxml_attr(stream_xml, "output_interval"); + filename_interval = interval_out2; } /* Check for an input stream. */ } else if ( strstr(direction, "input") != NULL ) { - if ( strstr(interval_in, "initial_only") == NULL && strstr(interval_in, "none") == NULL ){ - filename_interval = ezxml_attr(stream_xml, "input_interval"); + if ( strstr(interval_in2, "initial_only") == NULL && strstr(interval_in2, "none") == NULL ){ + filename_interval = interval_in2; } /* Check for an output stream. 
*/ } else if ( strstr(direction, "output") != NULL ) { if ( strstr(interval_out, "initial_only") == NULL && strstr(interval_out, "none") == NULL ){ - filename_interval = ezxml_attr(stream_xml, "output_interval"); + filename_interval = interval_out2; } } } else { @@ -987,13 +1149,13 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) */ if ( strstr(filename_interval, "input_interval") != NULL ) { if ( strstr(interval_in, "initial_only") == NULL && strstr(interval_in, "none") == NULL ) { - filename_interval = ezxml_attr(stream_xml, "input_interval"); + filename_interval = interval_in2; } else { filename_interval = NULL; } } else if ( strstr(filename_interval, "output_interval") != NULL ) { if ( strstr(interval_out, "initial_only") == NULL && strstr(interval_out, "none") == NULL ) { - filename_interval = ezxml_attr(stream_xml, "output_interval"); + filename_interval = interval_out2; } else { filename_interval = NULL; } @@ -1134,22 +1296,30 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) /* Possibly add an input alarm for this stream */ if (itype == 3 || itype == 1) { - stream_mgr_add_alarm_c(manager, streamID, "input", "start", interval_in, &err); + stream_mgr_add_alarm_c(manager, streamID, "input", "start", interval_in2, &err); if (err != 0) { *status = 1; return; } - fprintf(stderr, " %-20s%s\n", "input alarm:", interval_in); + if ( strcmp(interval_in, interval_in2) != 0 ) { + fprintf(stderr, " %-20s%s (%s)\n", "input alarm:", interval_in, interval_in2); + } else { + fprintf(stderr, " %-20s%s\n", "input alarm:", interval_in); + } } /* Possibly add an output alarm for this stream */ if (itype == 3 || itype == 2) { - stream_mgr_add_alarm_c(manager, streamID, "output", "start", interval_out, &err); + stream_mgr_add_alarm_c(manager, streamID, "output", "start", interval_out2, &err); if (err != 0) { *status = 1; return; } - fprintf(stderr, " %-20s%s\n", "output alarm:", interval_out); + if ( 
strcmp(interval_out, interval_out2) != 0 ) { + fprintf(stderr, " %-20s%s (%s)\n", "output alarm:", interval_out, interval_out2); + } else { + fprintf(stderr, " %-20s%s\n", "output alarm:", interval_out); + } } /* Possibly add packages */ @@ -1190,7 +1360,9 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) filename_template = ezxml_attr(stream_xml, "filename_template"); filename_interval = ezxml_attr(stream_xml, "filename_interval"); interval_in = ezxml_attr(stream_xml, "input_interval"); + interval_in2 = ezxml_attr(stream_xml, "input_interval"); interval_out = ezxml_attr(stream_xml, "output_interval"); + interval_out2 = ezxml_attr(stream_xml, "output_interval"); reference_time = ezxml_attr(stream_xml, "reference_time"); record_interval = ezxml_attr(stream_xml, "record_interval"); precision = ezxml_attr(stream_xml, "precision"); @@ -1198,6 +1370,24 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) clobber = ezxml_attr(stream_xml, "clobber_mode"); iotype = ezxml_attr(stream_xml, "io_type"); + /* Extract the input interval, if it refers to other streams */ + if ( interval_in ) { + sprintf(interval_type, "input_interval"); + *status = extract_stream_interval(interval_in, interval_type, &interval_in2, streamID, streams); + if ( *status != 0 ) { + return; + } + } + + /* Extract the output interval, if it refers to other streams */ + if ( interval_out ) { + sprintf(interval_type, "output_interval"); + *status = extract_stream_interval(interval_out, interval_type, &interval_out2, streamID, streams); + if ( *status != 0 ) { + return; + } + } + /* Setup filename_interval correctly. * * If filename_interval is not explicitly set... @@ -1213,22 +1403,22 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) /* If input interval is an interval (i.e. not initial_only or none) set filename_interval to the interval. 
*/ if ( strstr(interval_in, "initial_only") == NULL && strstr(interval_in, "none") == NULL ){ - filename_interval = ezxml_attr(stream_xml, "input_interval"); + filename_interval = interval_in2; /* If output interval is an interval (i.e. not initial_only or none) set filename_interval to the interval. */ } else if ( strstr(interval_out, "initial_only") == NULL && strstr(interval_out, "none") == NULL ){ - filename_interval = ezxml_attr(stream_xml, "output_interval"); + filename_interval = interval_out2; } /* Check for an input stream. */ } else if ( strstr(direction, "input") != NULL ) { if ( strstr(interval_in, "initial_only") == NULL && strstr(interval_in, "none") == NULL ){ - filename_interval = ezxml_attr(stream_xml, "input_interval"); + filename_interval = interval_in2; } /* Check for an output stream. */ } else if ( strstr(direction, "output") != NULL ) { if ( strstr(interval_out, "initial_only") == NULL && strstr(interval_out, "none") == NULL ){ - filename_interval = ezxml_attr(stream_xml, "output_interval"); + filename_interval = interval_out2; } } } else { @@ -1241,13 +1431,13 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) */ if ( strstr(filename_interval, "input_interval") != NULL ) { if ( strstr(interval_in, "initial_only") == NULL && strstr(interval_in, "none") == NULL ) { - filename_interval = ezxml_attr(stream_xml, "input_interval"); + filename_interval = interval_in2; } else { filename_interval = NULL; } } else if ( strstr(filename_interval, "output_interval") != NULL ) { if ( strstr(interval_out, "initial_only") == NULL && strstr(interval_out, "none") == NULL ) { - filename_interval = ezxml_attr(stream_xml, "output_interval"); + filename_interval = interval_out2; } else { filename_interval = NULL; } @@ -1388,22 +1578,30 @@ void xml_stream_parser(char *fname, void *manager, int *mpi_comm, int *status) /* Possibly add an input alarm for this stream */ if (itype == 3 || itype == 1) { - stream_mgr_add_alarm_c(manager, 
streamID, "input", "start", interval_in, &err); + stream_mgr_add_alarm_c(manager, streamID, "input", "start", interval_in2, &err); if (err != 0) { *status = 1; return; } - fprintf(stderr, " %-20s%s\n", "input alarm:", interval_in); + if ( strcmp(interval_in, interval_in2) != 0 ) { + fprintf(stderr, " %-20s%s (%s)\n", "input alarm:", interval_in, interval_in2); + } else { + fprintf(stderr, " %-20s%s\n", "input alarm:", interval_in); + } } /* Possibly add an output alarm for this stream */ if (itype == 3 || itype == 2) { - stream_mgr_add_alarm_c(manager, streamID, "output", "start", interval_out, &err); + stream_mgr_add_alarm_c(manager, streamID, "output", "start", interval_out2, &err); if (err != 0) { *status = 1; return; } - fprintf(stderr, " %-20s%s\n", "output alarm:", interval_out); + if ( strcmp(interval_out, interval_out2) != 0 ) { + fprintf(stderr, " %-20s%s (%s)\n", "output alarm:", interval_out, interval_out2); + } else { + fprintf(stderr, " %-20s%s\n", "output alarm:", interval_out); + } } /* Possibly add packages */ diff --git a/src/operators/mpas_geometry_utils.F b/src/operators/mpas_geometry_utils.F index 928c9bcd1f..838f8378e0 100644 --- a/src/operators/mpas_geometry_utils.F +++ b/src/operators/mpas_geometry_utils.F @@ -10,10 +10,10 @@ module mpas_geometry_utils use mpas_kind_types use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_constants use mpas_io_units use mpas_vector_operations + use mpas_abort, only : mpas_dmpar_global_abort implicit none @@ -26,51 +26,51 @@ module mpas_geometry_utils ! Equation numbers w.r.t. http://mathworld.wolfram.com/SphericalTrigonometry.html !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! real (kind=RKIND) function mpas_sphere_angle(ax, ay, az, bx, by, bz, cx, cy, cz)!{{{ - + implicit none - + real (kind=RKIND), intent(in) :: ax, ay, az, bx, by, bz, cx, cy, cz - + real (kind=RKIND) :: a, b, c ! 
Side lengths of spherical triangle ABC - + real (kind=RKIND) :: ABx, ABy, ABz ! The components of the vector AB real (kind=RKIND) :: ACx, ACy, ACz ! The components of the vector AC - + real (kind=RKIND) :: Dx ! The i-components of the cross product AB x AC real (kind=RKIND) :: Dy ! The j-components of the cross product AB x AC real (kind=RKIND) :: Dz ! The k-components of the cross product AB x AC - + real (kind=RKIND) :: s ! Semiperimeter of the triangle real (kind=RKIND) :: sin_angle - + a = acos(max(min(bx*cx + by*cy + bz*cz,1.0_RKIND),-1.0_RKIND)) ! Eqn. (3) b = acos(max(min(ax*cx + ay*cy + az*cz,1.0_RKIND),-1.0_RKIND)) ! Eqn. (2) c = acos(max(min(ax*bx + ay*by + az*bz,1.0_RKIND),-1.0_RKIND)) ! Eqn. (1) - + ABx = bx - ax ABy = by - ay ABz = bz - az - + ACx = cx - ax ACy = cy - ay ACz = cz - az - + Dx = (ABy * ACz) - (ABz * ACy) Dy = -((ABx * ACz) - (ABz * ACx)) Dz = (ABx * ACy) - (ABy * ACx) - + s = 0.5*(a + b + c) ! sin_angle = sqrt((sin(s-b)*sin(s-c))/(sin(b)*sin(c))) ! Eqn. (28) sin_angle = sqrt(min(1.0_RKIND,max(0.0_RKIND,(sin(s-b)*sin(s-c))/(sin(b)*sin(c))))) ! Eqn. (28) - + if ((Dx*ax + Dy*ay + Dz*az) >= 0.0) then mpas_sphere_angle = 2.0 * asin(max(min(sin_angle,1.0_RKIND),-1.0_RKIND)) else mpas_sphere_angle = -2.0 * asin(max(min(sin_angle,1.0_RKIND),-1.0_RKIND)) end if - + end function mpas_sphere_angle!}}} - + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! FUNCTION MPAS_PLANE_ANGLE @@ -79,64 +79,64 @@ end function mpas_sphere_angle!}}} ! a vector (u,v,w) normal to the plane. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! real (kind=RKIND) function mpas_plane_angle(ax, ay, az, bx, by, bz, cx, cy, cz, u, v, w)!{{{ - + implicit none - + real (kind=RKIND), intent(in) :: ax, ay, az, bx, by, bz, cx, cy, cz, u, v, w - + real (kind=RKIND) :: ABx, ABy, ABz ! The components of the vector AB real (kind=RKIND) :: mAB ! The magnitude of AB real (kind=RKIND) :: ACx, ACy, ACz ! 
The components of the vector AC real (kind=RKIND) :: mAC ! The magnitude of AC - + real (kind=RKIND) :: Dx ! The i-components of the cross product AB x AC real (kind=RKIND) :: Dy ! The j-components of the cross product AB x AC real (kind=RKIND) :: Dz ! The k-components of the cross product AB x AC - + real (kind=RKIND) :: cos_angle - + ABx = bx - ax ABy = by - ay ABz = bz - az mAB = sqrt(ABx**2.0 + ABy**2.0 + ABz**2.0) - + ACx = cx - ax ACy = cy - ay ACz = cz - az mAC = sqrt(ACx**2.0 + ACy**2.0 + ACz**2.0) - - + + Dx = (ABy * ACz) - (ABz * ACy) Dy = -((ABx * ACz) - (ABz * ACx)) Dz = (ABx * ACy) - (ABy * ACx) - + cos_angle = (ABx*ACx + ABy*ACy + ABz*ACz) / (mAB * mAC) - + if ((Dx*u + Dy*v + Dz*w) >= 0.0) then mpas_plane_angle = acos(max(min(cos_angle,1.0_RKIND),-1.0_RKIND)) else mpas_plane_angle = -acos(max(min(cos_angle,1.0_RKIND),-1.0_RKIND)) end if - + end function mpas_plane_angle!}}} !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! FUNCTION MPAS_ARC_LENGTH ! - ! Returns the length of the great circle arc from A=(ax, ay, az) to + ! Returns the length of the great circle arc from A=(ax, ay, az) to ! B=(bx, by, bz). It is assumed that both A and B lie on the surface of the ! same sphere centered at the origin. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! real (kind=RKIND) function mpas_arc_length(ax, ay, az, bx, by, bz)!{{{ - + implicit none - + real (kind=RKIND), intent(in) :: ax, ay, az, bx, by, bz - + real (kind=RKIND) :: r, c real (kind=RKIND) :: cx, cy, cz - + cx = bx - ax cy = by - ay cz = bz - az @@ -152,8 +152,8 @@ real (kind=RKIND) function mpas_arc_length(ax, ay, az, bx, by, bz)!{{{ mpas_arc_length = r * 2.0 * asin(c/(2.0*r)) end function mpas_arc_length!}}} - - + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! SUBROUTine mpas_arc_bisect ! @@ -162,21 +162,21 @@ end function mpas_arc_length!}}} ! surface of a sphere centered at the origin. 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine mpas_arc_bisect(ax, ay, az, bx, by, bz, cx, cy, cz)!{{{ - + implicit none - + real (kind=RKIND), intent(in) :: ax, ay, az, bx, by, bz real (kind=RKIND), intent(out) :: cx, cy, cz - + real (kind=RKIND) :: r ! Radius of the sphere - real (kind=RKIND) :: d - + real (kind=RKIND) :: d + r = sqrt(ax*ax + ay*ay + az*az) - + cx = 0.5*(ax + bx) cy = 0.5*(ay + by) cz = 0.5*(az + bz) - + if (cx == 0. .and. cy == 0. .and. cz == 0.) then write(stderrUnit,*) 'Error: arc_bisect: A and B are diametrically opposite' else @@ -185,7 +185,7 @@ subroutine mpas_arc_bisect(ax, ay, az, bx, by, bz, cx, cy, cz)!{{{ cy = r * cy / d cz = r * cz / d end if - + end subroutine mpas_arc_bisect!}}} @@ -245,9 +245,9 @@ subroutine mpas_poly_fit_2(a_in,b_out,weights_in,m,n,ne)!{{{ integer, intent(in) :: m,n,ne real (kind=RKIND), dimension(ne,ne), intent(in) :: a_in, weights_in real (kind=RKIND), dimension(ne,ne), intent(out) :: b_out - + ! local storage - + real (kind=RKIND), dimension(m,n) :: a real (kind=RKIND), dimension(n,m) :: b real (kind=RKIND), dimension(m,m) :: w,wt,h @@ -256,23 +256,23 @@ subroutine mpas_poly_fit_2(a_in,b_out,weights_in,m,n,ne)!{{{ real (kind=RKIND), dimension(n,n) :: ata, atha, atha_inv integer, dimension(n) :: indx ! integer :: i,j - + if ( (ne < n) .or. (ne < m) ) then write(stderrUnit,*) ' error in poly_fit_2 inversion ',m,n,ne call mpas_dmpar_global_abort('ERROR: in subroutine poly_fit_2()') end if - -! a(1:m,1:n) = a_in(1:n,1:m) + +! a(1:m,1:n) = a_in(1:n,1:m) a(1:m,1:n) = a_in(1:m,1:n) - w(1:m,1:m) = weights_in(1:m,1:m) - b_out(:,:) = 0. + w(1:m,1:m) = weights_in(1:m,1:m) + b_out(:,:) = 0. wt = transpose(w) h = matmul(wt,w) at = transpose(a) ath = matmul(at,h) atha = matmul(ath,a) - + ata = matmul(at,a) ! if (m == n) then @@ -289,10 +289,10 @@ subroutine mpas_poly_fit_2(a_in,b_out,weights_in,m,n,ne)!{{{ b_out(1:n,1:m) = b(1:n,1:m) ! do i=1,n -! 
write(stdoutUnit,*) ' i, indx ',i,indx(i) +! write(stderrUnit,*) ' i, indx ',i,indx(i) ! end do ! -! write(stdoutUnit,*) ' ' +! write(stderrUnit,*) ' ' end subroutine mpas_poly_fit_2!}}} @@ -337,6 +337,7 @@ subroutine mpas_calculate_barycentric_weights(point, a, b, c, meshPool, weight_a if (vertexDegree /= 3) then write (stderrUnit,*) 'Error: Barycentric weights can only be calculated if vertexDegree is 3' ierr = 1 + return endif triangleArea = mpas_triangle_signed_area(a, b, c, meshPool) @@ -383,12 +384,12 @@ end subroutine mpas_calculate_barycentric_weights!}}} !> This routine calculates calculates barycentric weights (coordinates) !> for a set of points provided. Requires a mesh with a triangular dual mesh. !> The routine will attempt to find which triangle (made of cell centers) -!> in which each point lies. Those cell center indices will be stored in the +!> in which each point lies. Those cell center indices will be stored in the !> baryCellsOnPoints array. !> The triangle search is limited to a triangle location provided to the routine !> and its surrounding triangles. The triangle location is identified by a vertex !> id, which is stored in the input array searchVertex. -!> If the pointset is vertices, the list of vertices can be passed in. If the +!> If the pointset is vertices, the list of vertices can be passed in. If the !> pointset is something else, then the callling routine must !> first determine which vertices each point is closest (or at least close) to. 
!> If no 'owning' triangle is identified in this routine, then @@ -451,8 +452,10 @@ subroutine mpas_calculate_barycentric_weights_for_points(meshPool, xPoint, yPoin logical :: in_a_triangle real(kind=RKIND) :: this_triangle_distance, nearest_triangle_distance integer :: nearest_triangle_index + integer :: err_tmp ierr = 0 + err_tmp = 0 nPoints = size(xPoint) @@ -556,8 +559,8 @@ subroutine mpas_calculate_barycentric_weights_for_points(meshPool, xPoint, yPoin triangle_c = (/ xCell(cell3), yCell(cell3), zCell(cell3) /) call mpas_calculate_barycentric_weights(point, triangle_a, triangle_b, triangle_c, meshPool, & - baryWeightsOnPoints(1, iPoint), baryWeightsOnPoints(2, iPoint), baryWeightsOnPoints(3, iPoint), ierr) - + baryWeightsOnPoints(1, iPoint), baryWeightsOnPoints(2, iPoint), baryWeightsOnPoints(3, iPoint), err_tmp) + ierr = ior(ierr, err_tmp) enddo end subroutine mpas_calculate_barycentric_weights_for_points !}}} @@ -585,11 +588,13 @@ real(kind=RKIND) function mpas_triangle_signed_area(a, b, c, meshPool)!{{{ !----------------------------------------------------------------- logical, pointer :: on_a_sphere real(kind=RKIND), dimension(3) :: normalvec + real(kind=RKIND), pointer :: radius call mpas_pool_get_config(meshPool, 'on_a_sphere', on_a_sphere) + call mpas_pool_get_config(meshPool, 'sphere_radius', radius) if (on_a_sphere) then - mpas_triangle_signed_area = mpas_triangle_signed_area_sphere(a, b, c) + mpas_triangle_signed_area = mpas_triangle_signed_area_sphere(a, b, c, radius) else normalvec = (/ 0, 0, 1 /) mpas_triangle_signed_area = mpas_triangle_signed_area_plane(a, b, c, normalvec) @@ -643,27 +648,47 @@ end function mpas_triangle_signed_area_plane !}}} !> \details !> This routine calculates the area of a triangle on the surface of a sphere. !> Uses the spherical analog of Heron's formula. -!> Copied from mesh generator. +!> Copied from mesh generator. A CCW winding angle is positive. 
!----------------------------------------------------------------------- - real(kind=RKIND) function mpas_triangle_signed_area_sphere(a, b, c)!{{{ + real(kind=RKIND) function mpas_triangle_signed_area_sphere(a, b, c, radius)!{{{ !----------------------------------------------------------------- ! input variables !----------------------------------------------------------------- real(kind=RKIND), dimension(3), intent(in) :: a, b, c !< Input: 3d (x,y,z) points forming the triangle in which to calculate the bary weights + real(kind=RKIND), intent(in) :: radius !< sphere radius !----------------------------------------------------------------- ! local variables !----------------------------------------------------------------- real(kind=RKIND) :: ab, bc, ca, semiperim, tanqe + real(kind=RKIND), dimension(3) :: ablen, aclen, Dlen - ab = mpas_arc_length(a(1), a(2), a(3), b(1), b(2), b(3)) - bc = mpas_arc_length(b(1), b(2), b(3), c(1), c(2), c(3)) - ca = mpas_arc_length(c(1), c(2), c(3), a(1), a(2), a(3)) + ab = mpas_arc_length(a(1), a(2), a(3), b(1), b(2), b(3))/radius + bc = mpas_arc_length(b(1), b(2), b(3), c(1), c(2), c(3))/radius + ca = mpas_arc_length(c(1), c(2), c(3), a(1), a(2), a(3))/radius semiperim = 0.5 * (ab + bc + ca) - tanqe = sqrt(tan(0.5_RKIND * semiperim) * tan(0.5_RKIND * (semiperim - ab)) & - * tan(0.5_RKIND * (semiperim - bc)) * tan(0.5_RKIND * (semiperim - ca)) ) + tanqe = sqrt(max(0.0_RKIND,tan(0.5_RKIND * semiperim) * tan(0.5_RKIND * (semiperim - ab)) & + * tan(0.5_RKIND * (semiperim - bc)) * tan(0.5_RKIND * (semiperim - ca)))) + + mpas_triangle_signed_area_sphere = 4.0_RKIND * radius * radius * atan(tanqe) + + ! 
computing correct signs (in similar fashion to mpas_sphere_angle) + ablen(1) = b(1) - a(1) + ablen(2) = b(2) - a(2) + ablen(3) = b(3) - a(3) + + aclen(1) = c(1) - a(1) + aclen(2) = c(2) - a(2) + aclen(3) = c(3) - a(3) + + dlen(1) = (ablen(2) * aclen(3)) - (ablen(3) * aclen(2)) + dlen(2) = -((ablen(1) * aclen(3)) - (ablen(3) * aclen(1))) + dlen(3) = (ablen(1) * aclen(2)) - (ablen(2) * aclen(1)) + + if ((Dlen(1)*a(1) + Dlen(2)*a(2) + Dlen(3)*a(3)) < 0.0) then + mpas_triangle_signed_area_sphere = -mpas_triangle_signed_area_sphere + end if - mpas_triangle_signed_area_sphere = 4.0_RKIND * atan(tanqe) end function mpas_triangle_signed_area_sphere !}}} @@ -732,7 +757,7 @@ end function mpas_point_in_polygon !}}} !> \brief Converts a single layer scalar field from cells to specified points using barycentric weights. !> \author Matt Hoffman !> \date 14 Jan 2015 -!> \details +!> \details !> This routine converts a single layer scalar field from cells to a provided set !> of point locations using barycentric weights. !> The weights should be calculated on init using the @@ -798,7 +823,74 @@ subroutine mpas_cells_to_points_using_baryweights(meshPool, baryCellsOnPoints, b end subroutine mpas_cells_to_points_using_baryweights !}}} +!*********************************************************************** +! +! subroutine mpas_unit_test_triangle_signed_area_sphere +! +!> \brief Simple unit test for testing the mpas_triangle_signed_area_sphere +!> \author Phillip J. Wolfram +!> \date 08/05/2015 +!> \details +!> This routine tests the mpas_triangle_signed_area_sphere routine. +!----------------------------------------------------------------------- + subroutine mpas_unit_test_triangle_signed_area_sphere(ierr)!{{{ + !----------------------------------------------------------------- + ! input variables + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! 
input/output variables + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! output variables + !----------------------------------------------------------------- + integer, intent(out) :: ierr + !----------------------------------------------------------------- + ! local variables + !----------------------------------------------------------------- + real (kind=RKIND), dimension(3) :: a, b, c + real (kind=RKIND) :: sphereRadius + real (kind=RKIND) :: eps = 1.0e-12_RKIND + + ierr = 0 + a = (/ 1.0_RKIND, 0.0_RKIND, 0.0_RKIND /) + b = (/ 0.0_RKIND, 1.0_RKIND, 0.0_RKIND /) + c = (/ 0.0_RKIND, 0.0_RKIND, 1.0_RKIND /) + sphereRadius = 1.0_RKIND + + ! 1/8 of spherical area = pi*r*r/8 = pi/2 for r=1.0 + if (abs((mpas_triangle_signed_area_sphere(a,b,c,sphereRadius) - pii/2.0_RKIND)/(pii/2.0_RKIND)) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_triangle_signed_area_sphere: unit sphere area wrong.' + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_triangle_signed_area_sphere: unit sphere area ok - SUCCESS' + endif + + ! 1/8 of spherical area = pi*r*r/8 = pi/2 for r=1.0 + if (abs((mpas_triangle_signed_area_sphere(a,c,b,sphereRadius) + pii/2.0_RKIND)/(pii/2.0_RKIND)) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_triangle_signed_area_sphere: unit sphere area sign wrong.' + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_triangle_signed_area_sphere: unit sphere area sign ok - SUCCESS' + endif + + ! 1/8 of spherical area = pi*r*r/8 = r*r*pi/2 for r=sphereRadius + sphereRadius = 6371220._RKIND + a = a*sphereRadius + b = b*sphereRadius + c = c*sphereRadius + if (abs((mpas_triangle_signed_area_sphere(a,b,c,sphereRadius) - pii/2.0_RKIND*sphereRadius*sphereRadius)/(pii/2.0_RKIND*sphereRadius*sphereRadius)) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_triangle_signed_area_sphere: radius sphere area wrong.' 
+ write(stderrUnit,*) mpas_triangle_signed_area_sphere(a,b,c,sphereRadius) - pii/2.0_RKIND*sphereRadius*sphereRadius + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_triangle_signed_area_sphere: radius sphere area ok - SUCCESS' + endif + + + end subroutine mpas_unit_test_triangle_signed_area_sphere!}}} !*********************************************************************** ! @@ -807,7 +899,7 @@ end subroutine mpas_cells_to_points_using_baryweights !}}} !> \brief Simple unit test for testing the mpas_point_in_polygon routine. !> \author Matt Hoffman !> \date 4 Feb 2015 -!> \details +!> \details !> This routine tests the mpas_point_in_polygon routine. !----------------------------------------------------------------------- subroutine mpas_unit_test_in_triangle(ierr)!{{{ @@ -842,14 +934,14 @@ subroutine mpas_unit_test_in_triangle(ierr)!{{{ write(stderrUnit,*) 'Error in mpas_unit_test_in_triangle: point1 calculated to be outside of triangle.' ierr = 1 else - write(stdoutUnit,*) 'mpas_unit_test_in_triangle: point1 test - SUCCESS' + write(stderrUnit,*) 'mpas_unit_test_in_triangle: point1 test - SUCCESS' endif if (mpas_point_in_polygon(point2, trianglePoints, .false.)) then write(stderrUnit,*) 'Error in mpas_unit_test_in_triangle: point2 calculated to be inside of triangle.' ierr = 1 else - write(stdoutUnit,*) 'mpas_unit_test_in_triangle: point2 test - SUCCESS' + write(stderrUnit,*) 'mpas_unit_test_in_triangle: point2 test - SUCCESS' endif end subroutine mpas_unit_test_in_triangle !}}} @@ -863,7 +955,7 @@ end subroutine mpas_unit_test_in_triangle !}}} !> \brief Simple unit test for testing the mpas_calculate_barycentric_weights routine. !> \author Matt Hoffman !> \date 4 Feb 2015 -!> \details +!> \details !> This routine tests the mpas_calculate_barycentric_weights routine. 
!----------------------------------------------------------------------- subroutine mpas_unit_test_bary_weights(meshPool, ierr)!{{{ @@ -903,7 +995,7 @@ subroutine mpas_unit_test_bary_weights(meshPool, ierr)!{{{ write(stderrUnit,*) 'Error in mpas_unit_test_bary_weights: point1 weights have error greater than tolerance.' ierr = 1 else - write(stdoutUnit,*) 'mpas_unit_test_bary_weights: point1 test - SUCCESS' + write(stderrUnit,*) 'mpas_unit_test_bary_weights: point1 test - SUCCESS' endif call mpas_calculate_barycentric_weights(point2, trianglePoints(1,:), trianglePoints(2,:), trianglePoints(3,:), meshPool, & @@ -912,11 +1004,730 @@ subroutine mpas_unit_test_bary_weights(meshPool, ierr)!{{{ write(stderrUnit,*) 'Error in mpas_unit_test_bary_weights: point1 weights have error greater than tolerance.' ierr = 1 else - write(stdoutUnit,*) 'mpas_unit_test_bary_weights: point2 test - SUCCESS' + write(stderrUnit,*) 'mpas_unit_test_bary_weights: point2 test - SUCCESS' endif end subroutine mpas_unit_test_bary_weights !}}} -!------------------------------------------------------------- + +!*********************************************************************** +! +! subroutine mpas_get_nearby_cell_index +! +!> \brief Determine nearest cell ID for a particular location +!> \author Phillip Wolfram +!> \date 01/21/2015 +!> \details +!> This routine returns the cell index for a particular location based on +!> distance to a cell center. Two approaches are employed: 1) a brute +!> force solution where the minimum distance over all cells is +!> determined, and 2) a greedy, nearest neighbor algorithm which +!> searches over all the neighbors of the lastCell index and finds the +!> nearest point. +!----------------------------------------------------------------------- + subroutine mpas_get_nearby_cell_index(nCells, xc,yc,zc , xp,yp,zp, meshPool, lastCell, cellsOnCell, nEdgesOnCell) !{{{ + implicit none + + integer, intent(in) :: nCells ! 
number of cells + integer, dimension(:,:), intent(in) :: cellsOnCell ! cell connectivity + integer, dimension(:), intent(in) :: nEdgesOnCell ! number of edges bordering cell + real (kind=RKIND), dimension(:), intent(in) :: xc,yc,zc ! center cell locations + real (kind=RKIND), intent(in) :: xp,yp,zp ! point to find cell locations + type (mpas_pool_type), intent(in), pointer :: meshPool ! meshPool pointer + + real (kind=RKIND) :: pointRadius ! magnitude of a point radius + integer :: iPoint, aPoint, cellGuess, cellID + real (kind=RKIND), dimension(3) :: xPoint + real (kind=RKIND), dimension(3, nCells) :: xCell + real (kind=RKIND) :: dx,dy,dz ! differences between two points + real (kind=RKIND) :: r2Min ! minimum squared distance found thus far + real (kind=RKIND) :: r2 ! squared distance + logical, pointer :: on_a_sphere ! flag designating if we are on a sphere + logical, pointer :: is_periodic ! flag designating if periodicity is important + real(kind=RKIND), pointer :: x_period, y_period + + integer, intent(inout) :: lastCell ! last known cell number + + call mpas_pool_get_config(meshPool, 'on_a_sphere', on_a_sphere) + call mpas_pool_get_config(meshPool, 'is_periodic', is_periodic) + call mpas_pool_get_config(meshPool, 'x_period', x_period) + call mpas_pool_get_config(meshPool, 'y_period', y_period) + + ! could also be less than 1 for greater use in other cases (used in general since first index is 1) + if (lastCell < 1) then + ! brute force solution + !{{{ + ! 
normalize locations to same spherical shell (unit) for direct comparison !{{{ + if (on_a_sphere) then + pointRadius = sqrt(xp*xp + yp*yp + zp*zp) + xPoint = (/ xp/pointRadius , yp/pointRadius , zp/pointRadius /) + do aPoint = 1, nCells + pointRadius = sqrt(xc(aPoint)*xc(aPoint) + yc(aPoint)*yc(aPoint) + zc(aPoint)*zc(aPoint)) + xCell(:,aPoint) = (/ xc(aPoint), yc(aPoint), zc(aPoint) /) / pointRadius + end do + else + xPoint = (/ xp,yp,zp /) + do aPoint = 1, nCells + if (is_periodic) then + xCell(:, aPoint) = (/ mpas_fix_periodicity(xc(aPoint), xp, x_period), mpas_fix_periodicity(yc(aPoint), yp, y_period), zc(aPoint) /) + else + xCell(:, aPoint) = (/ xc(aPoint), yc(aPoint), zc(aPoint) /) + end if + end do + end if + !}}} + + ! brute force solution + cellID = 1 + dx = xPoint(1) - xCell(1,cellID) + dy = xPoint(2) - xCell(2,cellID) + dz = xPoint(3) - xCell(3,cellID) + r2Min = dx*dx + dy*dy + dz*dz + + ! loop over all the other cells to get minimum + do aPoint = 2, nCells + ! compute squared distance + dx = xPoint(1) - xCell(1,aPoint) + dy = xPoint(2) - xCell(2,aPoint) + dz = xPoint(3) - xCell(3,aPoint) + r2 = dx*dx + dy*dy + dz*dz + if ( r2 < r2Min) then + ! we have a new closest point + cellID = aPoint + r2Min = r2 + end if + end do + !}}} + else + ! greedy, nearest neighbor algorithm + !{{{ + + ! search neighbors via greedy algorithm + !write(stderrUnit,*) 'get_cell_id', lastCell + cellGuess = lastCell + cellID = -1 + ! loop over closest cells to make this a greedy method + do while (cellID /= cellGuess) + !write(stderrUnit,*) cellGuess, cellID + + ! we have a known cell + cellID = cellGuess + + ! normalize locations to same spherical shell (unit) for direct comparison !{{{ + if (on_a_sphere) then + pointRadius = sqrt(xp*xp + yp*yp + zp*zp) + xPoint = (/ xp/pointRadius , yp/pointRadius , zp/pointRadius /) + ! 
for point itself + pointRadius = sqrt(xc(cellID)*xc(cellID) + yc(cellID)*yc(cellID) + zc(cellID)*zc(cellID)) + xCell(:,cellID) = (/ xc(cellID), yc(cellID), zc(cellID) /) / pointRadius + ! for point neighbors + do iPoint = 1, nEdgesOnCell(cellID) + aPoint = cellsOnCell(iPoint, cellID) + if (aPoint > nCells) cycle + pointRadius = sqrt(xc(aPoint)*xc(aPoint) + yc(aPoint)*yc(aPoint) + zc(aPoint)*zc(aPoint)) + xCell(:,aPoint) = (/ xc(aPoint), yc(aPoint), zc(aPoint) /) / pointRadius + end do + else + xPoint = (/ xp,yp,zp /) + ! for point itself + if (is_periodic) then + xCell(:, cellID) = (/ mpas_fix_periodicity(xc(cellID), xp, x_period), mpas_fix_periodicity(yc(cellID), yp, y_period), zc(cellID) /) + else + xCell(:, cellID) = (/ xc(cellID), yc(cellID), zc(cellID) /) + end if + ! for point neighbors + do iPoint = 1, nEdgesOnCell(cellID) + aPoint = cellsOnCell(iPoint, cellID) + if (aPoint > nCells) cycle + if (is_periodic) then + xCell(:, aPoint) = (/ mpas_fix_periodicity(xc(aPoint), xp, x_period), mpas_fix_periodicity(yc(aPoint), yp, y_period), zc(aPoint) /) + else + xCell(:, aPoint) = (/ xc(aPoint), yc(aPoint), zc(aPoint) /) + end if + end do + end if + !}}} + + dx = xPoint(1) - xCell(1,cellID) + dy = xPoint(2) - xCell(2,cellID) + dz = xPoint(3) - xCell(3,cellID) + r2Min = dx*dx + dy*dy + dz*dz + !write(stderrUnit,*) 'r2Min_1 =', r2Min, ' cellID = ', cellID + + ! loop over all the other on the existing cell to see if we have found a closer cell + do iPoint = 1, nEdgesOnCell(cellGuess) + aPoint = cellsOnCell(iPoint, cellID) + if (aPoint > nCells) cycle + ! compute squared distance + dx = xPoint(1) - xCell(1,aPoint) + dy = xPoint(2) - xCell(2,aPoint) + dz = xPoint(3) - xCell(3,aPoint) + r2 = dx*dx + dy*dy + dz*dz + if ( r2 < r2Min) then + ! we have a new closest point + cellGuess = aPoint + r2Min = r2 + end if + end do + !write(stderrUnit,*) 'r2Min_2 = ' ,r2Min, ' cellguess = ', cellguess + end do + !}}} + end if + + ! return value + lastCell = cellID + + ! 
could also perform a hit-test to make sure we got the right cell just to be sure + ! this should be done outside this function for clarity + + end subroutine mpas_get_nearby_cell_index !}}} + + +!*********************************************************************** +! +! function mpas_get_vertical_id +! +!> \brief Determines vertical level index for a location +!> \author Phillip Wolfram +!> \date 05/13/2014 +!> \details +!> This function determines the nearest vertical level to a particular +!> location within a column. If the location is below the bottom +!> vertical level this returns -1, if it is above the top level, 0. +!> Levels are referenced to vertical cell centers. +!----------------------------------------------------------------------- + function mpas_get_vertical_id(nLevels, zLoc, zMid) !{{{ + + implicit none + ! in / out variables + integer :: mpas_get_vertical_id ! index of vertical level + real (kind=RKIND), intent(in) :: zLoc ! z-level location of point + integer, intent(in) :: nLevels ! number of vertical levels + real (kind=RKIND), dimension(:), intent(in) :: zMid ! elevation of cell centers + + ! local variables + integer :: aLevel ! particular level returned + + ! cases except for bottom cell, if location is above the surface this yields 0 + do aLevel = 1, nLevels-1 + if((zMid(aLevel) <= zLoc .and. zLoc <= zMid(aLevel+1)) .or. (zMid(aLevel+1) <= zLoc .and. zLoc <= zMid(aLevel))) then + ! the point is bounded by the levels (store the bottom level) + mpas_get_vertical_id = aLevel + return + end if + end do + + if(zLoc < minval(zMid(1:nLevels))) then + ! case where location is smallest value + mpas_get_vertical_id = -1 + return + else if (zLoc > maxval(zMid(1:nLevels))) then + ! case where location is largest value + mpas_get_vertical_id = 0 + return + end if + + end function mpas_get_vertical_id !}}} + + +!*********************************************************************** +! +! function mpas_wachspress_coordinates +! 
+!> \brief Compute the barycentric Wachspress coordinates for a polygon +!> \author Phillip Wolfram +!> \date 01/26/2015 +!> \details +!> Computes the barycentric Wachspress coordinates for a polygon with nVertices +!> points in R3, vertCoords for a particular pointInterp with normalized radius. +!> Follows Gillette, A., Rand, A., Bajaj, C., 2011. +!> Error estimates for generalized barycentric interpolation. +!> Advances in computational mathematics 37 (3), 417–439. +!> Optimized version of mpas_wachspress_coordinates uses optional cached B_i areas +!------------------------------------------------------------------------ + function mpas_wachspress_coordinates(nVertices, vertCoords, pointInterp, meshPool, areaBin) !{{{ + implicit none + + ! input points + integer, intent(in) :: nVertices + real (kind=RKIND), dimension(3, nVertices), intent(in) :: vertCoords + real (kind=RKIND), dimension(3), intent(in) :: pointInterp + real (kind=RKIND), dimension(nVertices), optional, intent(in) :: areaBin + type (mpas_pool_type), pointer :: meshPool + ! output + real (kind=RKIND), dimension(nVertices) :: mpas_wachspress_coordinates + ! computational intermediates + real (kind=RKIND), dimension(nVertices) :: wach ! The wachpress area-product + real (kind=RKIND) :: wach_total ! The wachpress total weight + integer :: i, j ! Loop indices + integer :: im1, i0, ip1 ! im1 = (i-1), i0 = i, ip1 = (i+1) + + ! triangle areas to compute wachspress coordinate + real (kind=RKIND), dimension(nVertices) :: areaA + real (kind=RKIND), dimension(nVertices) :: areaB + + logical, pointer :: on_a_sphere + real(kind=RKIND), pointer :: sphere_radius + real(kind=RKIND) :: radiusLocal + + call mpas_pool_get_config(meshPool, 'on_a_sphere', on_a_sphere) + call mpas_pool_get_config(meshPool, 'sphere_radius', sphere_radius) + if ( on_a_sphere ) then + radiusLocal = sphere_radius + else + radiusLocal = 1.0_RKIND + end if + + if (.not. present(areaBin)) then + ! compute areas + do i = 1, nVertices + ! 
compute first area B_i + ! get vertex indices + im1 = mod(nVertices + i - 2, nVertices) + 1 + i0 = mod(nVertices + i - 1, nVertices) + 1 + ip1 = mod(nVertices + i , nVertices) + 1 + + ! precompute B_i areas + ! always the same because B_i independent of xp,yp,zp + ! (COULD CACHE AND USE RESULT FROM ARRAY FOR FURTHER OPTIMIZATION) + areaB(i) = mpas_triangle_signed_area(vertCoords(:, im1), vertCoords(:, i0), vertCoords(:, ip1), meshPool) + end do + else + ! assign areas + do i = 1, nVertices + areaB(i) = areaBin(i) + end do + end if + + ! compute areas + do i = 1, nVertices + ! compute first area B_i + ! get vertex indices + im1 = mod(nVertices + i - 2, nVertices) + 1 + i0 = mod(nVertices + i - 1, nVertices) + 1 + ip1 = mod(nVertices + i , nVertices) + 1 + + ! compute A_ij areas + ! must be computed each time + areaA(i0) = mpas_triangle_signed_area(pointInterp, vertCoords(:, i0), vertCoords(:, ip1), meshPool) + + ! precomputed B_i areas, cached + end do + + + ! for each vertex compute wachpress coordinate + do i = 1, nVertices + wach(i) = areaB(i) + do j = (i + 1), (i + nVertices - 2) + i0 = mod(nVertices + j - 1, nVertices) + 1 + ! accumulate products for A_ij subareas + wach(i) = wach(i) * areaA(i0) + end do + end do + + ! get summed weights for normalization + wach_total = 0 + do i = 1, nVertices + wach_total = wach_total + wach(i) + end do + + ! compute lambda + mpas_wachspress_coordinates= 0.0_RKIND + do i = 1, nVertices + mpas_wachspress_coordinates(i) = wach(i)/wach_total + end do + + end function mpas_wachspress_coordinates!}}} + + +!*********************************************************************** +! +! routine mpas_wachspress_interpolate +! +!> \brief Interpolate using barycentric Wachspress coordinates +!> \author Phillip Wolfram +!> \date 03/27/2015 +!> \details +!> Interpolate using the barycentric Wachspress coordinates for a polygon with nVertices +!> having values phi. 
+!------------------------------------------------------------------------ + real (kind=RKIND) function mpas_wachspress_interpolate(lambda, phi) !{{{ + implicit none + + ! input points + real (kind=RKIND), dimension(:), intent(in) :: lambda !< Input: Wachspress coordinate / weight + real (kind=RKIND), dimension(:), intent(in) :: phi !< Input: values at lambda weights + ! output for function + !real (kind=RKIND), intent(out) :: mpas_wachspress_interpolate + + mpas_wachspress_interpolate = sum(phi * lambda) + + end function mpas_wachspress_interpolate! }}} + + +!*********************************************************************** +! +! subroutine mpas_unit_test_wachspress_hexagon +! +!> \brief Simple unit test for testing wachspress interpolation on a hexagon. +!> \author Phillip J. Wolfram +!> \date 07/21/2015 +!> \details +!> This routine tests the mpas_wachspress_coordinates and +!> mpas_wachspress_interpolate routines on a hexagon. +!----------------------------------------------------------------------- + subroutine mpas_unit_test_wachspress_hexagon(ierr)!{{{ + !----------------------------------------------------------------- + ! input variables + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! input/output variables + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! output variables + !----------------------------------------------------------------- + integer, intent(out) :: ierr + !----------------------------------------------------------------- + ! 
local variables + !----------------------------------------------------------------- + integer, parameter :: nVertices = 6 + real (kind=RKIND), dimension(3, nVertices) :: vertCoords + real (kind=RKIND), dimension(nVertices) :: vertValues + real (kind=RKIND), dimension(3) :: pointInterp + type (mpas_pool_type), pointer :: meshPool + integer :: j + real (kind=RKIND), dimension(nVertices) :: lambda + real (kind=RKIND), dimension(3) :: velocity + real (kind=RKIND) :: eps = 1.0e-12_RKIND + real (kind=RKIND) :: theta, interpValue + + ierr = 0 +#if MPAS_DEBUG + write(stderrUnit,*) 'starting mpas_unit_test_wachspress_hexagon' +#endif + ! set on a plane + call mpas_pool_create_pool(meshPool) + call mpas_pool_add_config(meshPool,'on_a_sphere', .False.) + call mpas_pool_add_config(meshPool, 'sphere_radius', 1.0_RKIND) + ! hexagon geometry values + do j = 1,nVertices + theta = 2*pii/nVertices * (j-1) + vertCoords(1,j) = cos(theta) + vertCoords(2,j) = sin(theta) + vertCoords(3,j) = 0.0_RKIND + vertValues(j) = 3.0_RKIND + vertCoords(1,j) + 2.0_RKIND*vertCoords(2,j) + end do + + ! interpolation at hexagon center + pointInterp(:) = (/0.0_RKIND, 0.0_RKIND, 0.0_RKIND /) + ! test without precached areas + lambda = mpas_wachspress_coordinates(nVertices, vertCoords, pointInterp, meshPool) + interpValue = mpas_wachspress_interpolate(lambda, vertValues) + if (abs(interpValue - 3.0_RKIND) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_wachspress_hexagon: test1 - FAILED.' + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_wachspress_hexagon: test1 - SUCCESS' + end if + + ! interpolation at vertex + pointInterp(:) = (/1.0_RKIND, 0.0_RKIND, 0.0_RKIND /) + ! test without precached areas + lambda = mpas_wachspress_coordinates(nVertices, vertCoords, pointInterp, meshPool) + interpValue = mpas_wachspress_interpolate(lambda, vertValues) + if (abs(interpValue - 4.0_RKIND) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_wachspress_hexagon: test2 - FAILED.' 
+ ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_wachspress_hexagon: test2 - SUCCESS' + end if + + call mpas_pool_destroy_pool(meshPool) +#if MPAS_DEBUG + write(stderrUnit,*) 'finished mpas_unit_test_wachspress_hexagon' +#endif + + end subroutine mpas_unit_test_wachspress_hexagon!}}} + + +!*********************************************************************** +! +! subroutine mpas_unit_test_wachspress_triangle +! +!> \brief Simple unit test for testing wachspress interpolation on a triangle. +!> \author Phillip J. Wolfram +!> \date 07/21/2015 +!> \details +!> This routine tests the mpas_wachspress_coordinates and +!> mpas_wachspress_interpolate routines on a triangle. +!----------------------------------------------------------------------- + subroutine mpas_unit_test_wachspress_triangle(ierr)!{{{ + !----------------------------------------------------------------- + ! input variables + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! input/output variables + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! output variables + !----------------------------------------------------------------- + integer, intent(out) :: ierr + !----------------------------------------------------------------- + ! 
local variables + !----------------------------------------------------------------- + integer, parameter :: nVertices = 3 + real (kind=RKIND), dimension(3, nVertices) :: vertCoords + real (kind=RKIND), dimension(nVertices) :: vertValues + real (kind=RKIND), dimension(3) :: pointInterp + type (mpas_pool_type), pointer :: meshPool + integer :: j + real (kind=RKIND), dimension(nVertices) :: lambda + real (kind=RKIND), dimension(3) :: velocity + real (kind=RKIND) :: eps = 1.0e-12_RKIND + real (kind=RKIND) :: interpValue + + ierr = 0 +#if MPAS_DEBUG + write(stderrUnit,*) 'starting mpas_unit_test_wachspress_triangle' +#endif + ! set on a plane + call mpas_pool_create_pool(meshPool) + call mpas_pool_add_config(meshPool,'on_a_sphere', .False.) + call mpas_pool_add_config(meshPool, 'sphere_radius', 1.0_RKIND) + ! triangle geometry values + vertCoords(:,1) = (/ 1.0_RKIND, 0.0_RKIND, 0.0_RKIND /) + vertCoords(:,2) = (/ 0.0_RKIND, 1.0_RKIND, 0.0_RKIND /) + vertCoords(:,3) = (/ 0.0_RKIND, 0.0_RKIND, 0.0_RKIND /) + vertValues(:) = 3.0_RKIND + vertCoords(1,:) + 2.0_RKIND*vertCoords(2,:) + + ! interpolation at triangle interior + pointInterp(:) = (/0.25_RKIND, 0.25_RKIND, 0.0_RKIND /) + ! test without precached areas + lambda = mpas_wachspress_coordinates(nVertices, vertCoords, pointInterp, meshPool) + interpValue = mpas_wachspress_interpolate(lambda, vertValues) + if (abs(interpValue - 3.75_RKIND) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_wachspress_triangle: test1 - FAILED.' + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_wachspress_triangle: test1 - SUCCESS' + end if + + ! interpolation at vertex + pointInterp(:) = (/1.0_RKIND, 0.0_RKIND, 0.0_RKIND /) + ! 
test without precached areas + lambda = mpas_wachspress_coordinates(nVertices, vertCoords, pointInterp, meshPool) + interpValue = mpas_wachspress_interpolate(lambda, vertValues) + if (abs(interpValue - 4.0_RKIND) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_wachspress_triangle: test2 - FAILED.' + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_wachspress_triangle: test2 - SUCCESS' + end if + + call mpas_pool_destroy_pool(meshPool) +#if MPAS_DEBUG + write(stderrUnit,*) 'finished mpas_unit_test_wachspress_triangle' +#endif + + end subroutine mpas_unit_test_wachspress_triangle!}}} + + +!*********************************************************************** +! +! routine mpas_convert_xyz_velocity_to_latlon +! +!> \brief Determine the zonal and meridional velocity field +!> \author Phillip Wolfram +!> \date 09/13/2014 +!> \details +!> Returns the zonal and meridional velocity field (Uzon,Umer) from +!> velocity field defined in R3: (x,y,z) point with velocity +!> vectors (Ux,Uy,Uz). +!------------------------------------------------------------------------ + subroutine mpas_convert_xyz_velocity_to_latlon(Uzon, Umer, xyzPoint, xyzVel) !{{{ + implicit none + + ! input + real(kind=RKIND), dimension(3), intent(in) :: xyzPoint, xyzVel + ! output + real(kind=RKIND), intent(out) :: Uzon, Umer + ! local + real(kind=RKIND) :: Rxy, Rxyz, slon,clon,slat,clat + + ! test for singularities at the poles + if(xyzPoint(1) == 0.0_RKIND .and. xyzPoint(2) == 0.0_RKIND) then +#ifdef MPAS_DEBUG + write(stderrUnit,*) 'Point is located at a pole, cannot convert accurately to lat/lon velocity!' +#endif + ! velocity is undefined but return 0s for testing purposes + Uzon = 0.0_RKIND + Umer = 0.0_RKIND + return + end if + + ! 
compute geometric coordinate transform coefficients + Rxy = sqrt(xyzPoint(1)**2.0_RKIND + xyzPoint(2)**2.0_RKIND) + Rxyz = sqrt(xyzPoint(1)**2.0_RKIND + xyzPoint(2)**2.0_RKIND + xyzPoint(3)**2.0_RKIND) + slon = xyzPoint(2)/Rxy + clon = xyzPoint(1)/Rxy + slat = xyzPoint(3)/Rxyz + clat = Rxy/Rxyz + + + ! compute the zonal and meridional velocity fields + Uzon = -slon*xyzVel(1) + clon*xyzVel(2) + Umer = -slat*(clon*xyzVel(1) + slon*xyzVel(2)) + clat*xyzVel(3) + + end subroutine mpas_convert_xyz_velocity_to_latlon !}}} + + +!*********************************************************************** +! +! subroutine mpas_unit_test_velocity_conversion +! +!> \brief Simple unit test for testing the mpas_convert_xyz_velocity_to_latlon routine. +!> \author Phillip J. Wolfram +!> \date 07/15/2015 +!> \details +!> This routine tests the mpas_convert_xyz_velocity_to_latlon routine. +!----------------------------------------------------------------------- + subroutine mpas_unit_test_velocity_conversion(ierr)!{{{ + !----------------------------------------------------------------- + ! input variables + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! input/output variables + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! output variables + !----------------------------------------------------------------- + integer, intent(out) :: ierr + !----------------------------------------------------------------- + ! local variables + !----------------------------------------------------------------- + real (kind=RKIND), dimension(3) :: sphereLocation + real (kind=RKIND), dimension(3) :: velocity + real (kind=RKIND) :: zonalVel, meridVel + real (kind=RKIND) :: eps = 1.0e-12_RKIND + + ierr = 0 + + ! 
equator case (merid vel) + sphereLocation = (/ 1.0_RKIND, 0.0_RKIND, 0.0_RKIND /) + velocity = (/ 0.0_RKIND, 0.0_RKIND, 1.0_RKIND /) + call mpas_convert_xyz_velocity_to_latlon(zonalVel, meridVel, sphereLocation, velocity) + if (abs(zonalVel - 0.0_RKIND ) > eps .or. & + abs(meridVel - 1.0_RKIND ) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_velocity_conversion: test1 - FAILED.' + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_velocity_conversion: test1 - SUCCESS' + end if + + ! equator case (merid vel) + sphereLocation = (/ 0.0_RKIND, 1.0_RKIND, 0.0_RKIND /) + velocity = (/ 0.0_RKIND, 0.0_RKIND, 1.0_RKIND /) + call mpas_convert_xyz_velocity_to_latlon(zonalVel, meridVel, sphereLocation, velocity) + if (abs(zonalVel - 0.0_RKIND ) > eps .or. & + abs(meridVel - 1.0_RKIND ) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_velocity_conversion: test2 - FAILED.' + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_velocity_conversion: test2 - SUCCESS' + end if + + ! pole case + sphereLocation = (/ 0.0_RKIND, 0.0_RKIND, 1.0_RKIND /) + velocity = (/ 0.0_RKIND, 0.0_RKIND, 1.0_RKIND /) + call mpas_convert_xyz_velocity_to_latlon(zonalVel, meridVel, sphereLocation, velocity) + if (abs(zonalVel - 0.0_RKIND ) > eps .or. & + abs(meridVel - 0.0_RKIND ) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_velocity_conversion: test3 - FAILED.' + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_velocity_conversion: test3 - SUCCESS' + end if + + ! equator case (zonal vel) + sphereLocation = (/ 1.0_RKIND, 0.0_RKIND, 0.0_RKIND /) + velocity = (/ 0.0_RKIND, 1.0_RKIND, 0.0_RKIND /) + call mpas_convert_xyz_velocity_to_latlon(zonalVel, meridVel, sphereLocation, velocity) + if (abs(zonalVel - 1.0_RKIND ) > eps .or. & + abs(meridVel - 0.0_RKIND ) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_velocity_conversion: test3 - FAILED.' + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_velocity_conversion: test3 - SUCCESS' + end if + + ! 
equator case (zonal vel) + sphereLocation = (/ 0.0_RKIND, 1.0_RKIND, 0.0_RKIND /) + velocity = (/ -1.0_RKIND, 0.0_RKIND, 0.0_RKIND /) + call mpas_convert_xyz_velocity_to_latlon(zonalVel, meridVel, sphereLocation, velocity) + if (abs(zonalVel - 1.0_RKIND ) > eps .or. & + abs(meridVel - 0.0_RKIND ) > eps) then + write(stderrUnit,*) 'Error in mpas_unit_test_velocity_conversion: test4 - FAILED.' + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_velocity_conversion: test4 - SUCCESS' + end if + + end subroutine mpas_unit_test_velocity_conversion!}}} + + +!*********************************************************************** +! +! routine mpas_spherical_linear_interp +! +!> \brief Spherical linear interpolation of a point p0 to p1 +!> \author Phillip Wolfram +!> \date 04/21/2014 +!> \details +!> Returns the point following a spherical linear interpolation from p0 to p1 +!> with alpha = 0 => p0 and alpha = 1 => p1. All interpolated points are +!> on great circle route between p0 and p1, assuming that p0 and p1 are +!> on the same spherical shell. +!------------------------------------------------------------------------ + subroutine mpas_spherical_linear_interp(pInterp, p0, p1, alpha) !{{{ + + implicit none + ! input variables + real (kind=RKIND), dimension(3), intent(in) :: p0 + real (kind=RKIND), dimension(3), intent(in) :: p1 + real (kind=RKIND), intent(in) :: alpha + + ! function output variable + real (kind=RKIND), dimension(3), intent(out) :: pInterp + + ! local variables + real (kind=RKIND) :: p0mag, p1mag, omega, dotProd + real (kind=RKIND), dimension(size(p0)) :: p0scaled + real (kind=RKIND), dimension(size(p1)) :: p1scaled + real (kind=RKIND), parameter :: eps = 1e-14_RKIND + + ! compute omega for angle of subtended arc + p0mag = sqrt(sum(p0 * p0)) + p1mag = sqrt(sum(p1 * p1)) + ! scale for stability + p0scaled = p0/p0mag + p1scaled = p1/p1mag + dotProd = min(1.0_RKIND,max(-1.0_RKIND,sum(p0scaled * p1scaled))) + ! 
may need to potentially handle round-off errors with max? + omega = acos(dotProd) + ! handle case where p0 \approx p1 + if(abs(omega) < eps ) then + pInterp = p0 + return + end if + pInterp = sin( (1.0_RKIND-alpha) * omega) / sin(omega) * p0 & + + sin(alpha * omega) / sin(omega) * p1 + + end subroutine mpas_spherical_linear_interp !}}} end module mpas_geometry_utils diff --git a/src/operators/mpas_matrix_operations.F b/src/operators/mpas_matrix_operations.F index 0a7dc6b51a..7008e44500 100644 --- a/src/operators/mpas_matrix_operations.F +++ b/src/operators/mpas_matrix_operations.F @@ -22,6 +22,7 @@ module mpas_matrix_operations use mpas_pool_routines use mpas_constants use mpas_io_units + use mpas_abort, only : mpas_dmpar_global_abort implicit none private diff --git a/src/operators/mpas_tensor_operations.F b/src/operators/mpas_tensor_operations.F index 6aad96f495..bb07875dd4 100644 --- a/src/operators/mpas_tensor_operations.F +++ b/src/operators/mpas_tensor_operations.F @@ -27,6 +27,7 @@ module mpas_tensor_operations use mpas_matrix_operations use mpas_dmpar use mpas_io_units + use mpas_abort, only : mpas_dmpar_global_abort implicit none private diff --git a/src/operators/mpas_tracer_advection_helpers.F b/src/operators/mpas_tracer_advection_helpers.F index e68f04263f..a6eaf5bc80 100644 --- a/src/operators/mpas_tracer_advection_helpers.F +++ b/src/operators/mpas_tracer_advection_helpers.F @@ -21,7 +21,6 @@ module mpas_tracer_advection_helpers use mpas_kind_types use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_sort use mpas_geometry_utils use mpas_io_units diff --git a/src/operators/mpas_tracer_advection_mono.F b/src/operators/mpas_tracer_advection_mono.F index a2676303be..66ec000175 100644 --- a/src/operators/mpas_tracer_advection_mono.F +++ b/src/operators/mpas_tracer_advection_mono.F @@ -22,6 +22,7 @@ module mpas_tracer_advection_mono use mpas_derived_types use mpas_pool_routines use mpas_io_units + use mpas_abort, only : 
mpas_dmpar_global_abort use mpas_tracer_advection_helpers diff --git a/src/operators/mpas_vector_operations.F b/src/operators/mpas_vector_operations.F index dfb706bd5a..db0da10e9f 100644 --- a/src/operators/mpas_vector_operations.F +++ b/src/operators/mpas_vector_operations.F @@ -49,7 +49,10 @@ module mpas_vector_operations mpas_vector_R3Cell_to_normalVectorEdge, & mpas_vector_R3_to_LonLatR, & mpas_vector_LonLatR_to_R3, & - mpas_zonal_meridional_vectors + mpas_zonal_meridional_vectors, & + mpas_fix_periodicity, & + mpas_unit_test_fix_periodicity + !-------------------------------------------------------------------- ! @@ -147,7 +150,7 @@ subroutine mpas_vector_R3Cell_to_2DEdge(vectorR3Cell, & real (kind=RKIND), dimension(:,:), intent(in) :: & edgeTangentVectors !< Input: unit vector tangent to an edge - logical, intent(in) :: & + logical, intent(in) :: & includeHalo !< Input: If true, halo cells and edges are included in computation !----------------------------------------------------------------- @@ -178,7 +181,7 @@ subroutine mpas_vector_R3Cell_to_2DEdge(vectorR3Cell, & if (includeHalo) then call mpas_pool_get_dimension(meshPool, 'nEdges', nEdgesCompute) - else + else call mpas_pool_get_dimension(meshPool, 'nEdgesSolve', nEdgesCompute) endif call mpas_pool_get_dimension(meshPool, 'nVertLevels', nVertLevels) @@ -193,7 +196,7 @@ subroutine mpas_vector_R3Cell_to_2DEdge(vectorR3Cell, & do k=1,nVertLevels ! average neighboring cell-centered vectors to the edge - vectorR3Edge(:) = 0.5*(vectorR3Cell(:,k,cell1) + vectorR3Cell(:,k,cell2)) + vectorR3Edge(:) = 0.5_RKIND*(vectorR3Cell(:,k,cell1) + vectorR3Cell(:,k,cell2)) ! normal component at edge: take dot products with unit vectors at edge normalVectorEdge(k,iEdge) = sum(vectorR3Edge(:)*edgeNormalVectors(:,iEdge)) @@ -274,7 +277,7 @@ subroutine mpas_vector_R3Cell_to_normalVectorEdge(vectorR3Cell, & do k=1,nVertLevels ! 
average neighboring cell-centered vectors to the edge - vectorR3Edge(:) = 0.5*(vectorR3Cell(:,k,cell1) + vectorR3Cell(:,k,cell2)) + vectorR3Edge(:) = 0.5_RKIND*(vectorR3Cell(:,k,cell1) + vectorR3Cell(:,k,cell2)) ! normal component at edge: take dot products with unit vectors at edge normalVectorEdge(k,iEdge) = sum(vectorR3Edge(:)*edgeNormalVectors(:,iEdge)) @@ -442,7 +445,7 @@ end subroutine mpas_tangential_vector_1d!}}} !> \author Mark Petersen !> \date 1 May 2013 !> \details -!> Given a latitude and longitude location, compute unit vectors pointing +!> Given a latitude and longitude location, compute unit vectors pointing !> in the zonal, meridional, and vertical directions. ! !----------------------------------------------------------------------- @@ -525,7 +528,7 @@ subroutine mpas_vector_R3_to_LonLatR(vectorR3, lon, lat, vectorLonLatR)!{{{ real (kind=RKIND), intent(in) :: & lon, &!< Input: longitude, in radians, ranging [0,2*pi] lat !< Input: latitude, in radians, ranging [-pi,pi] - + !----------------------------------------------------------------- ! ! output variables @@ -552,7 +555,7 @@ subroutine mpas_vector_R3_to_LonLatR(vectorR3, lon, lat, vectorLonLatR)!{{{ rotationMatrix(:,2) = meridionalUnitVector rotationMatrix(:,3) = verticalUnitVector - vectorLonLatR = 0.0 + vectorLonLatR = 0.0_RKIND do i=1,3 do j=1,3 ! xi = R^T x @@ -590,7 +593,7 @@ subroutine mpas_vector_LonLatR_to_R3(vectorLonLatR, lon, lat, vectorR3)!{{{ real (kind=RKIND), intent(in) :: & lon, &!< Input: longitude, in radians, ranging [0,2*pi] lat !< Input: latitude, in radians, ranging [-pi,pi] - + !----------------------------------------------------------------- ! ! 
output variables @@ -617,7 +620,7 @@ subroutine mpas_vector_LonLatR_to_R3(vectorLonLatR, lon, lat, vectorR3)!{{{ rotationMatrix(:,2) = meridionalUnitVector rotationMatrix(:,3) = verticalUnitVector - vectorR3 = 0.0 + vectorR3 = 0.0_RKIND do i=1,3 do j=1,3 vectorR3(i) = vectorR3(i) + rotationMatrix(i,j)*vectorLonLatR(j) @@ -659,7 +662,12 @@ subroutine mpas_initialize_vectors(meshPool)!{{{ real(kind=RKIND), dimension(:,:,:), pointer :: cellTangentPlane real(kind=RKIND), dimension(3) :: xHatPlane, yHatPlane, rHat real(kind=RKIND) :: normalDotRHat - logical, pointer :: on_a_sphere + logical, pointer :: on_a_sphere, is_periodic + real(kind=RKIND), pointer :: x_period, y_period + + call mpas_pool_get_config(meshPool, 'is_periodic', is_periodic) + call mpas_pool_get_config(meshPool, 'x_period', x_period) + call mpas_pool_get_config(meshPool, 'y_period', y_period) call mpas_pool_get_config(meshPool, 'on_a_sphere', on_a_sphere) @@ -705,24 +713,42 @@ subroutine mpas_initialize_vectors(meshPool)!{{{ if (cell1 == nCells+1) then ! this is a boundary edge ! the normal points from the cell location to the edge location - edgeNormalVectors(1,iEdge) = xEdge(iEdge) - xCell(cell2) - edgeNormalVectors(2,iEdge) = yEdge(iEdge) - yCell(cell2) - edgeNormalVectors(3,iEdge) = zEdge(iEdge) - zCell(cell2) - - elseif (cell2 == nCells+1) then ! this is a boundary edge + if (is_periodic) then + edgeNormalVectors(1,iEdge) = mpas_fix_periodicity(xEdge(iEdge), xCell(cell2), x_period) - xCell(cell2) + edgeNormalVectors(2,iEdge) = mpas_fix_periodicity(yEdge(iEdge), yCell(cell2), y_period) - yCell(cell2) + edgeNormalVectors(3,iEdge) = zEdge(iEdge) - zCell(cell2) + else + edgeNormalVectors(1,iEdge) = xEdge(iEdge) - xCell(cell2) + edgeNormalVectors(2,iEdge) = yEdge(iEdge) - yCell(cell2) + edgeNormalVectors(3,iEdge) = zEdge(iEdge) - zCell(cell2) + end if + + else if (cell2 == nCells+1) then ! this is a boundary edge ! 
the normal points from the cell location to the edge location - edgeNormalVectors(1,iEdge) = xEdge(iEdge) - xCell(cell1) - edgeNormalVectors(2,iEdge) = yEdge(iEdge) - yCell(cell1) - edgeNormalVectors(3,iEdge) = zEdge(iEdge) - zCell(cell1) + if (is_periodic) then + edgeNormalVectors(1,iEdge) = mpas_fix_periodicity(xEdge(iEdge), xCell(cell1), x_period) - xCell(cell1) + edgeNormalVectors(2,iEdge) = mpas_fix_periodicity(yEdge(iEdge), yCell(cell1), y_period) - yCell(cell1) + edgeNormalVectors(3,iEdge) = zEdge(iEdge) - zCell(cell1) + else + edgeNormalVectors(1,iEdge) = xEdge(iEdge) - xCell(cell1) + edgeNormalVectors(2,iEdge) = yEdge(iEdge) - yCell(cell1) + edgeNormalVectors(3,iEdge) = zEdge(iEdge) - zCell(cell1) + end if else ! this is not a boundary cell ! the normal points from the cell 1 to cell2 ! mrp problem: on periodic domains, vectors on edges of domain point the wrong way. - edgeNormalVectors(1,iEdge) = xCell(cell2) - xCell(cell1) - edgeNormalVectors(2,iEdge) = yCell(cell2) - yCell(cell1) - edgeNormalVectors(3,iEdge) = zCell(cell2) - zCell(cell1) + if (is_periodic) then + edgeNormalVectors(1,iEdge) = mpas_fix_periodicity(xCell(cell2), xCell(cell1), x_period) - xCell(cell1) + edgeNormalVectors(2,iEdge) = mpas_fix_periodicity(yCell(cell2), yCell(cell1), y_period) - yCell(cell1) + edgeNormalVectors(3,iEdge) = zCell(cell2) - zCell(cell1) + else + edgeNormalVectors(1,iEdge) = xCell(cell2) - xCell(cell1) + edgeNormalVectors(2,iEdge) = yCell(cell2) - yCell(cell1) + edgeNormalVectors(3,iEdge) = zCell(cell2) - zCell(cell1) + end if - endif + end if call mpas_unit_vec_in_r3(edgeNormalVectors(:,iEdge)) end do @@ -783,6 +809,13 @@ subroutine mpas_initialize_tangent_vectors(meshPool, edgeTangentVectors)!{{{ integer, dimension(:,:), pointer :: verticesOnEdge real(kind=RKIND), dimension(:), pointer :: xVertex, yVertex, zVertex + logical, pointer :: is_periodic + real(kind=RKIND), pointer :: x_period, y_period + + call mpas_pool_get_config(meshPool, 'is_periodic', is_periodic) 
+ call mpas_pool_get_config(meshPool, 'x_period', x_period) + call mpas_pool_get_config(meshPool, 'y_period', y_period) + call mpas_pool_get_dimension(meshPool, 'nEdges', nEdges) call mpas_pool_get_array(meshPool, 'verticesOnEdge', verticesOnEdge) call mpas_pool_get_array(meshPool, 'xVertex', xVertex) @@ -790,17 +823,125 @@ subroutine mpas_initialize_tangent_vectors(meshPool, edgeTangentVectors)!{{{ call mpas_pool_get_array(meshPool, 'zVertex', zVertex) do iEdge = 1,nEdges - vertex1 = verticesOnEdge(1,iEdge) - vertex2 = verticesOnEdge(2,iEdge) + vertex1 = verticesOnEdge(1,iEdge) + vertex2 = verticesOnEdge(2,iEdge) ! the tangent vector points from the vertex 1 to vertex 2 - edgeTangentVectors(1,iEdge) = xVertex(vertex2) - xVertex(vertex1) - edgeTangentVectors(2,iEdge) = yVertex(vertex2) - yVertex(vertex1) - edgeTangentVectors(3,iEdge) = zVertex(vertex2) - zVertex(vertex1) + if (is_periodic) then + edgeTangentVectors(1,iEdge) = mpas_fix_periodicity(xVertex(vertex2), xVertex(vertex1), x_period) - xVertex(vertex1) + edgeTangentVectors(2,iEdge) = mpas_fix_periodicity(yVertex(vertex2), yVertex(vertex1), y_period) - yVertex(vertex1) + edgeTangentVectors(3,iEdge) = zVertex(vertex2) - zVertex(vertex1) + else + edgeTangentVectors(1,iEdge) = xVertex(vertex2) - xVertex(vertex1) + edgeTangentVectors(2,iEdge) = yVertex(vertex2) - yVertex(vertex1) + edgeTangentVectors(3,iEdge) = zVertex(vertex2) - zVertex(vertex1) + end if call mpas_unit_vec_in_r3(edgeTangentVectors(:,iEdge)) end do end subroutine mpas_initialize_tangent_vectors!}}} +!*********************************************************************** +! +! subroutine mpas_fix_periodicity +! +!> \brief Fixes periodicity of point pxi relative to a point xci and xiRef periodicity. +!> \author Phillip Wolfram & Doug Jacobsen +!> \date 06/29/2015 +!> \details +!> This routine recomputes the location of a point pxi relative to a +!> point xci with xiRef periodicity. 
The calculation ensures that the recomputed +!> point and xci are spatially nearby, permitting reasonable calculation of +!> relative geometry. This function operates only on a single dimension "i" for +!> a point pxi relative to a specific location xci for dimension xiRef +!> in the periodic direction. Note, pxi can only be adjusted by at most +!> a single period, e.g., pxi = 260 + 360 = 620, xci = 0, and xiRef = 360 +!> returns 260, not -100. +!----------------------------------------------------------------------- + real(KIND=RKIND) function mpas_fix_periodicity(pxi, xci, xiRef) !{{{ + + !----------------------------------------------------------------- + ! input variables + !----------------------------------------------------------------- + real (kind=RKIND), intent(in) :: pxi, xci, xiRef + !----------------------------------------------------------------- + ! input/output variables + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! output variables + !----------------------------------------------------------------- + ! real (kind=RKIND), intent(out) :: mpas_fix_periodicity + + !----------------------------------------------------------------- + ! local variables + !----------------------------------------------------------------- + real (kind=RKIND) :: dist + + dist = pxi - xci + + if (abs(dist) > xiRef * 0.5_RKIND) then + mpas_fix_periodicity = pxi - (dist/abs(dist)) * xiRef + else + mpas_fix_periodicity = pxi + end if + + end function mpas_fix_periodicity !}}} + +!*********************************************************************** +! +! subroutine mpas_unit_test_fix_periodicity +! +!> \brief Simple unit test for testing the mpas_fix_periodicity routine. +!> \author Phillip Wolfram +!> \date 06/29/2015 +!> \details +!> This routine tests the mpas_fix_periodicity routine. 
+!----------------------------------------------------------------------- + subroutine mpas_unit_test_fix_periodicity(ierr)!{{{ + !----------------------------------------------------------------- + ! input variables + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! input/output variables + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! output variables + !----------------------------------------------------------------- + integer, intent(out) :: ierr + !----------------------------------------------------------------- + ! local variables + !----------------------------------------------------------------- + real (kind=RKIND) :: x1, x2, xc, xLen, xnew + real (kind=RKIND) :: eps = 1.0e-12_RKIND + + ierr = 0 + + xLen = 2_RKIND*pii + x1 = pii + x2 = 3_RKIND*pii + xc = pii + + xnew = mpas_fix_periodicity(x1, xc, xLen) + if (abs(xnew - pii ) > eps) then + write(stderrUnit,*) "Error in mpas_unit_test_fix_periodicity: ", & + "x1's periodicity fix has error greater than tolerance -- FAILED." + ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_fix_periodicity: x1 test - SUCCESS' + endif + + xnew = mpas_fix_periodicity(x2, xc, xLen) + if (abs(xnew - pii ) > eps) then + write(stderrUnit,*) "Error in mpas_unit_test_fix_periodicity: ", & + "x2's periodicity fix has error greater than tolerance -- FAILED." 
+ ierr = 1 + else + write(stderrUnit,*) 'mpas_unit_test_fix_periodicity: x2 test - SUCCESS' + endif + + end subroutine mpas_unit_test_fix_periodicity!}}} end module mpas_vector_operations diff --git a/src/operators/mpas_vector_reconstruction.F b/src/operators/mpas_vector_reconstruction.F index 64a7d8d170..41d1c40212 100644 --- a/src/operators/mpas_vector_reconstruction.F +++ b/src/operators/mpas_vector_reconstruction.F @@ -20,9 +20,9 @@ module mpas_vector_reconstruction use mpas_derived_types use mpas_pool_routines - use mpas_configure use mpas_constants use mpas_rbf_interpolation + use mpas_vector_operations implicit none @@ -48,15 +48,17 @@ module mpas_vector_reconstruction !> Output: grid % coeffs_reconstruct - coefficients used to reconstruct !> velocity vectors at cell centers !----------------------------------------------------------------------- - subroutine mpas_init_reconstruct(meshPool)!{{{ + subroutine mpas_init_reconstruct(meshPool, includeHalos)!{{{ implicit none type (mpas_pool_type), intent(in) :: & meshPool !< Input: Mesh information + logical, optional, intent(in) :: includeHalos + ! 
temporary arrays needed in the (to be constructed) init procedure - integer, pointer :: nCellsSolve + integer, pointer :: nCells integer, dimension(:,:), pointer :: edgesOnCell integer, dimension(:), pointer :: nEdgesOnCell integer :: i, iCell, iEdge, pointCount, maxEdgeCount @@ -68,6 +70,20 @@ subroutine mpas_init_reconstruct(meshPool)!{{{ real(kind=RKIND), dimension(:,:,:), pointer :: cellTangentPlane real (kind=RKIND), dimension(:,:,:), pointer :: coeffs_reconstruct + logical, pointer :: is_periodic + real(kind=RKIND), pointer :: x_period, y_period + + logical :: includeHalosLocal + + call mpas_pool_get_config(meshPool, 'is_periodic', is_periodic) + call mpas_pool_get_config(meshPool, 'x_period', x_period) + call mpas_pool_get_config(meshPool, 'y_period', y_period) + + if ( present(includeHalos) ) then + includeHalosLocal = includeHalos + else + includeHalosLocal = .false. + end if !======================================================== ! arrays filled and saved during init procedure @@ -88,7 +104,11 @@ subroutine mpas_init_reconstruct(meshPool)!{{{ call mpas_pool_get_array(meshPool, 'edgeNormalVectors', edgeNormalVectors) call mpas_pool_get_array(meshPool, 'cellTangentPlane', cellTangentPlane) - call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsSolve) + if ( includeHalosLocal ) then + call mpas_pool_get_dimension(meshPool, 'nCells', nCells) + else + call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCells) + end if ! init arrays coeffs_reconstruct = 0.0 @@ -100,7 +120,7 @@ subroutine mpas_init_reconstruct(meshPool)!{{{ allocate(coeffs(maxEdgeCount,3)) ! 
loop over all cells to be solved on this block - do iCell=1,nCellsSolve + do iCell=1,nCells pointCount = nEdgesOnCell(iCell) cellCenter(1) = xCell(iCell) cellCenter(2) = yCell(iCell) @@ -108,9 +128,15 @@ subroutine mpas_init_reconstruct(meshPool)!{{{ do i=1,pointCount iEdge = edgesOnCell(i,iCell) - edgeOnCellLocations(i,1) = xEdge(iEdge) - edgeOnCellLocations(i,2) = yEdge(iEdge) - edgeOnCellLocations(i,3) = zEdge(iEdge) + if (is_periodic) then + edgeOnCellLocations(i,1) = mpas_fix_periodicity(xEdge(iEdge), cellCenter(1), x_period) + edgeOnCellLocations(i,2) = mpas_fix_periodicity(yEdge(iEdge), cellCenter(2), y_period) + edgeOnCellLocations(i,3) = zEdge(iEdge) + else + edgeOnCellLocations(i,1) = xEdge(iEdge) + edgeOnCellLocations(i,2) = yEdge(iEdge) + edgeOnCellLocations(i,3) = zEdge(iEdge) + end if edgeOnCellNormals(i,:) = edgeNormalVectors(:, iEdge) end do @@ -166,7 +192,7 @@ end subroutine mpas_init_reconstruct!}}} !> Input: grid meta data and vector component data residing at cell edges !> Output: reconstructed vector field (measured in X,Y,Z) located at cell centers !----------------------------------------------------------------------- - subroutine mpas_reconstruct_2d(meshPool, u, uReconstructX, uReconstructY, uReconstructZ, uReconstructZonal, uReconstructMeridional)!{{{ + subroutine mpas_reconstruct_2d(meshPool, u, uReconstructX, uReconstructY, uReconstructZ, uReconstructZonal, uReconstructMeridional, includeHalos)!{{{ implicit none @@ -177,9 +203,11 @@ subroutine mpas_reconstruct_2d(meshPool, u, uReconstructX, uReconstructY, uRecon real (kind=RKIND), dimension(:,:), intent(out) :: uReconstructZ !< Output: Z Component of velocity reconstructed to cell centers real (kind=RKIND), dimension(:,:), intent(out) :: uReconstructZonal !< Output: Zonal Component of velocity reconstructed to cell centers real (kind=RKIND), dimension(:,:), intent(out) :: uReconstructMeridional !< Output: Meridional Component of velocity reconstructed to cell centers + logical, optional, 
intent(in) :: includeHalos !< Input: Optional logical that allows reconstruction over halo regions ! temporary arrays needed in the compute procedure - integer, pointer :: nCellsSolve + logical :: includeHalosLocal + integer, pointer :: nCells integer, dimension(:,:), pointer :: edgesOnCell integer, dimension(:), pointer :: nEdgesOnCell integer :: iCell,iEdge, i @@ -191,6 +219,11 @@ subroutine mpas_reconstruct_2d(meshPool, u, uReconstructX, uReconstructY, uRecon real (kind=RKIND) :: clat, slat, clon, slon + if ( present(includeHalos) ) then + includeHalosLocal = includeHalos + else + includeHalosLocal = .false. + end if ! stored arrays used during compute procedure call mpas_pool_get_array(meshPool, 'coeffs_reconstruct', coeffs_reconstruct) @@ -198,7 +231,12 @@ subroutine mpas_reconstruct_2d(meshPool, u, uReconstructX, uReconstructY, uRecon ! temporary variables call mpas_pool_get_array(meshPool, 'nEdgesOnCell', nEdgesOnCell) call mpas_pool_get_array(meshPool, 'edgesOnCell', edgesOnCell) - call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsSolve) + + if ( includeHalosLocal ) then + call mpas_pool_get_dimension(meshPool, 'nCells', nCells) + else + call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCells) + end if call mpas_pool_get_array(meshPool, 'latCell', latCell) call mpas_pool_get_array(meshPool, 'lonCell', lonCell) @@ -211,7 +249,7 @@ subroutine mpas_reconstruct_2d(meshPool, u, uReconstructX, uReconstructY, uRecon uReconstructZ = 0.0 ! loop over cell centers - do iCell = 1, nCellsSolve + do iCell = 1, nCells ! a more efficient reconstruction where rbf_values*matrix_reconstruct has been precomputed ! in coeffs_reconstruct do i=1,nEdgesOnCell(iCell) @@ -227,7 +265,7 @@ subroutine mpas_reconstruct_2d(meshPool, u, uReconstructX, uReconstructY, uRecon enddo ! 
iCell if (on_a_sphere) then - do iCell = 1, nCellsSolve + do iCell = 1, nCells clat = cos(latCell(iCell)) slat = sin(latCell(iCell)) clon = cos(lonCell(iCell)) @@ -257,7 +295,7 @@ end subroutine mpas_reconstruct_2d!}}} !> Input: grid meta data and vector component data residing at cell edges !> Output: reconstructed vector field (measured in X,Y,Z) located at cell centers !----------------------------------------------------------------------- - subroutine mpas_reconstruct_1d(meshPool, u, uReconstructX, uReconstructY, uReconstructZ, uReconstructZonal, uReconstructMeridional)!{{{ + subroutine mpas_reconstruct_1d(meshPool, u, uReconstructX, uReconstructY, uReconstructZ, uReconstructZonal, uReconstructMeridional, includeHalos)!{{{ implicit none @@ -268,9 +306,10 @@ subroutine mpas_reconstruct_1d(meshPool, u, uReconstructX, uReconstructY, uRecon real (kind=RKIND), dimension(:), intent(out) :: uReconstructZ !< Output: Z Component of velocity reconstructed to cell centers real (kind=RKIND), dimension(:), intent(out) :: uReconstructZonal !< Output: Zonal Component of velocity reconstructed to cell centers real (kind=RKIND), dimension(:), intent(out) :: uReconstructMeridional !< Output: Meridional Component of velocity reconstructed to cell centers + logical, optional, intent(in) :: includeHalos !< Input: Logical flag that allows reconstructing over halo regions ! temporary arrays needed in the compute procedure - integer, pointer :: nCellsSolve + integer, pointer :: nCells integer, dimension(:,:), pointer :: edgesOnCell integer, dimension(:), pointer :: nEdgesOnCell integer :: iCell,iEdge, i @@ -279,9 +318,15 @@ subroutine mpas_reconstruct_1d(meshPool, u, uReconstructX, uReconstructY, uRecon real (kind=RKIND), dimension(:,:,:), pointer :: coeffs_reconstruct logical, pointer :: on_a_sphere + logical :: includeHalosLocal real (kind=RKIND) :: clat, slat, clon, slon + if ( present(includeHalos) ) then + includeHalosLocal = includeHalos + else + includeHalosLocal = .false. 
+ end if ! stored arrays used during compute procedure call mpas_pool_get_array(meshPool, 'coeffs_reconstruct', coeffs_reconstruct) @@ -289,7 +334,12 @@ subroutine mpas_reconstruct_1d(meshPool, u, uReconstructX, uReconstructY, uRecon ! temporary variables call mpas_pool_get_array(meshPool, 'nEdgesOnCell', nEdgesOnCell) call mpas_pool_get_array(meshPool, 'edgesOnCell', edgesOnCell) - call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsSolve) + + if ( includeHalosLocal ) then + call mpas_pool_get_dimension(meshPool, 'nCells', nCells) + else + call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCells) + end if call mpas_pool_get_array(meshPool, 'latCell', latCell) call mpas_pool_get_array(meshPool, 'lonCell', lonCell) @@ -302,7 +352,7 @@ subroutine mpas_reconstruct_1d(meshPool, u, uReconstructX, uReconstructY, uRecon uReconstructZ = 0.0 ! loop over cell centers - do iCell = 1, nCellsSolve + do iCell = 1, nCells ! a more efficient reconstruction where rbf_values*matrix_reconstruct has been precomputed ! in coeffs_reconstruct do i=1,nEdgesOnCell(iCell) @@ -318,7 +368,7 @@ subroutine mpas_reconstruct_1d(meshPool, u, uReconstructX, uReconstructY, uRecon enddo ! 
iCell if (on_a_sphere) then - do iCell = 1, nCellsSolve + do iCell = 1, nCells clat = cos(latCell(iCell)) slat = sin(latCell(iCell)) clon = cos(lonCell(iCell)) diff --git a/src/tools/Makefile b/src/tools/Makefile index e55084c734..26a11ed81b 100644 --- a/src/tools/Makefile +++ b/src/tools/Makefile @@ -1,11 +1,13 @@ -all: build_input_gen build_registry +all: + ( $(MAKE) build_registry CPPFLAGS="$(CPPFLAGS)" CC="$(CC)" CFLAGS="$(CFLAGS)" ) + ( $(MAKE) build_input_gen CPPFLAGS="$(CPPFLAGS)" CC="$(CC)" CFLAGS="$(CFLAGS)" ) build_input_gen: - (cd input_gen; $(MAKE) CPPFLAGS="$(CPPFLAGS)" CC="$(CC)") + (cd input_gen; $(MAKE) CPPFLAGS="$(CPPFLAGS)" CC="$(CC)" CFLAGS="$(CFLAGS)") build_registry: - (cd registry; $(MAKE) CPPFLAGS="$(CPPFLAGS)" CC="$(CC)") + (cd registry; $(MAKE) CPPFLAGS="$(CPPFLAGS)" CC="$(CC)" CFLAGS="$(CFLAGS)") clean: (cd input_gen; $(MAKE) clean) diff --git a/src/tools/input_gen/Makefile b/src/tools/input_gen/Makefile index 33878cd0cb..29c47a242d 100644 --- a/src/tools/input_gen/Makefile +++ b/src/tools/input_gen/Makefile @@ -7,17 +7,17 @@ ST_OBJS = streams_gen.o test_functions.o XML_OBJS = $(EZXML_PATH)/ezxml.o all: ezxml - ($(MAKE) namelist_gen CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)") - ($(MAKE) streams_gen CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)") + ($(MAKE) -j 1 namelist_gen CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)") + ($(MAKE) -j 1 streams_gen CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)") ezxml: (cd $(EZXML_PATH); $(MAKE) CFLAGS="$(CFLAGS) $(TOOL_TARGET_ARCH)") namelist_gen: ezxml $(NL_OBJS) $(XML_OBJS) - $(CC) $(CPPFLAGS) -I$(EZXML_PATH) -o $@ $(NL_OBJS) $(XML_OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) -I$(EZXML_PATH) -o $@ $(NL_OBJS) $(XML_OBJS) streams_gen: ezxml $(ST_OBJS) $(XML_OBJS) - $(CC) $(CPPFLAGS) -I$(EZXML_PATH) -o $@ $(ST_OBJS) $(XML_OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) -I$(EZXML_PATH) -o $@ $(ST_OBJS) $(XML_OBJS) clean: $(RM) *.o namelist_gen streams_gen diff --git a/src/tools/registry/Makefile 
b/src/tools/registry/Makefile index 7ead1aad23..46d9d56394 100644 --- a/src/tools/registry/Makefile +++ b/src/tools/registry/Makefile @@ -4,14 +4,25 @@ EZXML_PATH= ../../external/ezxml OBJS = parse.o dictionary.o gen_inc.o fortprintf.o utility.o -all: ezxml parse +all: ezxml + #($(MAKE) ezxml CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)") ($(MAKE) parse CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)") ezxml: (cd $(EZXML_PATH); $(MAKE) CFLAGS="$(CFLAGS) $(TOOL_TARGET_ARCH)") -parse: ezxml $(OBJS) - $(CC) $(CPPFLAGS) $(EZXML_PATH)/ezxml.o -I$(EZXML_PATH) -o $@ $(OBJS) +parse: $(OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) $(EZXML_PATH)/ezxml.o -I$(EZXML_PATH) -o $@ $(OBJS) + +parse.o: + +dictionary.o: + +gen_inc.o: + +fortprintf.o: + +utility.o: clean: $(RM) *.o parse diff --git a/src/tools/registry/fortprintf.c b/src/tools/registry/fortprintf.c index 9526d020de..0635146602 100644 --- a/src/tools/registry/fortprintf.c +++ b/src/tools/registry/fortprintf.c @@ -25,13 +25,15 @@ int nbuf = 0; int fortprintf(FILE * fd, char * str, ...)/*{{{*/ { - int i, nl, sp, inquotes, q; + int i, nl, sp, sp_inquotes, inquotes, q; int lastchar; int errorcode; va_list ap; #ifdef FORTPRINTF_DEBUG printf("call to fortprintf\n"); + printf("Format string is: %s\n", str); + printf("\n"); #endif /* Assume no errors */ @@ -40,6 +42,10 @@ int fortprintf(FILE * fd, char * str, ...)/*{{{*/ /* Add formatted string to the buffer of fortran code to be written */ va_start(ap, str); i = vsnprintf(fbuffer+nbuf, 1024-nbuf, str, ap); +#ifdef FORTPRINTF_DEBUG + printf("Full buffer is:\n"); + printf("%s\n", fbuffer); +#endif va_end(ap); /* Set the next free position in the fortran buffer */ @@ -55,16 +61,23 @@ int fortprintf(FILE * fd, char * str, ...)/*{{{*/ /* Scan through the max line length - 1 (since we may have to add an & character) or the end of the buffer, whichever comes first */ for (i=0; i= 0) { snprintf(printbuf, sp+2, "%s", fbuffer); i = sp+1; - if (inquotes && (sp > q)) printbuf[i++] = '\''; 
+ if (sp_inquotes && (sp > q)) printbuf[i++] = '\''; printbuf[i++] = '&'; printbuf[i++] = '\n'; printbuf[i++] = '\0'; fprintf(fd, "%s", printbuf); sp++; i = 0; - if (inquotes && (sp > q)) { + if (sp_inquotes && (sp > q)) { inquotes = (inquotes + 1) % 2; fbuffer[i++] = '/'; fbuffer[i++] = '/'; @@ -155,12 +168,14 @@ int main()/*{{{*/ foo = fopen("test01.inc","w"); err = fortprintf(foo, "123456789\n"); print_result(1, err); + fortprint_flush(foo); fclose(foo); /* Tests writing a line with space that is below the column limit */ foo = fopen("test02.inc","w"); err = fortprintf(foo, "12345 789\n"); print_result(2, err); + fortprint_flush(foo); fclose(foo); /*** Test lines that are less than 20 chars long ***/ @@ -169,12 +184,14 @@ int main()/*{{{*/ foo = fopen("test03.inc","w"); err = fortprintf(foo, "123456789 12345678\n"); print_result(3, err); + fortprint_flush(foo); fclose(foo); /* Tests the case where we write a newline at the last column with NO chances to break the line earlier */ foo = fopen("test04.inc","w"); err = fortprintf(foo, "123456789012345678\n"); print_result(4, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with a space occurring in the NEXT-TO-LAST column plus another line */ @@ -182,6 +199,7 @@ int main()/*{{{*/ err = fortprintf(foo, "123456789 1234567 0"); err += fortprintf(foo, "1234\n"); print_result(5, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with a space occurring in the LAST column plus another line */ @@ -189,6 +207,7 @@ int main()/*{{{*/ err = fortprintf(foo, "123456789 12345678 "); err += fortprintf(foo, "1234\n"); print_result(6, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with the first space occurring in the NEXT-TO-LAST column plus another line */ @@ -196,6 +215,7 @@ int main()/*{{{*/ err = fortprintf(foo, "12345678901234567 0"); err += fortprintf(foo, "1234\n"); print_result(7, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with the first space occurring in the LAST column plus 
another line */ @@ -203,6 +223,7 @@ int main()/*{{{*/ err = fortprintf(foo, "123456789012345678 "); err += fortprintf(foo, "1234\n"); print_result(8, err); + fortprint_flush(foo); fclose(foo); /*** Test lines that are exactly 20 chars long ***/ @@ -211,12 +232,14 @@ int main()/*{{{*/ foo = fopen("test09.inc","w"); err = fortprintf(foo, "123456789 123456789\n"); print_result(9, err); + fortprint_flush(foo); fclose(foo); /* Tests the case where we write a newline at the last column with NO chances to break the line earlier */ foo = fopen("test10.inc","w"); err = fortprintf(foo, "1234567890123456789\n"); print_result(10, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with a space occurring in the NEXT-TO-LAST column plus another line */ @@ -224,6 +247,7 @@ int main()/*{{{*/ err = fortprintf(foo, "123456789 12345678 0"); err += fortprintf(foo, "1234\n"); print_result(11, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with a space occurring in the LAST column plus another line */ @@ -231,6 +255,7 @@ int main()/*{{{*/ err = fortprintf(foo, "123456789 123456780 "); err += fortprintf(foo, "1234\n"); print_result(12, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with the first space occurring in the NEXT-TO-LAST column plus another line */ @@ -238,6 +263,7 @@ int main()/*{{{*/ err = fortprintf(foo, "123456789012345678 0"); err += fortprintf(foo, "1234\n"); print_result(13, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with the first space occurring in the LAST column plus another line */ @@ -245,6 +271,7 @@ int main()/*{{{*/ err = fortprintf(foo, "1234567890123456780 "); err += fortprintf(foo, "1234\n"); print_result(14, err); + fortprint_flush(foo); fclose(foo); /*** Test lines that are more than 21 chars long ***/ @@ -253,12 +280,14 @@ int main()/*{{{*/ foo = fopen("test15.inc","w"); err = fortprintf(foo, "123456789 1234567890\n"); print_result(15, err); + fortprint_flush(foo); fclose(foo); /* Tests the case where we write 
a newline at the last column with NO chances to break the line earlier */ foo = fopen("test16.inc","w"); err = fortprintf(foo, "1234567890123456789\n"); print_result(16, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with a space occurring in the NEXT-TO-LAST column plus another line */ @@ -266,6 +295,7 @@ int main()/*{{{*/ err = fortprintf(foo, "123456789 123456789 0"); err = fortprintf(foo, "1234\n"); print_result(17, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with a space occurring in the LAST column plus another line */ @@ -273,6 +303,7 @@ int main()/*{{{*/ err = fortprintf(foo, "123456789 1234567890 "); err += fortprintf(foo, "1234\n"); print_result(18, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with the first space occurring in the NEXT-TO-LAST column plus another line */ @@ -280,6 +311,7 @@ int main()/*{{{*/ err = fortprintf(foo, "1234567890123456789 0"); err += fortprintf(foo, "1234\n"); print_result(19, err); + fortprint_flush(foo); fclose(foo); /* Tests a line with the first space occurring in the LAST column plus another line */ @@ -287,8 +319,31 @@ int main()/*{{{*/ err = fortprintf(foo, "12345678901234567890 "); err += fortprintf(foo, "1234\n"); print_result(20, err); + fortprint_flush(foo); + fclose(foo); + + /* Tests a line with single quotes */ + foo = fopen("test21.inc", "w"); + err = fortprintf(foo, "\'1234567890\'"); + print_result(21, err); + fortprint_flush(foo); fclose(foo); + /* Tests a line with double quotes */ + foo = fopen("test22.inc", "w"); + err = fortprintf(foo, "\'1234\'\'56789\'"); + print_result(22, err); + fortprint_flush(foo); + fclose(foo); + + /* Tests a line with double quotes and a line break after the double quotes */ + foo = fopen("test22.inc", "w"); + err = fortprintf(foo, "\'1234567\'\'890 1234567890\'"); + print_result(22, err); + fortprint_flush(foo); + fclose(foo); + + return 0; }/*}}}*/ #endif diff --git a/src/tools/registry/gen_inc.c b/src/tools/registry/gen_inc.c index 
0c82cad2d5..02acdd22e1 100644 --- a/src/tools/registry/gen_inc.c +++ b/src/tools/registry/gen_inc.c @@ -424,7 +424,7 @@ int build_dimension_information(ezxml_t registry, ezxml_t var, int *ndims, int * }/*}}}*/ -int get_field_information(const char *vartype, const char *varval, char *default_value, int *type){/*{{{*/ +int get_field_information(const char *vartype, const char *varval, char *default_value, const char *varmissval, char *missing_value, int *type){/*{{{*/ if (strcmp(vartype, "real") == 0){ (*type) = REAL; if(!varval){ @@ -448,6 +448,29 @@ int get_field_information(const char *vartype, const char *varval, char *default } } + if (strcmp(vartype, "real") == 0){ + (*type) = REAL; + if(!varmissval || strcmp(varmissval, "FILLVAL") == 0){ + snprintf(missing_value, 1024, "MPAS_REAL_FILLVAL"); + } else { + snprintf(missing_value, 1024, "%s", varmissval); + } + } else if (strcmp(vartype, "integer") == 0){ + (*type) = INTEGER; + if(!varmissval || strcmp(varmissval, "FILLVAL") == 0){ + snprintf(missing_value, 1024, "MPAS_INT_FILLVAL"); + } else { + snprintf(missing_value, 1024, "%s", varmissval); + } + } else if (strcmp(vartype, "text") == 0){ + (*type) = CHARACTER; + if(!varmissval || strcmp(varmissval, "FILLVAL") == 0){ + snprintf(missing_value, 1024, "MPAS_CHAR_FILLVAL"); + } else { + snprintf(missing_value, 1024, "'%s'", varmissval); + } + } + return 0; }/*}}}*/ @@ -470,6 +493,7 @@ int parse_packages_from_registry(ezxml_t registry)/*{{{*/ fortprintf(fd, " use mpas_derived_types\n"); fortprintf(fd, " use mpas_pool_routines\n"); fortprintf(fd, " use mpas_io_units\n"); + fortprintf(fd, " implicit none\n"); fortprintf(fd, " type (mpas_pool_type), intent(inout) :: packagePool !< Input: MPAS Pool for containing package logicals.\n\n"); fortprintf(fd, " integer :: iErr\n"); fortprintf(fd, "\n"); @@ -519,6 +543,8 @@ int parse_namelist_records_from_registry(ezxml_t registry)/*{{{*/ fortprintf(fd2, " use mpas_derived_types\n"); fortprintf(fd2, " use 
mpas_pool_routines\n"); fortprintf(fd2, " use mpas_io_units\n"); + fortprintf(fd2, " use mpas_abort, only : mpas_dmpar_global_abort\n"); + fortprintf(fd2, " implicit none\n"); fortprintf(fd2, " type (mpas_pool_type), intent(inout) :: configPool\n"); fortprintf(fd2, " character (len=*), intent(in) :: namelistFilename\n"); fortprintf(fd2, " type (dm_info), intent(in) :: dminfo\n"); @@ -567,10 +593,12 @@ int parse_namelist_records_from_registry(ezxml_t registry)/*{{{*/ fortprintf(fd, " use mpas_dmpar\n"); fortprintf(fd, " use mpas_pool_routines\n"); fortprintf(fd, " use mpas_io_units\n"); + fortprintf(fd, " implicit none\n"); fortprintf(fd, " type (mpas_pool_type), intent(inout) :: configPool\n"); fortprintf(fd, " integer, intent(in) :: unitNumber\n"); fortprintf(fd, " type (dm_info), intent(in) :: dminfo\n"); fortprintf(fd, " type (mpas_pool_type), pointer :: recordPool\n"); + fortprintf(fd, " integer :: ierr\n"); fortprintf(fd, "\n"); // Define variable defintions prior to reading the namelist in. 
@@ -584,7 +612,7 @@ int parse_namelist_records_from_registry(ezxml_t registry)/*{{{*/ nmloptindef = ezxml_attr(nmlopt_xml, "in_defaults"); if(strncmp(nmlopttype, "real", 1024) == 0){ - fortprintf(fd, " real (kind=RKIND) :: %s = %lf\n", nmloptname, (float)atof(nmloptval)); + fortprintf(fd, " real (kind=RKIND) :: %s = %lf\n", nmloptname, (double)atof(nmloptval)); } else if(strncmp(nmlopttype, "integer", 1024) == 0){ fortprintf(fd, " integer :: %s = %d\n", nmloptname, atoi(nmloptval)); } else if(strncmp(nmlopttype, "logical", 1024) == 0){ @@ -621,12 +649,6 @@ int parse_namelist_records_from_registry(ezxml_t registry)/*{{{*/ fortprintf(fd, " if (dminfo %% my_proc_id == IO_NODE) then\n"); fortprintf(fd, " rewind(unitNumber)\n"); fortprintf(fd, " read(unitNumber, %s, iostat=ierr)\n", nmlrecname); - fortprintf(fd, " if (ierr > 0) then\n"); - fortprintf(fd, " write(stderrUnit, *) 'Error while reading namelist record %s.'\n", nmlrecname); - fortprintf(fd, " call mpas_dmpar_abort(dminfo)\n"); - fortprintf(fd, " else if (ierr < 0) then\n"); - fortprintf(fd, " write(stderrUnit,*) 'Namelist record %s not found; using default values for variables in this namelist'\n", nmlrecname); - fortprintf(fd, " end if\n"); fortprintf(fd, " end if\n"); // Broadcast ierr, to check if a broadcast should happen for the options (if namelist was read in) @@ -634,7 +656,7 @@ int parse_namelist_records_from_registry(ezxml_t registry)/*{{{*/ fortprintf(fd, "\n"); // Define broadcast calls for namelist values. 
- fortprintf(fd, " if (ierr == 0) then\n"); + fortprintf(fd, " if (ierr <= 0) then\n"); for (nmlopt_xml = ezxml_child(nmlrecs_xml, "nml_option"); nmlopt_xml; nmlopt_xml = nmlopt_xml->next){ nmloptname = ezxml_attr(nmlopt_xml, "name"); nmlopttype = ezxml_attr(nmlopt_xml, "type"); @@ -649,6 +671,25 @@ int parse_namelist_records_from_registry(ezxml_t registry)/*{{{*/ fortprintf(fd, " call mpas_dmpar_bcast_char(dminfo, %s)\n", nmloptname); } } + fortprintf(fd, " if (ierr < 0) then\n"); + fortprintf(fd, " write(stderrUnit,*) '*** Encountered an issue while attempting to read namelist record %s'\n", nmlrecname); + fortprintf(fd, " write(stderrUnit,*) ' The following values will be used for variables in this record:'\n"); + fortprintf(fd, " write(stderrUnit,*) ' '\n"); + for (nmlopt_xml = ezxml_child(nmlrecs_xml, "nml_option"); nmlopt_xml; nmlopt_xml = nmlopt_xml->next){ + nmloptname = ezxml_attr(nmlopt_xml, "name"); + nmlopttype = ezxml_attr(nmlopt_xml, "type"); + + if(strncmp(nmlopttype, "character", 1024) == 0){ + fortprintf(fd, " write(stderrUnit,*) ' %s = ', trim(%s)\n", nmloptname, nmloptname); + } else { + fortprintf(fd, " write(stderrUnit,*) ' %s = ', %s\n", nmloptname, nmloptname); + } + } + fortprintf(fd, " write(stderrUnit,*) ' '\n"); + fortprintf(fd, " end if\n"); + fortprintf(fd, " else if (ierr > 0) then\n"); + fortprintf(fd, " write(stderrUnit, *) 'Error while reading namelist record %s.'\n", nmlrecname); + fortprintf(fd, " call mpas_dmpar_abort(dminfo)\n"); fortprintf(fd, " end if\n"); fortprintf(fd, "\n"); @@ -700,6 +741,8 @@ int parse_dimensions_from_registry(ezxml_t registry)/*{{{*/ fortprintf(fd, " use mpas_pool_routines\n"); fortprintf(fd, " use mpas_io_units\n"); fortprintf(fd, "\n"); + fortprintf(fd, " implicit none\n"); + fortprintf(fd, "\n"); fortprintf(fd, " type (mpas_pool_type), intent(inout) :: readDimensions !< Input: Pool to pull read dimensions from\n"); fortprintf(fd, " type (mpas_pool_type), intent(inout) :: configPool !< Input: Pool 
containing namelist options with configs\n"); fortprintf(fd, " type (mpas_pool_type), intent(inout) :: dimensionPool !< Input/Output: Pool to add dimensions into\n"); @@ -846,12 +889,15 @@ int parse_dimensions_from_registry(ezxml_t registry)/*{{{*/ fortprintf(fd, "\n\n"); - fortprintf(fd, " subroutine mpas_setup%sdecomposed_dimensions(block, manager, readDimensions, dimensionPool, totalBlocks)\n", core_string); + fortprintf(fd, " function %s_setup_decomposed_dimensions(block, manager, readDimensions, dimensionPool, totalBlocks) result(iErr)\n", core_string); fortprintf(fd, "\n"); fortprintf(fd, " use mpas_derived_types\n"); fortprintf(fd, " use mpas_decomp\n"); fortprintf(fd, " use mpas_pool_routines\n"); fortprintf(fd, " use mpas_io_units\n"); + fortprintf(fd, " use mpas_abort, only : mpas_dmpar_global_abort\n"); + fortprintf(fd, "\n"); + fortprintf(fd, " implicit none\n"); fortprintf(fd, "\n"); fortprintf(fd, " type (block_type), intent(inout) :: block !< Input: Pointer to block\n"); fortprintf(fd, " type (mpas_streamManager_type), intent(inout) :: manager !< Input: Stream manager\n"); @@ -877,7 +923,8 @@ int parse_dimensions_from_registry(ezxml_t registry)/*{{{*/ } fortprintf(fd, "\n"); - fortprintf(fd, "write(stderrUnit,\'(a)\') \'Processing decomposed dimensions ...\'\n\n"); + fortprintf(fd, " iErr = 0\n"); + fortprintf(fd, " write(stderrUnit,\'(a)\') \'Processing decomposed dimensions ...\'\n\n"); /* Retrieve dimension integers */ for (dims_xml = ezxml_child(registry, "dims"); dims_xml; dims_xml = dims_xml->next) { @@ -916,13 +963,13 @@ int parse_dimensions_from_registry(ezxml_t registry)/*{{{*/ } } - fortprintf(fd, "write(stderrUnit,*) \' '\n"); - fortprintf(fd, "write(stderrUnit,\'(a)\') \' ----- done processing decomposed dimensions -----\'\n"); - fortprintf(fd, "write(stderrUnit,*) \' '\n"); - fortprintf(fd, "write(stderrUnit,*) \' '\n"); + fortprintf(fd, " write(stderrUnit,*) \' '\n"); + fortprintf(fd, " write(stderrUnit,\'(a)\') \' ----- done processing 
decomposed dimensions -----\'\n"); + fortprintf(fd, " write(stderrUnit,*) \' '\n"); + fortprintf(fd, " write(stderrUnit,*) \' '\n"); fortprintf(fd, "\n"); - fortprintf(fd, " end subroutine mpas_setup%sdecomposed_dimensions\n", core_string); + fortprintf(fd, " end function %s_setup_decomposed_dimensions\n", core_string); fclose(fd); @@ -938,7 +985,7 @@ int parse_var_array(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t var const char *structname, *structlevs, *structpackages; const char *substructname; - const char *vararrname, *vararrtype, *vararrdims, *vararrpersistence, *vararrdefaultval, *vararrpackages; + const char *vararrname, *vararrtype, *vararrdims, *vararrpersistence, *vararrdefaultval, *vararrpackages, *vararrmissingval; const char *varname, *varpersistence, *vartype, *vardims, *varunits, *vardesc, *vararrgroup, *varstreams, *vardefaultval, *varpackages; const char *varname2, *vararrgroup2, *vararrname_in_code; const char *varname_in_code; @@ -957,9 +1004,11 @@ int parse_var_array(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t var int ndims, type, hasTime, decomp, in_stream; int persistence; char *string, *tofree, *token; + char temp_str[1024]; char pointer_name[1024]; char spacing[1024], sub_spacing[1024]; char default_value[1024]; + char missing_value[1024]; structname = ezxml_attr(superStruct, "name"); @@ -972,6 +1021,7 @@ int parse_var_array(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t var vararrdims = ezxml_attr(var_arr_xml, "dimensions"); vararrpersistence = ezxml_attr(var_arr_xml, "persistence"); vararrdefaultval = ezxml_attr(var_arr_xml, "default_value"); + vararrmissingval = ezxml_attr(var_arr_xml, "missing_value"); vararrpackages = ezxml_attr(var_arr_xml, "packages"); vararrtimelevs = ezxml_attr(var_arr_xml, "time_levs"); vararrname_in_code = ezxml_attr(var_arr_xml, "name_in_code"); @@ -1002,7 +1052,7 @@ int parse_var_array(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t var snprintf(spacing, 1024, " "); 
// Determine field type and default value. - get_field_information(vararrtype, vararrdefaultval, default_value, &type); + get_field_information(vararrtype, vararrdefaultval, default_value, vararrmissingval, missing_value, &type); // Determine ndims, hasTime, and decomp type build_dimension_information(registry, var_arr_xml, &ndims, &hasTime, &decomp); @@ -1219,7 +1269,7 @@ int parse_var_array(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t var fortprintf(fd, " if (associated(newSubPool)) then\n"); fortprintf(fd, " call mpas_pool_get_dimension(newSubPool, 'index_%s', const_index)\n", varname_in_code); fortprintf(fd, " end if\n"); - fortprintf(fd, " if (index_counter > 0) then\n", spacing); + fortprintf(fd, " if (const_index > 0) then\n", spacing); fortprintf(fd, " %s(%d) %% constituentNames(const_index) = '%s'\n", pointer_name, time_lev, varname); fortprintf(fd, " end if\n", spacing); } @@ -1268,6 +1318,74 @@ int parse_var_array(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t var fortprintf(fd, " nullify(%s(%d) %% sendList)\n", pointer_name, time_lev); fortprintf(fd, " nullify(%s(%d) %% recvList)\n", pointer_name, time_lev); fortprintf(fd, " nullify(%s(%d) %% copyList)\n", pointer_name, time_lev); + fortprintf(fd, " allocate(%s(%d) %% attLists(size(%s(%d) %% constituentNames, dim=1)))\n", pointer_name, time_lev, pointer_name, time_lev); + + fortprintf(fd, " do index_counter = 1, size(%s(%d) %% constituentNames, dim=1)\n", pointer_name, time_lev); + fortprintf(fd, " allocate(%s(%d) %% attLists(index_counter) %% attList)\n", pointer_name, time_lev); + fortprintf(fd, " %s(%d) %% attLists(index_counter) %% attList %% attName = ''\n", pointer_name, time_lev); + fortprintf(fd, " %s(%d) %% attLists(index_counter) %% attList %% attType = -1\n", pointer_name, time_lev); + fortprintf(fd, " nullify(%s(%d) %% attLists(index_counter) %% attList %% next)\n", pointer_name, time_lev); + fortprintf(fd, " nullify(%s(%d) %% attLists(index_counter) %% attList %% 
attValueIntA)\n", pointer_name, time_lev); + fortprintf(fd, " nullify(%s(%d) %% attLists(index_counter) %% attList %% attValueRealA)\n", pointer_name, time_lev); + fortprintf(fd, " end do\n"); + + for(var_xml = ezxml_child(var_arr_xml, "var"); var_xml; var_xml = var_xml->next){ + varname = ezxml_attr(var_xml, "name"); + varname_in_code = ezxml_attr(var_xml, "name_in_code"); + vardesc = ezxml_attr(var_xml, "description"); + varunits = ezxml_attr(var_xml, "units"); + + if(!varname_in_code){ + varname_in_code = ezxml_attr(var_xml, "name"); + } + + fortprintf(fd, " if (associated(newSubPool)) then\n"); + fortprintf(fd, " call mpas_pool_get_dimension(newSubPool, 'index_%s', const_index)\n", varname_in_code); + fortprintf(fd, " end if\n"); + fortprintf(fd, " if (const_index > 0) then\n", spacing); + if ( vardesc != NULL ) { + string = strdup(vardesc); + tofree = string; + + token = strsep(&string, "'"); + sprintf(temp_str, "%s", token); + + while ( ( token = strsep(&string, "'") ) != NULL ) { + sprintf(temp_str, "%s''%s", temp_str, token); + } + + free(tofree); + + fortprintf(fd, " call mpas_add_att(%s(%d) %% attLists(const_index) %% attList, 'long_name', '%s')\n", pointer_name, time_lev, temp_str); + } + + if ( varunits != NULL ) { + string = strdup(varunits); + tofree = string; + + token = strsep(&string, "'"); + sprintf(temp_str, "%s", token); + + while ( ( token = strsep(&string, "'") ) != NULL ) { + sprintf(temp_str, "%s''%s", temp_str, token); + } + + free(tofree); + + fortprintf(fd, " call mpas_add_att(%s(%d) %% attLists(const_index) %% attList, 'units', '%s')\n", pointer_name, time_lev, temp_str); + } + + if ( vararrmissingval ) { + fortprintf(fd, " call mpas_add_att(%s(%d) %% attLists(const_index) %% attList, 'missing_value', %s)\n", pointer_name, time_lev, missing_value); + // Uncomment to add _FillValue to match missing_value + // fortprintf(fd, " call mpas_add_att(%s(%d) %% attLists(const_index) %% attList, '_FillValue', %s)\n", pointer_name, time_lev, 
missing_value); + } + fortprintf(fd, " %s(%d) %% missingValue = %s\n", pointer_name, time_lev, missing_value); + fortprintf(fd, " %s(%d) %% constituentNames(const_index) = '%s'\n", pointer_name, time_lev, varname); + fortprintf(fd, " end if\n", spacing); + } + + fortprintf(fd, " %s(%d) %% block => block\n", pointer_name, time_lev); } @@ -1291,12 +1409,11 @@ int parse_var_array(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t var for(time_lev = 1; time_lev <= time_levs; time_lev++){ fortprintf(fd, " %s%s(%d) %% isActive = .true.\n", spacing, pointer_name, time_lev); } - fortprintf(fd, " %scall mpas_pool_add_field(newSubPool, '%s', %s)\n", spacing, vararrname_in_code, pointer_name); if (!no_packages) { fortprintf(fd, " end if\n"); } - + fortprintf(fd, " call mpas_pool_add_field(newSubPool, '%s', %s)\n", vararrname_in_code, pointer_name); fortprintf(fd, " call mpas_pool_add_field(block %% allFields, '%s', %s)\n", vararrname, pointer_name); fortprintf(fd, "\n"); @@ -1313,7 +1430,7 @@ int parse_var(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t currentVa const char *structtimelevs, *vartimelevs; const char *structname, *structlevs, *structpackages; const char *substructname; - const char *varname, *varpersistence, *vartype, *vardims, *varunits, *vardesc, *vararrgroup, *varstreams, *vardefaultval, *varpackages; + const char *varname, *varpersistence, *vartype, *vardims, *varunits, *vardesc, *vararrgroup, *varstreams, *vardefaultval, *varpackages, *varmissingval; const char *varname2, *vararrgroup2; const char *varname_in_code; const char *streamname, *streamname2; @@ -1327,9 +1444,11 @@ int parse_var(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t currentVa int ndims, type, hasTime, decomp, in_stream; int persistence; char *string, *tofree, *token; + char temp_str[1024]; char pointer_name[1024]; char package_spacing[1024]; char default_value[1024]; + char missing_value[1024]; var_xml = currentVar; @@ -1345,6 +1464,9 @@ int parse_var(FILE *fd, 
ezxml_t registry, ezxml_t superStruct, ezxml_t currentVa vardefaultval = ezxml_attr(var_xml, "default_value"); vartimelevs = ezxml_attr(var_xml, "time_levs"); varname_in_code = ezxml_attr(var_xml, "name_in_code"); + varunits = ezxml_attr(var_xml, "units"); + vardesc = ezxml_attr(var_xml, "description"); + varmissingval = ezxml_attr(var_xml, "missing_value"); if(!varname_in_code){ varname_in_code = ezxml_attr(var_xml, "name"); @@ -1367,9 +1489,8 @@ int parse_var(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t currentVa fortprintf(fd, "! Define variable %s\n", varname); - // Determine field type and default value. - get_field_information(vartype, vardefaultval, default_value, &type); + get_field_information(vartype, vardefaultval, default_value, varmissingval, missing_value, &type); // Determine ndims, hasTime, and decomp type build_dimension_information(registry, var_xml, &ndims, &hasTime, &decomp); @@ -1424,8 +1545,9 @@ int parse_var(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t currentVa free(tofree); } + fortprintf(fd, " %s(%d) %% defaultValue = %s\n", pointer_name, time_lev, default_value); + fortprintf(fd, " %s(%d) %% defaultValue = %s\n", pointer_name, time_lev, default_value); if ( ndims > 0 ) { - fortprintf(fd, " %s(%d) %% defaultValue = %s\n", pointer_name, time_lev, default_value); fortprintf(fd, " nullify(%s(%d) %% array)\n", pointer_name, time_lev); } else { fortprintf(fd, " %s(%d) %% scalar = %s\n", pointer_name, time_lev, default_value); @@ -1435,7 +1557,55 @@ int parse_var(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t currentVa fortprintf(fd, " nullify(%s(%d) %% sendList)\n", pointer_name, time_lev); fortprintf(fd, " nullify(%s(%d) %% recvList)\n", pointer_name, time_lev); fortprintf(fd, " nullify(%s(%d) %% copyList)\n", pointer_name, time_lev); + fortprintf(fd, " allocate(%s(%d) %% attLists(1))\n", pointer_name, time_lev); + fortprintf(fd, " allocate(%s(%d) %% attLists(1) %% attList)\n", pointer_name, time_lev); + 
fortprintf(fd, " %s(%d) %% attLists(1) %% attList %% attName = ''\n", pointer_name, time_lev); + fortprintf(fd, " %s(%d) %% attLists(1) %% attList %% attType = -1\n", pointer_name, time_lev); + fortprintf(fd, " nullify(%s(%d) %% attLists(1) %% attList %% next)\n", pointer_name, time_lev); + fortprintf(fd, " nullify(%s(%d) %% attLists(1) %% attList %% attValueIntA)\n", pointer_name, time_lev); + fortprintf(fd, " nullify(%s(%d) %% attLists(1) %% attList %% attValueRealA)\n", pointer_name, time_lev); + + if ( varunits != NULL ) { + string = strdup(varunits); + tofree = string; + token = strsep(&string, "'"); + + sprintf(temp_str, "%s", token); + + while ( ( token = strsep(&string, "'") ) != NULL ) { + sprintf(temp_str, "%s''%s", temp_str, token); + } + + free(tofree); + + fortprintf(fd, " call mpas_add_att(%s(%d) %% attLists(1) %% attList, 'units', '%s')\n", pointer_name, time_lev, temp_str); + } + + if ( vardesc != NULL ) { + string = strdup(vardesc); + tofree = string; + token = strsep(&string, "'"); + + sprintf(temp_str, "%s", token); + + while ( ( token = strsep(&string, "'") ) != NULL ) { + sprintf(temp_str, "%s''%s", temp_str, token); + } + + free(tofree); + + fortprintf(fd, " call mpas_add_att(%s(%d) %% attLists(1) %% attList, 'long_name', '%s')\n", pointer_name, time_lev, temp_str); + } + + if ( varmissingval != NULL ) { + fortprintf(fd, " call mpas_add_att(%s(%d) %% attLists(1) %% attList, 'missing_value', %s)\n", pointer_name, time_lev, missing_value); + // Uncomment to add _FillValue to match missing_value + // fortprintf(fd, " call mpas_add_att(%s(%d) %% attLists(1) %% attList, '_FillValue', %s)\n", pointer_name, time_lev, missing_value); + } + fortprintf(fd, " %s(%d) %% missingValue = %s\n", pointer_name, time_lev, missing_value); + fortprintf(fd, " %s(%d) %% block => block\n", pointer_name, time_lev); + } // Parse packages if they are defined @@ -1459,12 +1629,12 @@ int parse_var(FILE *fd, ezxml_t registry, ezxml_t superStruct, ezxml_t currentVa 
for(time_lev = 1; time_lev <= time_levs; time_lev++){ fortprintf(fd, " %s%s(%d) %% isActive = .true.\n", package_spacing, pointer_name, time_lev); } - fortprintf(fd, " %scall mpas_pool_add_field(newSubPool, '%s', %s)\n", package_spacing, varname_in_code, pointer_name); if(varpackages != NULL){ fortprintf(fd, " end if\n"); } + fortprintf(fd, " call mpas_pool_add_field(newSubPool, '%s', %s)\n" , varname_in_code, pointer_name); fortprintf(fd, " call mpas_pool_add_field(block %% allFields, '%s', %s)\n", varname, pointer_name); fortprintf(fd, "\n"); @@ -1520,6 +1690,8 @@ int parse_struct(FILE *fd, ezxml_t registry, ezxml_t superStruct, int subpool, c fortprintf(fd, " use mpas_derived_types\n"); fortprintf(fd, " use mpas_pool_routines\n"); fortprintf(fd, " use mpas_io_units\n"); + fortprintf(fd, " use mpas_io, only : MPAS_REAL_FILLVAL, MPAS_INT_FILLVAL, MPAS_CHAR_FILLVAL\n"); + fortprintf(fd, " implicit none\n"); fortprintf(fd, " type (block_type), intent(inout), pointer :: block\n"); fortprintf(fd, " type (mpas_pool_type), intent(inout) :: structPool\n"); fortprintf(fd, " type (mpas_pool_type), intent(inout) :: dimensionPool\n"); @@ -1565,35 +1737,11 @@ int parse_struct(FILE *fd, ezxml_t registry, ezxml_t superStruct, int subpool, c fortprintf(fd, "\n"); - // Parse packages if they are defined - package_list[0] = '\0'; - no_packages = build_struct_package_lists(superStruct, package_list); - - spacing[0] = '\0'; - if(!no_packages){ - fortprintf(fd, " if ("); - string = strdup(package_list); - tofree = string; - token = strsep(&string, ";"); - fortprintf(fd, "%sActive", token); - - while( (token = strsep(&string, ";")) != NULL){ - fortprintf(fd, " .or. 
%sActive", token); - } - - fortprintf(fd, ") then\n"); - sprintf(spacing, " "); - } - // Setup new pool to be added into structPool - fortprintf(fd, " %sallocate(newSubPool)\n", spacing); - fortprintf(fd, " %scall mpas_pool_create_pool(newSubPool)\n", spacing); - fortprintf(fd, " %scall mpas_pool_add_subpool(structPool, '%s', newSubPool)\n", spacing, structnameincode); - fortprintf(fd, " %scall mpas_pool_add_subpool(block %% allStructs, '%s', newSubPool)\n", spacing, structname); - - if(!no_packages){ - fortprintf(fd, " end if\n"); - } + fortprintf(fd, " allocate(newSubPool)\n"); + fortprintf(fd, " call mpas_pool_create_pool(newSubPool)\n"); + fortprintf(fd, " call mpas_pool_add_subpool(structPool, '%s', newSubPool)\n", structnameincode); + fortprintf(fd, " call mpas_pool_add_subpool(block %% allStructs, '%s', newSubPool)\n", structname); fortprintf(fd, "\n"); @@ -1621,6 +1769,9 @@ int parse_struct(FILE *fd, ezxml_t registry, ezxml_t superStruct, int subpool, c fortprintf(fd, " if (associated(newSubPool)) then\n"); fortprintf(fd, " call mpas_pool_add_config(newSubPool, 'on_a_sphere', block %% domain %% on_a_sphere)\n"); fortprintf(fd, " call mpas_pool_add_config(newSubPool, 'sphere_radius', block %% domain %% sphere_radius)\n"); + fortprintf(fd, " call mpas_pool_add_config(newSubPool, 'is_periodic', block %% domain %% is_periodic)\n"); + fortprintf(fd, " call mpas_pool_add_config(newSubPool, 'x_period', block %% domain %% x_period)\n"); + fortprintf(fd, " call mpas_pool_add_config(newSubPool, 'y_period', block %% domain %% y_period)\n"); fortprintf(fd, " end if\n"); fortprintf(fd, "\n"); @@ -1655,7 +1806,7 @@ int generate_struct_links(FILE *fd, int curLevel, ezxml_t superStruct, ezxml_t r const char *structname; const char *vartimelevs; const char *varname, *vardims, *vartype; - const char *vardefaultval; + const char *vardefaultval, *varmissingval; const char *varname_in_code; int depth; int err; @@ -1666,6 +1817,7 @@ int generate_struct_links(FILE *fd, int 
curLevel, ezxml_t superStruct, ezxml_t r char *string, *tofree, *token; char pointer_name[1024]; char default_value[1024]; + char missing_value[1024]; depth = curLevel + 1; @@ -1708,6 +1860,7 @@ int generate_struct_links(FILE *fd, int curLevel, ezxml_t superStruct, ezxml_t r vartimelevs = ezxml_attr(var_arr_xml, "time_levs"); vartype = ezxml_attr(var_arr_xml, "type"); vardefaultval = ezxml_attr(var_arr_xml, "default_value"); + varmissingval = ezxml_attr(var_arr_xml, "missing_value"); if(!vartimelevs){ vartimelevs = ezxml_attr(subStruct, "time_levs"); @@ -1722,8 +1875,12 @@ int generate_struct_links(FILE *fd, int curLevel, ezxml_t superStruct, ezxml_t r time_levs = 1; } + if(!varmissingval){ + varmissingval = vardefaultval; + } + // Determine field type and default value. - get_field_information(vartype, vardefaultval, default_value, &type); + get_field_information(vartype, vardefaultval, default_value, varmissingval, missing_value, &type); // Determine number of dimensions // and decomp type @@ -1774,6 +1931,7 @@ int generate_struct_links(FILE *fd, int curLevel, ezxml_t superStruct, ezxml_t r vartimelevs = ezxml_attr(var_xml, "time_levs"); vartype = ezxml_attr(var_xml, "type"); vardefaultval = ezxml_attr(var_xml, "default_value"); + varmissingval = ezxml_attr(var_xml, "missing_value"); varname_in_code = ezxml_attr(var_xml, "name_in_code"); if(!vartimelevs){ @@ -1793,8 +1951,12 @@ int generate_struct_links(FILE *fd, int curLevel, ezxml_t superStruct, ezxml_t r varname_in_code = ezxml_attr(var_xml, "name"); } + if(!varmissingval){ + varmissingval = vardefaultval; + } + // Determine field type and default value. 
- get_field_information(vartype, vardefaultval, default_value, &type); + get_field_information(vartype, vardefaultval, default_value, varmissingval, missing_value, &type); // Determine number of dimensions // and decomp type @@ -2388,6 +2550,7 @@ int parse_structs_from_registry(ezxml_t registry)/*{{{*/ fortprintf(fd, " subroutine %s_generate_structs(block, structPool, dimensionPool, packagePool)\n", core_string); fortprintf(fd, " use mpas_derived_types\n"); fortprintf(fd, " use mpas_io_units\n"); + fortprintf(fd, " implicit none\n"); fortprintf(fd, " type (block_type), pointer, intent(inout) :: block\n"); fortprintf(fd, " type (mpas_pool_type), intent(inout) :: structPool\n"); fortprintf(fd, " type (mpas_pool_type), intent(inout) :: dimensionPool\n"); diff --git a/src/tools/registry/gen_inc.h b/src/tools/registry/gen_inc.h index 9db4686c45..765dc0a230 100644 --- a/src/tools/registry/gen_inc.h +++ b/src/tools/registry/gen_inc.h @@ -17,7 +17,7 @@ int add_package_to_list(const char * package, const char * package_list); int build_struct_package_lists(ezxml_t currentPosition, char * out_packages); int get_dimension_information(ezxml_t registry, const char *test_dimname, int *has_time, int *decomp); int build_dimension_information(ezxml_t registry, ezxml_t var, int *ndims, int *has_time, int *decomp); -int get_field_information(const char *vartype, const char *varval, char *default_value, int *type); +int get_field_information(const char *vartype, const char *varval, char *default_value, const char *varmissval, char *missing_value, int *type); int write_set_field_pointer(FILE *fd, const char *spacing, const char *iterator_name, const char *pool_name); void write_default_namelist(ezxml_t registry); int parse_packages_from_registry(ezxml_t registry); diff --git a/test_cases/ocean/.gitignore b/test_cases/ocean/.gitignore new file mode 100644 index 0000000000..32b011dc6b --- /dev/null +++ b/test_cases/ocean/.gitignore @@ -0,0 +1,2 @@ +command_history +local.config diff 
--git a/test_cases/ocean/README b/test_cases/ocean/README new file mode 100644 index 0000000000..fe0a4db00b --- /dev/null +++ b/test_cases/ocean/README @@ -0,0 +1,41 @@ +Author: Doug Jacobsen +Date: 10/01/2015 + +This directory provides a simple test case workflow definition capability. It +is intended to house a minimal number of files which can describe the steps to +setup and configure a test case. + +It provides four utility python scripts: + * clean_testcase.py + * list_testcases.py + * setup_testcase.py + * manage_regression_suite.py + +and two configuration file templates: + * general.config.test + * general.config.ocean + +Each of the python scripts can be run with a -h argument only to get usage information. + +Additionally, each core has a directory at the top level (e.g. ocean for the +ocean test cases). There is also a templates directory where a core can place +template files that are intended to be available for its test cases. + +An example test case is placed in ocean/baroclinic_channel/10km +An example template is placed in templates/ocean/global_stats.xml + +Test cases are described by XML files. Each test case can have an arbitrary +number of XML files that configure the steps for setting up the test case. + +The various XML files that can be used with this test case infrastructure are +described in the README files contained in the doc directory. + - doc/README.config: Describes configuration of a specific step (called a + case) in a test case. + - doc/README.driver_script: Describes configuration of a script to drive an + entire test case (multiple steps / cases) + - doc/README.template: Describes configuration of a template which can be + applied to multiple config files. + - doc/README.regression_suite: Describes configuration of a regression suite, + which is a group of test cases. + - doc/README.run_config: Defines how to take a step, and convert + it into running the model executable in a specific environment. 
diff --git a/test_cases/ocean/clean_testcase.py b/test_cases/ocean/clean_testcase.py new file mode 100755 index 0000000000..0f6588065c --- /dev/null +++ b/test_cases/ocean/clean_testcase.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python +""" +This script is used to clean one or more test cases that have already been +setup. + +It will remove directories / driver scripts that were generated as part of +setting up a test case. +""" + +import sys, os, shutil, fnmatch, re +import argparse +import subprocess +import xml.etree.ElementTree as ET + +if __name__ == "__main__": + # Define and process input arguments + parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + + parser.add_argument("-o", "--core", dest="core", help="Core that conatins configurations to clean", metavar="CORE") + parser.add_argument("-c", "--configuration", dest="configuration", help="Configuration to clean", metavar="CONFIG") + parser.add_argument("-r", "--resolution", dest="resolution", help="Resolution of configuration to clean", metavar="RES") + parser.add_argument("-t", "--test", dest="test", help="Test name within a resolution to clean", metavar="TEST") + parser.add_argument("-n", "--case_number", dest="case_num", help="Case number to clean, as listed from list_testcases.py. 
Can be a comma delimited list of case numbers.", metavar="NUM") + parser.add_argument("-q", "--quiet", dest="quiet", help="If set, script will not write a command_history file", action="store_true") + parser.add_argument("-a", "--all", dest="clean_all", help="If set, the script will clean all test cases in the work_dir.", action="store_true") + parser.add_argument("--work_dir", dest="work_dir", help="If set, script will clean case directories in work_dir rather than the current directory.", metavar="PATH") + + args = parser.parse_args() + + if not args.case_num and ( not args.core and not args.configuration and not args.resolution and not args.test) and not args.clean_all: + print 'Must be run with either the --case_number argument, the --all argument, or all of the core, configuration, resolution, and test arguments.' + parser.error(' Invalid configuration. Exiting...') + + if args.case_num and args.core and args.configuration and args.resolution and args.test and args.clean_all: + print 'Can only be configured with either --case_number (-n), --all (-a), or all of --core (-o), --configuration (-c), --resolution (-r), and --test (-t).' + parser.error(' Invalid configuration. Too many options used. 
Exiting...') + + if not args.clean_all: + if args.case_num: + use_case_list = True + case_list = args.case_num.split(',') + else: + use_case_list = False + case_list = list() + case_list.append(0) + else: + use_case_list = True + valid_case = 1 + case_num = 1 + case_list = list() + + regex = re.compile('(\d):') + core_configuration = subprocess.check_output(['./list_testcases.py']) + for line in core_configuration.split('\n'): + if not regex.search(line) == None: + conf_arr = line.replace(":", " ").split() + case_num = int(conf_arr[0]) + del conf_arr + case_list.append(case_num) + del core_configuration + del regex + + if not args.work_dir: + args.work_dir = os.getcwd() + + args.work_dir = os.path.abspath(args.work_dir) + + # Build variables for history output + old_dir = os.getcwd() + os.chdir( os.path.dirname( os.path.realpath(__file__) ) ) + git_version = subprocess.check_output(['git', 'describe', '--tags', '--dirty']) + git_version = git_version.strip('\n') + os.chdir(old_dir) + calling_command = "" + write_history = False + for arg in sys.argv: + calling_command = "%s%s "%(calling_command, arg) + + # Iterate over all cases in the case_list. + # There is only one if the (-o, -c, -r) options were used in place of (-n) + for case_num in case_list: + # If we're using a case_list, determine the core, configuration, and + # resolution for the current test case. + if use_case_list: + core_configuration = subprocess.check_output(['./list_testcases.py', '-n', '%d'%(int(case_num))]) + config_options = core_configuration.strip('\n').split(' ') + args.core = config_options[1] + args.configuration = config_options[3] + args.resolution = config_options[5] + args.test = config_options[7] + + # Setup each xml file in the configuration directory: + test_path = '%s/%s/%s/%s'%(args.core, args.configuration, args.resolution, args.test) + work_dir = '%s/%s'%(args.work_dir, test_path) + + # Only write history if we did something... 
+ write_history = False + + # Loop over all files in test_path that have the .xml extension. + for file in os.listdir('%s'%(test_path)): + if fnmatch.fnmatch(file, '*.xml'): + # Build full file name + config_file = '%s/%s'%(test_path, file) + + # Parse file + config_tree = ET.parse(config_file) + config_root = config_tree.getroot() + + # Process files + if config_root.tag == 'config': + case_dir = config_root.attrib['case'] + + case_paths = case_dir.split('/') + # Determine the base directory in the case path, to delete + case_base = case_paths[0] + + # Delete the top level directory that was created, if it exists. + if os.path.exists('%s/%s'%(work_dir, case_base)): + if os.path.isdir('%s/%s'%(work_dir, case_base)): + shutil.rmtree('%s/%s'%(work_dir, case_base)) + write_history = True + print ' -- Removed case %s/%s'%(work_dir, case_base) + + # Process files + elif config_root.tag == 'driver_script': + script_name = config_root.attrib['name'] + + # Delete script if it exists + if os.path.exists('%s/%s'%(work_dir, script_name)): + os.remove('%s/%s'%(work_dir, script_name)) + write_history = True + print ' -- Removed driver script %s/%s'%(work_dir, script_name) + + del config_tree + del config_root + + # Write the history of this command to the command_history file, for + # provenance. 
+ if write_history and not args.quiet: + history_file_path = '%s/command_history'%(args.work_dir) + if os.path.exists(history_file_path): + history_file = open(history_file_path, 'a') + history_file.write('\n') + else: + history_file = open(history_file_path, 'w') + + history_file.write('***********************************************************************\n') + history_file.write('git_version: %s\n'%(git_version)) + history_file.write('command: %s\n'%(calling_command)) + history_file.write('setup the following cases:\n') + if use_case_list: + for case_num in case_list: + core_configuration = subprocess.check_output(['./list_testcases.py', '-n', '%d'%(int(case_num))]) + config_options = core_configuration.strip('\n').split(' ') + history_file.write('\n') + history_file.write('\tcore: %s\n'%(config_options[1])) + history_file.write('\tconfiguration: %s\n'%(config_options[3])) + history_file.write('\tresolution: %s\n'%(config_options[5])) + history_file.write('\ttest: %s\n'%(config_options[7])) + else: + history_file.write('core: %s\n'%(args.core)) + history_file.write('configuration: %s\n'%(args.configuration)) + history_file.write('resolution: %s\n'%(args.resolution)) + history_file.write('test: %s\n'%(args.test)) + + history_file.write('***********************************************************************\n') + history_file.close() + diff --git a/test_cases/ocean/doc/README.config b/test_cases/ocean/doc/README.config new file mode 100644 index 0000000000..ddfe00a416 --- /dev/null +++ b/test_cases/ocean/doc/README.config @@ -0,0 +1,243 @@ +Author: Doug Jacobsen +Date: 11/30/2015 + +This document describes the format of a config file, which is used to setup a +case directory. + +A config file contains information describing how to configure a case +directory, including files that the case depends on, executables that are +required for the case, namelists and streams files the case requires, and run +scripts which can be used to automate running a case. 
+
+###### How to use pre-defined paths ######
+
+This testing infrastructure has several predefined paths available as
+attributes to several XML tags. Attributes that can use these will have the
+line "Can use pre-defined paths" in their description.
+
+In order to help you make use of these pre-defined paths, this section will
+describe what they are, and how to use them.
+
+To begin, there are two standard paths. These are referred to as <work_dir>
+and <script_dir>.
+
+<work_dir> is the location where the test cases are set up to run.
+<script_dir> is the location where the testing infrastructure scripts live.
+
+Additionally, there are 5 sub-paths:
+ - <core_dir> - This is the core directory that contains the test case
+ - <configuration_dir> - This is the configuration directory that contains the test case
+ - <resolution_dir> - This is the resolution directory that contains the test case
+ - <test_dir> - This is the test directory that contains the test case
+ - <case_dir> - This is the case directory that is generated from an XML config file
+
+Now, all attributes that can use pre-defined paths can build a path using the
+following syntax:
+
+{base}_{sub}
+
+Where {base} can be either "work" or "script", and {sub} can be any of
+"core_dir", "configuration_dir", "resolution_dir", "test_dir", and "case_dir".
+
+Note, however, that "case_dir" isn't valid when {base} is "script", as a case
+directory isn't typically generated in the script path if it's different from
+the work path.
+
+As an example:
+ - script_test_dir would point to the location where the XML files that set
+   up a test case live
+ - work_test_dir would point to the location where the test case will be set
+   up, and will not include the case directory created from an XML file.
+
+
+###### Description of XML file: ######
+
+Below, you will see text describing the various XML tags available in a config
+file. Each will describe the tag itself, any attributes the tag can have, and
+what children can be placed below the tag.
+
+<config> - This is the overarching parent tag of a config file that describes the setup for a case.
+ - Attributes:
+ * case: The name of the case directory that will be created from this
+ config tag.
+ - Children:
+ * <get_file>
+ * <add_executable>
+ * <add_link>
+ * <namelist>
+ * <streams>
+ * <run_script>
+
+<get_file> - This tag defines the need for ensuring a required file is available, and the
+ appropriate ways of acquiring the file.
+ - Attributes:
+ * hash: (Optional) The expected hash of the mesh file. The acquired
+ mesh file will be validated using this. If this attribute is omitted,
+ the resulting file will not be validated.
+ * dest_path: The path the resulting file should be placed in. Should be
+ the name of a path defined in the config file, or optionally 'case'
+ which is expanded to be the case directory generated from the XML
+ file containing the get_file tag. Can additionally take the values of:
+ - Can use pre-defined paths
+ * file_name: The name of the file that will be downloaded and placed in dest_path.
+ - Children:
+ * <mirror>
+
+<mirror> - This tag defines the different methods of acquiring a required file.
+ - Attributes:
+ * protocol: A description of how the mesh should be retrieved.
+ Currently supports wget.
+ * url: Only used if protocol == wget. The url (pre-filename) portion of
+ the wget command.
+
+<add_executable> - This tag defines the need to link an executable defined in a
+ configuration file (e.g. general.config) into a case directory.
+ - Attributes:
+ * source: The name of the executable, defined in the configuration file
+ (e.g. general.config). This name is a short name, and will be
+ expanded to executables.source
+ * dest: The name of the link that will be generated from the executable.
+
+<add_link> - This tag defines the need to link a file into a case directory.
+ - Attributes:
+ * source_path: The path variable from a configuration file to find the
+ source file in. If it is empty, source is assumed to
+ have the full path to the file. Additionally, it can
+ take the values of:
+ - Can use pre-defined paths
+
+ * source: The source to generate a symlink from.
Relative to the case + directory that will be generated from the parent tag. + * dest: The name of the resulting symlink. + + - This tag defines a namelist that should be generated from a template. + - Attributes: + * name: The name of the namelist file that will be generated from the + template namelist pointed to by its mode attribute. + * mode: The name of the mode to use from the template input files + Each core can define these arbitrarily + - Children: + *