# 1 "../mpp/mpp_domains.F90"
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
!-----------------------------------------------------------------------
! Domain decomposition and domain update for message-passing codes
!
! AUTHOR: V. Balaji (vb@gfdl.gov)
! SGI/GFDL Princeton University
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! For the full text of the GNU General Public License,
! write to: Free Software Foundation, Inc.,
! 675 Mass Ave, Cambridge, MA 02139, USA.
!-----------------------------------------------------------------------
!
! V. Balaji
!
!
! Zhi Liang
!
!
!
!
! mpp_domains_mod is a set of simple calls for domain
! decomposition and domain updates on rectilinear grids. It requires the
! module mpp_mod, upon which it is built.
!
!
! Scalable implementations of finite-difference codes are generally
! based on decomposing the model domain into subdomains that are
! distributed among processors. These domains will then be obliged to
! exchange data at their boundaries if data dependencies are merely
! nearest-neighbour, or may need to acquire information from the global
! domain if there are extended data dependencies, as in the spectral
! transform. The domain decomposition is a key operation in the
! development of parallel codes.
!
! mpp_domains_mod provides a domain decomposition and domain
! update API for rectilinear grids, built on top of the mpp_mod API for message passing. Features
! of mpp_domains_mod include:
!
! Simple, minimal API, with free access to underlying API for more complicated stuff.
!
! Design toward typical use in climate/weather CFD codes.
!
!
! Domains
!
! I have assumed that domain decomposition will mainly be in 2
! horizontal dimensions, which will in general be the two
! fastest-varying indices. There is a separate implementation of 1D
! decomposition on the fastest-varying index, and 1D decomposition on
! the second index, treated as a special case of 2D decomposition, is
! also possible. We define domain as the grid associated with a task.
! We define the compute domain as the set of gridpoints that are
! computed by a task, and the data domain as the set of points
! that are required by the task for the calculation. There can in
! general be more than 1 task per PE, though often
! the number of domains is the same as the processor count. We define
! the global domain as the global computational domain of the
! entire model (i.e., the same as the computational domain if run on a
! single processor). 2D domains are defined using a derived type domain2D,
! constructed as follows (see comments in code for more details):
!
!
! type, public :: domain_axis_spec
! private
! integer :: begin, end, size, max_size
! logical :: is_global
! end type domain_axis_spec
! type, public :: domain1D
! private
! type(domain_axis_spec) :: compute, data, global, active
! logical :: mustputb, mustgetb, mustputf, mustgetf, folded
! type(domain1D), pointer, dimension(:) :: list
! integer :: pe !PE to which this domain is assigned
! integer :: pos
! end type domain1D
!domaintypes of higher rank can be constructed from type domain1D
!typically we only need 1 and 2D, but could need higher (e.g 3D LES)
!some elements are repeated below if they are needed once per domain
! type, public :: domain2D
! private
! type(domain1D) :: x
! type(domain1D) :: y
! type(domain2D), pointer, dimension(:) :: list
! integer :: pe !PE to which this domain is assigned
! integer :: pos
! end type domain2D
! type(domain1D), public :: NULL_DOMAIN1D
! type(domain2D), public :: NULL_DOMAIN2D
!
! The domain2D type contains all the necessary information to
! define the global, compute and data domains of each task, as well as the PE
! associated with the task. The PEs from which remote data may be
! acquired to update the data domain are also contained in a linked list
! of neighbours.
!
module mpp_domains_mod
!a generalized domain decomposition package for use with mpp_mod
!Balaji (vb@gfdl.gov) 15 March 1999
# 1 "../include/fms_platform.h" 1
! -*-f90-*-*
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
!Set type kinds.
# 37
!These values are not necessarily portable.
!DEC$ MESSAGE:'Using 8-byte addressing'
!Control "pure" functions.
# 54
!DEC$ MESSAGE:'Using pure routines.'
!Control array members of derived types.
# 66
!DEC$ MESSAGE:'Using allocatable derived type array members.'
!Control use of cray pointers.
# 78
!DEC$ MESSAGE:'Using cray pointers.'
!Control size of integers that will hold address values.
!Appears for legacy reasons, but seems rather dangerous.
# 89
!If you do not want to use 64-bit integers.
# 95
!If you do not want to use 32-bit floats.
# 106
!If you want to use quad-precision.
# 115
# 132 "../mpp/mpp_domains.F90" 2
# 135
use mpp_parameter_mod, only : MPP_DEBUG, MPP_VERBOSE, MPP_DOMAIN_TIME
use mpp_parameter_mod, only : GLOBAL_DATA_DOMAIN, CYCLIC_GLOBAL_DOMAIN, GLOBAL,CYCLIC
use mpp_parameter_mod, only : AGRID, BGRID_SW, BGRID_NE, CGRID_NE, CGRID_SW, DGRID_NE, DGRID_SW
use mpp_parameter_mod, only : FOLD_WEST_EDGE, FOLD_EAST_EDGE, FOLD_SOUTH_EDGE, FOLD_NORTH_EDGE
use mpp_parameter_mod, only : WUPDATE, EUPDATE, SUPDATE, NUPDATE, XUPDATE, YUPDATE
use mpp_parameter_mod, only : NON_BITWISE_EXACT_SUM, BITWISE_EXACT_SUM, MPP_DOMAIN_TIME
use mpp_parameter_mod, only : CENTER, CORNER, SCALAR_PAIR, SCALAR_BIT, BITWISE_EFP_SUM
use mpp_parameter_mod, only : NORTH, NORTH_EAST, EAST, SOUTH_EAST
use mpp_parameter_mod, only : SOUTH, SOUTH_WEST, WEST, NORTH_WEST
use mpp_parameter_mod, only : MAX_DOMAIN_FIELDS, NULL_PE, DOMAIN_ID_BASE
use mpp_parameter_mod, only : ZERO, NINETY, MINUS_NINETY, ONE_HUNDRED_EIGHTY, MAX_TILES
use mpp_parameter_mod, only : EVENT_SEND, EVENT_RECV, ROOT_GLOBAL
use mpp_parameter_mod, only : NONBLOCK_UPDATE_TAG, EDGEONLY, EDGEUPDATE
use mpp_parameter_mod, only : NONSYMEDGE, NONSYMEDGEUPDATE
use mpp_data_mod, only : mpp_domains_stack, ptr_domains_stack
use mpp_data_mod, only : mpp_domains_stack_nonblock, ptr_domains_stack_nonblock
use mpp_mod, only : mpp_pe, mpp_root_pe, mpp_npes, mpp_error, FATAL, WARNING, NOTE
use mpp_mod, only : stdout, stderr, stdlog, mpp_send, mpp_recv, mpp_transmit, mpp_sync_self
use mpp_mod, only : mpp_clock_id, mpp_clock_begin, mpp_clock_end
use mpp_mod, only : mpp_max, mpp_min, mpp_sum, mpp_get_current_pelist, mpp_broadcast
use mpp_mod, only : mpp_sum_ad
use mpp_mod, only : mpp_sync, mpp_init, mpp_malloc, lowercase
use mpp_mod, only : input_nml_file, mpp_alltoall
use mpp_mod, only : mpp_type, mpp_byte
use mpp_mod, only : mpp_type_create, mpp_type_free
use mpp_mod, only : COMM_TAG_1, COMM_TAG_2, COMM_TAG_3, COMM_TAG_4
use mpp_memutils_mod, only : mpp_memuse_begin, mpp_memuse_end
use mpp_pset_mod, only : mpp_pset_init
use mpp_efp_mod, only : mpp_reproducing_sum
implicit none
private
# 1 "/opt/cray/pe/mpich/8.1.9/ofi/intel/19.0/include/mpif.h" 1
! /* -*- Mode: Fortran; -*- */
!
! (C) 2001 by Argonne National Laboratory.
! See COPYRIGHT in top-level directory.
!
! DO NOT EDIT
! This file created by buildiface
!
INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR
PARAMETER (MPI_SOURCE=3,MPI_TAG=4,MPI_ERROR=5)
INTEGER MPI_STATUS_SIZE
PARAMETER (MPI_STATUS_SIZE=5)
INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1)
INTEGER MPI_ERRCODES_IGNORE(1)
CHARACTER*1 MPI_ARGVS_NULL(1,1)
CHARACTER*1 MPI_ARGV_NULL(1)
INTEGER MPI_SUCCESS
PARAMETER (MPI_SUCCESS=0)
INTEGER MPI_ERR_DIMS
PARAMETER (MPI_ERR_DIMS=11)
INTEGER MPI_ERR_OP
PARAMETER (MPI_ERR_OP=9)
INTEGER MPI_ERR_QUOTA
PARAMETER (MPI_ERR_QUOTA=39)
INTEGER MPI_ERR_NAME
PARAMETER (MPI_ERR_NAME=33)
INTEGER MPI_ERR_RMA_FLAVOR
PARAMETER (MPI_ERR_RMA_FLAVOR=58)
INTEGER MPI_ERR_GROUP
PARAMETER (MPI_ERR_GROUP=8)
INTEGER MPI_ERR_ASSERT
PARAMETER (MPI_ERR_ASSERT=53)
INTEGER MPI_ERR_UNSUPPORTED_DATAREP
PARAMETER (MPI_ERR_UNSUPPORTED_DATAREP=43)
INTEGER MPI_ERR_INFO
PARAMETER (MPI_ERR_INFO=28)
INTEGER MPI_ERR_SIZE
PARAMETER (MPI_ERR_SIZE=51)
INTEGER MPI_ERR_RMA_CONFLICT
PARAMETER (MPI_ERR_RMA_CONFLICT=49)
INTEGER MPI_ERR_COUNT
PARAMETER (MPI_ERR_COUNT=2)
INTEGER MPI_ERR_NOT_SAME
PARAMETER (MPI_ERR_NOT_SAME=35)
INTEGER MPI_ERR_IO
PARAMETER (MPI_ERR_IO=32)
INTEGER MPI_ERR_NO_SPACE
PARAMETER (MPI_ERR_NO_SPACE=36)
INTEGER MPI_ERR_BUFFER
PARAMETER (MPI_ERR_BUFFER=1)
INTEGER MPI_ERR_PORT
PARAMETER (MPI_ERR_PORT=38)
INTEGER MPI_ERR_RMA_ATTACH
PARAMETER (MPI_ERR_RMA_ATTACH=56)
INTEGER MPI_ERR_NO_MEM
PARAMETER (MPI_ERR_NO_MEM=34)
INTEGER MPI_ERR_INFO_VALUE
PARAMETER (MPI_ERR_INFO_VALUE=30)
INTEGER MPI_ERR_IN_STATUS
PARAMETER (MPI_ERR_IN_STATUS=17)
INTEGER MPI_ERR_PENDING
PARAMETER (MPI_ERR_PENDING=18)
INTEGER MPI_ERR_RMA_RANGE
PARAMETER (MPI_ERR_RMA_RANGE=55)
INTEGER MPI_ERR_INFO_KEY
PARAMETER (MPI_ERR_INFO_KEY=29)
INTEGER MPI_ERR_FILE
PARAMETER (MPI_ERR_FILE=27)
INTEGER MPI_ERR_READ_ONLY
PARAMETER (MPI_ERR_READ_ONLY=40)
INTEGER MPI_ERR_REQUEST
PARAMETER (MPI_ERR_REQUEST=19)
INTEGER MPI_ERR_OTHER
PARAMETER (MPI_ERR_OTHER=15)
INTEGER MPI_ERR_TRUNCATE
PARAMETER (MPI_ERR_TRUNCATE=14)
INTEGER MPI_ERR_DISP
PARAMETER (MPI_ERR_DISP=52)
INTEGER MPI_ERR_SPAWN
PARAMETER (MPI_ERR_SPAWN=42)
INTEGER MPI_ERR_UNSUPPORTED_OPERATION
PARAMETER (MPI_ERR_UNSUPPORTED_OPERATION=44)
INTEGER MPI_ERR_UNKNOWN
PARAMETER (MPI_ERR_UNKNOWN=13)
INTEGER MPI_ERR_INTERN
PARAMETER (MPI_ERR_INTERN=16)
INTEGER MPI_ERR_AMODE
PARAMETER (MPI_ERR_AMODE=21)
INTEGER MPI_ERR_KEYVAL
PARAMETER (MPI_ERR_KEYVAL=48)
INTEGER MPI_ERR_ROOT
PARAMETER (MPI_ERR_ROOT=7)
INTEGER MPI_ERR_BAD_FILE
PARAMETER (MPI_ERR_BAD_FILE=22)
INTEGER MPI_ERR_TYPE
PARAMETER (MPI_ERR_TYPE=3)
INTEGER MPI_ERR_ARG
PARAMETER (MPI_ERR_ARG=12)
INTEGER MPI_ERR_TAG
PARAMETER (MPI_ERR_TAG=4)
INTEGER MPI_ERR_TOPOLOGY
PARAMETER (MPI_ERR_TOPOLOGY=10)
INTEGER MPI_ERR_RMA_SYNC
PARAMETER (MPI_ERR_RMA_SYNC=50)
INTEGER MPI_ERR_LOCKTYPE
PARAMETER (MPI_ERR_LOCKTYPE=47)
INTEGER MPI_ERR_DUP_DATAREP
PARAMETER (MPI_ERR_DUP_DATAREP=24)
INTEGER MPI_ERR_INFO_NOKEY
PARAMETER (MPI_ERR_INFO_NOKEY=31)
INTEGER MPI_ERR_CONVERSION
PARAMETER (MPI_ERR_CONVERSION=23)
INTEGER MPI_ERR_FILE_IN_USE
PARAMETER (MPI_ERR_FILE_IN_USE=26)
INTEGER MPI_ERR_SERVICE
PARAMETER (MPI_ERR_SERVICE=41)
INTEGER MPI_ERR_NO_SUCH_FILE
PARAMETER (MPI_ERR_NO_SUCH_FILE=37)
INTEGER MPI_ERR_FILE_EXISTS
PARAMETER (MPI_ERR_FILE_EXISTS=25)
INTEGER MPI_ERR_ACCESS
PARAMETER (MPI_ERR_ACCESS=20)
INTEGER MPI_ERR_RMA_SHARED
PARAMETER (MPI_ERR_RMA_SHARED=57)
INTEGER MPI_ERR_LASTCODE
PARAMETER (MPI_ERR_LASTCODE=1073741823)
INTEGER MPI_ERR_RANK
PARAMETER (MPI_ERR_RANK=6)
INTEGER MPI_ERR_COMM
PARAMETER (MPI_ERR_COMM=5)
INTEGER MPI_ERR_BASE
PARAMETER (MPI_ERR_BASE=46)
INTEGER MPI_ERR_WIN
PARAMETER (MPI_ERR_WIN=45)
INTEGER MPI_ERRORS_ARE_FATAL
PARAMETER (MPI_ERRORS_ARE_FATAL=1409286144)
INTEGER MPI_ERRORS_RETURN
PARAMETER (MPI_ERRORS_RETURN=1409286145)
INTEGER MPI_IDENT
PARAMETER (MPI_IDENT=0)
INTEGER MPI_CONGRUENT
PARAMETER (MPI_CONGRUENT=1)
INTEGER MPI_SIMILAR
PARAMETER (MPI_SIMILAR=2)
INTEGER MPI_UNEQUAL
PARAMETER (MPI_UNEQUAL=3)
INTEGER MPI_WIN_FLAVOR_CREATE
PARAMETER (MPI_WIN_FLAVOR_CREATE=1)
INTEGER MPI_WIN_FLAVOR_ALLOCATE
PARAMETER (MPI_WIN_FLAVOR_ALLOCATE=2)
INTEGER MPI_WIN_FLAVOR_DYNAMIC
PARAMETER (MPI_WIN_FLAVOR_DYNAMIC=3)
INTEGER MPI_WIN_FLAVOR_SHARED
PARAMETER (MPI_WIN_FLAVOR_SHARED=4)
INTEGER MPI_WIN_SEPARATE
PARAMETER (MPI_WIN_SEPARATE=1)
INTEGER MPI_WIN_UNIFIED
PARAMETER (MPI_WIN_UNIFIED=2)
INTEGER MPI_MAX
PARAMETER (MPI_MAX=1476395009)
INTEGER MPI_MIN
PARAMETER (MPI_MIN=1476395010)
INTEGER MPI_SUM
PARAMETER (MPI_SUM=1476395011)
INTEGER MPI_PROD
PARAMETER (MPI_PROD=1476395012)
INTEGER MPI_LAND
PARAMETER (MPI_LAND=1476395013)
INTEGER MPI_BAND
PARAMETER (MPI_BAND=1476395014)
INTEGER MPI_LOR
PARAMETER (MPI_LOR=1476395015)
INTEGER MPI_BOR
PARAMETER (MPI_BOR=1476395016)
INTEGER MPI_LXOR
PARAMETER (MPI_LXOR=1476395017)
INTEGER MPI_BXOR
PARAMETER (MPI_BXOR=1476395018)
INTEGER MPI_MINLOC
PARAMETER (MPI_MINLOC=1476395019)
INTEGER MPI_MAXLOC
PARAMETER (MPI_MAXLOC=1476395020)
INTEGER MPI_REPLACE
PARAMETER (MPI_REPLACE=1476395021)
INTEGER MPI_NO_OP
PARAMETER (MPI_NO_OP=1476395022)
INTEGER MPI_COMM_WORLD
PARAMETER (MPI_COMM_WORLD=1140850688)
INTEGER MPI_COMM_SELF
PARAMETER (MPI_COMM_SELF=1140850689)
INTEGER MPI_GROUP_EMPTY
PARAMETER (MPI_GROUP_EMPTY=1207959552)
INTEGER MPI_COMM_NULL
PARAMETER (MPI_COMM_NULL=67108864)
INTEGER MPI_WIN_NULL
PARAMETER (MPI_WIN_NULL=536870912)
INTEGER MPI_FILE_NULL
PARAMETER (MPI_FILE_NULL=0)
INTEGER MPI_GROUP_NULL
PARAMETER (MPI_GROUP_NULL=134217728)
INTEGER MPI_OP_NULL
PARAMETER (MPI_OP_NULL=402653184)
INTEGER MPI_DATATYPE_NULL
PARAMETER (MPI_DATATYPE_NULL=201326592)
INTEGER MPI_REQUEST_NULL
PARAMETER (MPI_REQUEST_NULL=738197504)
INTEGER MPI_ERRHANDLER_NULL
PARAMETER (MPI_ERRHANDLER_NULL=335544320)
INTEGER MPI_INFO_NULL
PARAMETER (MPI_INFO_NULL=469762048)
INTEGER MPI_INFO_ENV
PARAMETER (MPI_INFO_ENV=1543503873)
INTEGER MPI_TAG_UB
PARAMETER (MPI_TAG_UB=1681915906)
INTEGER MPI_HOST
PARAMETER (MPI_HOST=1681915908)
INTEGER MPI_IO
PARAMETER (MPI_IO=1681915910)
INTEGER MPI_WTIME_IS_GLOBAL
PARAMETER (MPI_WTIME_IS_GLOBAL=1681915912)
INTEGER MPI_UNIVERSE_SIZE
PARAMETER (MPI_UNIVERSE_SIZE=1681915914)
INTEGER MPI_LASTUSEDCODE
PARAMETER (MPI_LASTUSEDCODE=1681915916)
INTEGER MPI_APPNUM
PARAMETER (MPI_APPNUM=1681915918)
INTEGER MPI_WIN_BASE
PARAMETER (MPI_WIN_BASE=1711276034)
INTEGER MPI_WIN_SIZE
PARAMETER (MPI_WIN_SIZE=1711276036)
INTEGER MPI_WIN_DISP_UNIT
PARAMETER (MPI_WIN_DISP_UNIT=1711276038)
INTEGER MPI_WIN_CREATE_FLAVOR
PARAMETER (MPI_WIN_CREATE_FLAVOR=1711276040)
INTEGER MPI_WIN_MODEL
PARAMETER (MPI_WIN_MODEL=1711276042)
INTEGER MPI_MAX_ERROR_STRING
PARAMETER (MPI_MAX_ERROR_STRING=512-1)
INTEGER MPI_MAX_PORT_NAME
PARAMETER (MPI_MAX_PORT_NAME=255)
INTEGER MPI_MAX_OBJECT_NAME
PARAMETER (MPI_MAX_OBJECT_NAME=127)
INTEGER MPI_MAX_INFO_KEY
PARAMETER (MPI_MAX_INFO_KEY=254)
INTEGER MPI_MAX_INFO_VAL
PARAMETER (MPI_MAX_INFO_VAL=1023)
INTEGER MPI_MAX_PROCESSOR_NAME
PARAMETER (MPI_MAX_PROCESSOR_NAME=128-1)
INTEGER MPI_MAX_DATAREP_STRING
PARAMETER (MPI_MAX_DATAREP_STRING=127)
INTEGER MPI_MAX_LIBRARY_VERSION_STRING
PARAMETER (MPI_MAX_LIBRARY_VERSION_STRING=8192-1)
INTEGER MPI_UNDEFINED
PARAMETER (MPI_UNDEFINED=(-32766))
INTEGER MPI_KEYVAL_INVALID
PARAMETER (MPI_KEYVAL_INVALID=603979776)
INTEGER MPI_BSEND_OVERHEAD
PARAMETER (MPI_BSEND_OVERHEAD=96)
INTEGER MPI_PROC_NULL
PARAMETER (MPI_PROC_NULL=-1)
INTEGER MPI_ANY_SOURCE
PARAMETER (MPI_ANY_SOURCE=-2)
INTEGER MPI_ANY_TAG
PARAMETER (MPI_ANY_TAG=-1)
INTEGER MPI_ROOT
PARAMETER (MPI_ROOT=-3)
INTEGER MPI_GRAPH
PARAMETER (MPI_GRAPH=1)
INTEGER MPI_CART
PARAMETER (MPI_CART=2)
INTEGER MPI_DIST_GRAPH
PARAMETER (MPI_DIST_GRAPH=3)
INTEGER MPI_VERSION
PARAMETER (MPI_VERSION=3)
INTEGER MPI_SUBVERSION
PARAMETER (MPI_SUBVERSION=1)
INTEGER MPI_LOCK_EXCLUSIVE
PARAMETER (MPI_LOCK_EXCLUSIVE=234)
INTEGER MPI_LOCK_SHARED
PARAMETER (MPI_LOCK_SHARED=235)
INTEGER MPI_COMPLEX
PARAMETER (MPI_COMPLEX=1275070494)
INTEGER MPI_DOUBLE_COMPLEX
PARAMETER (MPI_DOUBLE_COMPLEX=1275072546)
INTEGER MPI_LOGICAL
PARAMETER (MPI_LOGICAL=1275069469)
INTEGER MPI_REAL
PARAMETER (MPI_REAL=1275069468)
INTEGER MPI_DOUBLE_PRECISION
PARAMETER (MPI_DOUBLE_PRECISION=1275070495)
INTEGER MPI_INTEGER
PARAMETER (MPI_INTEGER=1275069467)
INTEGER MPI_2INTEGER
PARAMETER (MPI_2INTEGER=1275070496)
INTEGER MPI_2DOUBLE_PRECISION
PARAMETER (MPI_2DOUBLE_PRECISION=1275072547)
INTEGER MPI_2REAL
PARAMETER (MPI_2REAL=1275070497)
INTEGER MPI_CHARACTER
PARAMETER (MPI_CHARACTER=1275068698)
INTEGER MPI_BYTE
PARAMETER (MPI_BYTE=1275068685)
INTEGER MPI_UB
PARAMETER (MPI_UB=1275068433)
INTEGER MPI_LB
PARAMETER (MPI_LB=1275068432)
INTEGER MPI_PACKED
PARAMETER (MPI_PACKED=1275068687)
INTEGER MPI_INTEGER1
PARAMETER (MPI_INTEGER1=1275068717)
INTEGER MPI_INTEGER2
PARAMETER (MPI_INTEGER2=1275068975)
INTEGER MPI_INTEGER4
PARAMETER (MPI_INTEGER4=1275069488)
INTEGER MPI_INTEGER8
PARAMETER (MPI_INTEGER8=1275070513)
INTEGER MPI_INTEGER16
PARAMETER (MPI_INTEGER16=MPI_DATATYPE_NULL)
INTEGER MPI_REAL4
PARAMETER (MPI_REAL4=1275069479)
INTEGER MPI_REAL8
PARAMETER (MPI_REAL8=1275070505)
INTEGER MPI_REAL16
PARAMETER (MPI_REAL16=1275072555)
INTEGER MPI_COMPLEX8
PARAMETER (MPI_COMPLEX8=1275070504)
INTEGER MPI_COMPLEX16
PARAMETER (MPI_COMPLEX16=1275072554)
INTEGER MPI_COMPLEX32
PARAMETER (MPI_COMPLEX32=1275076652)
INTEGER MPI_ADDRESS_KIND
PARAMETER (MPI_ADDRESS_KIND=8)
INTEGER MPI_OFFSET_KIND
PARAMETER (MPI_OFFSET_KIND=8)
INTEGER MPI_COUNT_KIND
PARAMETER (MPI_COUNT_KIND=8)
INTEGER MPI_INTEGER_KIND
PARAMETER (MPI_INTEGER_KIND=4)
INTEGER MPI_CHAR
PARAMETER (MPI_CHAR=1275068673)
INTEGER MPI_SIGNED_CHAR
PARAMETER (MPI_SIGNED_CHAR=1275068696)
INTEGER MPI_UNSIGNED_CHAR
PARAMETER (MPI_UNSIGNED_CHAR=1275068674)
INTEGER MPI_WCHAR
PARAMETER (MPI_WCHAR=1275069454)
INTEGER MPI_SHORT
PARAMETER (MPI_SHORT=1275068931)
INTEGER MPI_UNSIGNED_SHORT
PARAMETER (MPI_UNSIGNED_SHORT=1275068932)
INTEGER MPI_INT
PARAMETER (MPI_INT=1275069445)
INTEGER MPI_UNSIGNED
PARAMETER (MPI_UNSIGNED=1275069446)
INTEGER MPI_LONG
PARAMETER (MPI_LONG=1275070471)
INTEGER MPI_UNSIGNED_LONG
PARAMETER (MPI_UNSIGNED_LONG=1275070472)
INTEGER MPI_FLOAT
PARAMETER (MPI_FLOAT=1275069450)
INTEGER MPI_DOUBLE
PARAMETER (MPI_DOUBLE=1275070475)
INTEGER MPI_LONG_DOUBLE
PARAMETER (MPI_LONG_DOUBLE=1275072524)
INTEGER MPI_LONG_LONG_INT
PARAMETER (MPI_LONG_LONG_INT=1275070473)
INTEGER MPI_UNSIGNED_LONG_LONG
PARAMETER (MPI_UNSIGNED_LONG_LONG=1275070489)
INTEGER MPI_LONG_LONG
PARAMETER (MPI_LONG_LONG=1275070473)
INTEGER MPI_FLOAT_INT
PARAMETER (MPI_FLOAT_INT=-1946157056)
INTEGER MPI_DOUBLE_INT
PARAMETER (MPI_DOUBLE_INT=-1946157055)
INTEGER MPI_LONG_INT
PARAMETER (MPI_LONG_INT=-1946157054)
INTEGER MPI_SHORT_INT
PARAMETER (MPI_SHORT_INT=-1946157053)
INTEGER MPI_2INT
PARAMETER (MPI_2INT=1275070486)
INTEGER MPI_LONG_DOUBLE_INT
PARAMETER (MPI_LONG_DOUBLE_INT=-1946157052)
INTEGER MPI_INT8_T
PARAMETER (MPI_INT8_T=1275068727)
INTEGER MPI_INT16_T
PARAMETER (MPI_INT16_T=1275068984)
INTEGER MPI_INT32_T
PARAMETER (MPI_INT32_T=1275069497)
INTEGER MPI_INT64_T
PARAMETER (MPI_INT64_T=1275070522)
INTEGER MPI_UINT8_T
PARAMETER (MPI_UINT8_T=1275068731)
INTEGER MPI_UINT16_T
PARAMETER (MPI_UINT16_T=1275068988)
INTEGER MPI_UINT32_T
PARAMETER (MPI_UINT32_T=1275069501)
INTEGER MPI_UINT64_T
PARAMETER (MPI_UINT64_T=1275070526)
INTEGER MPI_C_BOOL
PARAMETER (MPI_C_BOOL=1275068735)
INTEGER MPI_C_FLOAT_COMPLEX
PARAMETER (MPI_C_FLOAT_COMPLEX=1275070528)
INTEGER MPI_C_COMPLEX
PARAMETER (MPI_C_COMPLEX=1275070528)
INTEGER MPI_C_DOUBLE_COMPLEX
PARAMETER (MPI_C_DOUBLE_COMPLEX=1275072577)
INTEGER MPI_C_LONG_DOUBLE_COMPLEX
PARAMETER (MPI_C_LONG_DOUBLE_COMPLEX=1275076674)
INTEGER MPI_AINT
PARAMETER (MPI_AINT=1275070531)
INTEGER MPI_OFFSET
PARAMETER (MPI_OFFSET=1275070532)
INTEGER MPI_COUNT
PARAMETER (MPI_COUNT=1275070533)
INTEGER MPI_CXX_BOOL
PARAMETER (MPI_CXX_BOOL=MPI_DATATYPE_NULL)
INTEGER MPI_CXX_FLOAT_COMPLEX
PARAMETER (MPI_CXX_FLOAT_COMPLEX=MPI_DATATYPE_NULL)
INTEGER MPI_CXX_DOUBLE_COMPLEX
PARAMETER (MPI_CXX_DOUBLE_COMPLEX=MPI_DATATYPE_NULL)
INTEGER MPI_CXX_LONG_DOUBLE_COMPLEX
PARAMETER (MPI_CXX_LONG_DOUBLE_COMPLEX=MPI_DATATYPE_NULL)
INTEGER MPI_COMBINER_NAMED
PARAMETER (MPI_COMBINER_NAMED=1)
INTEGER MPI_COMBINER_DUP
PARAMETER (MPI_COMBINER_DUP=2)
INTEGER MPI_COMBINER_CONTIGUOUS
PARAMETER (MPI_COMBINER_CONTIGUOUS=3)
INTEGER MPI_COMBINER_VECTOR
PARAMETER (MPI_COMBINER_VECTOR=4)
INTEGER MPI_COMBINER_HVECTOR_INTEGER
PARAMETER (MPI_COMBINER_HVECTOR_INTEGER=5)
INTEGER MPI_COMBINER_HVECTOR
PARAMETER (MPI_COMBINER_HVECTOR=6)
INTEGER MPI_COMBINER_INDEXED
PARAMETER (MPI_COMBINER_INDEXED=7)
INTEGER MPI_COMBINER_HINDEXED_INTEGER
PARAMETER (MPI_COMBINER_HINDEXED_INTEGER=8)
INTEGER MPI_COMBINER_HINDEXED
PARAMETER (MPI_COMBINER_HINDEXED=9)
INTEGER MPI_COMBINER_INDEXED_BLOCK
PARAMETER (MPI_COMBINER_INDEXED_BLOCK=10)
INTEGER MPI_COMBINER_STRUCT_INTEGER
PARAMETER (MPI_COMBINER_STRUCT_INTEGER=11)
INTEGER MPI_COMBINER_STRUCT
PARAMETER (MPI_COMBINER_STRUCT=12)
INTEGER MPI_COMBINER_SUBARRAY
PARAMETER (MPI_COMBINER_SUBARRAY=13)
INTEGER MPI_COMBINER_DARRAY
PARAMETER (MPI_COMBINER_DARRAY=14)
INTEGER MPI_COMBINER_F90_REAL
PARAMETER (MPI_COMBINER_F90_REAL=15)
INTEGER MPI_COMBINER_F90_COMPLEX
PARAMETER (MPI_COMBINER_F90_COMPLEX=16)
INTEGER MPI_COMBINER_F90_INTEGER
PARAMETER (MPI_COMBINER_F90_INTEGER=17)
INTEGER MPI_COMBINER_RESIZED
PARAMETER (MPI_COMBINER_RESIZED=18)
INTEGER MPI_COMBINER_HINDEXED_BLOCK
PARAMETER (MPI_COMBINER_HINDEXED_BLOCK=19)
INTEGER MPI_TYPECLASS_REAL
PARAMETER (MPI_TYPECLASS_REAL=1)
INTEGER MPI_TYPECLASS_INTEGER
PARAMETER (MPI_TYPECLASS_INTEGER=2)
INTEGER MPI_TYPECLASS_COMPLEX
PARAMETER (MPI_TYPECLASS_COMPLEX=3)
INTEGER MPI_MODE_NOCHECK
PARAMETER (MPI_MODE_NOCHECK=1024)
INTEGER MPI_MODE_NOSTORE
PARAMETER (MPI_MODE_NOSTORE=2048)
INTEGER MPI_MODE_NOPUT
PARAMETER (MPI_MODE_NOPUT=4096)
INTEGER MPI_MODE_NOPRECEDE
PARAMETER (MPI_MODE_NOPRECEDE=8192)
INTEGER MPI_MODE_NOSUCCEED
PARAMETER (MPI_MODE_NOSUCCEED=16384)
INTEGER MPI_COMM_TYPE_SHARED
PARAMETER (MPI_COMM_TYPE_SHARED=1)
INTEGER MPI_MESSAGE_NULL
PARAMETER (MPI_MESSAGE_NULL=738197504)
INTEGER MPI_MESSAGE_NO_PROC
PARAMETER (MPI_MESSAGE_NO_PROC=1811939328)
INTEGER MPI_THREAD_SINGLE
PARAMETER (MPI_THREAD_SINGLE=0)
INTEGER MPI_THREAD_FUNNELED
PARAMETER (MPI_THREAD_FUNNELED=1)
INTEGER MPI_THREAD_SERIALIZED
PARAMETER (MPI_THREAD_SERIALIZED=2)
INTEGER MPI_THREAD_MULTIPLE
PARAMETER (MPI_THREAD_MULTIPLE=3)
INTEGER MPI_MODE_RDONLY
PARAMETER (MPI_MODE_RDONLY=2)
INTEGER MPI_MODE_RDWR
PARAMETER (MPI_MODE_RDWR=8)
INTEGER MPI_MODE_WRONLY
PARAMETER (MPI_MODE_WRONLY=4)
INTEGER MPI_MODE_DELETE_ON_CLOSE
PARAMETER (MPI_MODE_DELETE_ON_CLOSE=16)
INTEGER MPI_MODE_UNIQUE_OPEN
PARAMETER (MPI_MODE_UNIQUE_OPEN=32)
INTEGER MPI_MODE_CREATE
PARAMETER (MPI_MODE_CREATE=1)
INTEGER MPI_MODE_EXCL
PARAMETER (MPI_MODE_EXCL=64)
INTEGER MPI_MODE_APPEND
PARAMETER (MPI_MODE_APPEND=128)
INTEGER MPI_MODE_SEQUENTIAL
PARAMETER (MPI_MODE_SEQUENTIAL=256)
INTEGER MPI_SEEK_SET
PARAMETER (MPI_SEEK_SET=600)
INTEGER MPI_SEEK_CUR
PARAMETER (MPI_SEEK_CUR=602)
INTEGER MPI_SEEK_END
PARAMETER (MPI_SEEK_END=604)
INTEGER MPI_ORDER_C
PARAMETER (MPI_ORDER_C=56)
INTEGER MPI_ORDER_FORTRAN
PARAMETER (MPI_ORDER_FORTRAN=57)
INTEGER MPI_DISTRIBUTE_BLOCK
PARAMETER (MPI_DISTRIBUTE_BLOCK=121)
INTEGER MPI_DISTRIBUTE_CYCLIC
PARAMETER (MPI_DISTRIBUTE_CYCLIC=122)
INTEGER MPI_DISTRIBUTE_NONE
PARAMETER (MPI_DISTRIBUTE_NONE=123)
INTEGER MPI_DISTRIBUTE_DFLT_DARG
PARAMETER (MPI_DISTRIBUTE_DFLT_DARG=-49767)
integer*8 MPI_DISPLACEMENT_CURRENT
PARAMETER (MPI_DISPLACEMENT_CURRENT=-54278278)
LOGICAL MPI_SUBARRAYS_SUPPORTED
PARAMETER(MPI_SUBARRAYS_SUPPORTED=.FALSE.)
LOGICAL MPI_ASYNC_PROTECTS_NONBLOCKING
PARAMETER(MPI_ASYNC_PROTECTS_NONBLOCKING=.FALSE.)
INTEGER MPI_BOTTOM, MPI_IN_PLACE, MPI_UNWEIGHTED
INTEGER MPI_WEIGHTS_EMPTY
EXTERNAL MPI_DUP_FN, MPI_NULL_DELETE_FN, MPI_NULL_COPY_FN
EXTERNAL MPI_WTIME, MPI_WTICK
EXTERNAL PMPI_WTIME, PMPI_WTICK
EXTERNAL MPI_COMM_DUP_FN, MPI_COMM_NULL_DELETE_FN
EXTERNAL MPI_COMM_NULL_COPY_FN
EXTERNAL MPI_WIN_DUP_FN, MPI_WIN_NULL_DELETE_FN
EXTERNAL MPI_WIN_NULL_COPY_FN
EXTERNAL MPI_TYPE_DUP_FN, MPI_TYPE_NULL_DELETE_FN
EXTERNAL MPI_TYPE_NULL_COPY_FN
EXTERNAL MPI_CONVERSION_FN_NULL
REAL*8 MPI_WTIME, MPI_WTICK
REAL*8 PMPI_WTIME, PMPI_WTICK
COMMON /MPIFCMB5/ MPI_UNWEIGHTED
COMMON /MPIFCMB9/ MPI_WEIGHTS_EMPTY
SAVE /MPIFCMB5/
SAVE /MPIFCMB9/
COMMON /MPIPRIV1/ MPI_BOTTOM, MPI_IN_PLACE, MPI_STATUS_IGNORE
COMMON /MPIPRIV2/ MPI_STATUSES_IGNORE, MPI_ERRCODES_IGNORE
SAVE /MPIPRIV1/,/MPIPRIV2/
COMMON /MPIPRIVC/ MPI_ARGVS_NULL, MPI_ARGV_NULL
SAVE /MPIPRIVC/
# 171 "../mpp/mpp_domains.F90" 2
!sgi_mipspro gets this from 'use mpi'
!--- public parameters imported from mpp_parameter_mod
public :: GLOBAL_DATA_DOMAIN, CYCLIC_GLOBAL_DOMAIN, BGRID_NE, BGRID_SW, CGRID_NE, CGRID_SW, AGRID
public :: DGRID_NE, DGRID_SW, FOLD_WEST_EDGE, FOLD_EAST_EDGE, FOLD_SOUTH_EDGE, FOLD_NORTH_EDGE
public :: WUPDATE, EUPDATE, SUPDATE, NUPDATE, XUPDATE, YUPDATE
public :: NON_BITWISE_EXACT_SUM, BITWISE_EXACT_SUM, MPP_DOMAIN_TIME, BITWISE_EFP_SUM
public :: CENTER, CORNER, SCALAR_PAIR
public :: NORTH, NORTH_EAST, EAST, SOUTH_EAST
public :: SOUTH, SOUTH_WEST, WEST, NORTH_WEST
public :: ZERO, NINETY, MINUS_NINETY, ONE_HUNDRED_EIGHTY
public :: EDGEUPDATE, NONSYMEDGEUPDATE
!--- public data imported from mpp_data_mod
public :: NULL_DOMAIN1D, NULL_DOMAIN2D
public :: domain_axis_spec, domain1D, domain2D, DomainCommunicator2D
public :: nest_domain_type, mpp_group_update_type
!--- public interface from mpp_domains_util.h
public :: mpp_domains_set_stack_size, mpp_get_compute_domain, mpp_get_compute_domains
public :: mpp_get_data_domain, mpp_get_global_domain, mpp_get_domain_components
public :: mpp_get_layout, mpp_get_pelist, operator(.EQ.), operator(.NE.)
public :: mpp_domain_is_symmetry, mpp_domain_is_initialized
public :: mpp_get_neighbor_pe, mpp_nullify_domain_list
public :: mpp_set_compute_domain, mpp_set_data_domain, mpp_set_global_domain
public :: mpp_get_memory_domain, mpp_get_domain_shift, mpp_domain_is_tile_root_pe
public :: mpp_get_tile_id, mpp_get_domain_extents, mpp_get_current_ntile, mpp_get_ntile_count
public :: mpp_get_tile_list
public :: mpp_get_tile_npes, mpp_get_domain_root_pe, mpp_get_tile_pelist, mpp_get_tile_compute_domains
public :: mpp_get_num_overlap, mpp_get_overlap
public :: mpp_get_io_domain, mpp_get_domain_pe, mpp_get_domain_tile_root_pe
public :: mpp_get_domain_name, mpp_get_io_domain_layout
public :: mpp_copy_domain, mpp_set_domain_symmetry
public :: mpp_get_update_pelist, mpp_get_update_size
public :: mpp_get_domain_npes, mpp_get_domain_pelist
public :: mpp_clear_group_update
public :: mpp_group_update_initialized, mpp_group_update_is_set
!--- public interface from mpp_domains_reduce.h
public :: mpp_global_field, mpp_global_max, mpp_global_min, mpp_global_sum
public :: mpp_global_sum_tl, mpp_global_sum_ad
!--- public interface from mpp_domains_misc.h
public :: mpp_broadcast_domain, mpp_domains_init, mpp_domains_exit, mpp_redistribute
public :: mpp_update_domains, mpp_check_field
public :: mpp_start_update_domains, mpp_complete_update_domains
public :: mpp_create_group_update, mpp_do_group_update
public :: mpp_start_group_update, mpp_complete_group_update
public :: mpp_reset_group_update_field
public :: mpp_update_nest_fine, mpp_update_nest_coarse
public :: mpp_get_boundary
public :: mpp_update_domains_ad
public :: mpp_get_boundary_ad
public :: mpp_pass_SG_to_UG, mpp_pass_UG_to_SG
!--- public interface from mpp_domains_define.h
public :: mpp_define_layout, mpp_define_domains, mpp_modify_domain, mpp_define_mosaic
public :: mpp_define_mosaic_pelist, mpp_define_null_domain, mpp_mosaic_defined
public :: mpp_define_io_domain, mpp_deallocate_domain
public :: mpp_compute_extent, mpp_compute_block_extent
!--- public interface for unstruct domain
public :: mpp_define_unstruct_domain, domainUG, mpp_get_UG_io_domain
public :: mpp_get_UG_domain_npes, mpp_get_UG_compute_domain, mpp_get_UG_domain_tile_id
public :: mpp_get_UG_domain_pelist, mpp_get_ug_domain_grid_index
public :: mpp_get_UG_domain_ntiles, mpp_get_UG_global_domain
public :: mpp_global_field_ug, mpp_get_ug_domain_tile_list, mpp_get_UG_compute_domains
public :: mpp_define_null_UG_domain, NULL_DOMAINUG, mpp_get_UG_domains_index
public :: mpp_get_UG_SG_domain, mpp_get_UG_domain_tile_pe_inf
!--- public interface from mpp_define_domains.inc
public :: mpp_define_nest_domains, mpp_get_C2F_index, mpp_get_F2C_index
!----------
!ug support
public :: mpp_domain_UG_is_tile_root_pe
public :: mpp_deallocate_domainUG
public :: mpp_get_io_domain_UG_layout
!----------
integer, parameter :: NAME_LENGTH = 64
integer, parameter :: MAXLIST = 100
integer, parameter :: MAXOVERLAP = 200
integer, parameter :: FIELD_S = 0
integer, parameter :: FIELD_X = 1
integer, parameter :: FIELD_Y = 2
!--- data types used in mpp_domains_mod.
type unstruct_axis_spec
private
integer :: begin, end, size, max_size
integer :: begin_index, end_index
end type unstruct_axis_spec
type unstruct_domain_spec
private
type(unstruct_axis_spec) :: compute
integer :: pe
integer :: pos
integer :: tile_id
end type unstruct_domain_spec
type unstruct_overlap_type
private
integer :: count = 0
integer :: pe
integer, pointer :: i(:)=>NULL()
integer, pointer :: j(:)=>NULL()
end type unstruct_overlap_type
type unstruct_pass_type
private
integer :: nsend, nrecv
type(unstruct_overlap_type), pointer :: recv(:)=>NULL()
type(unstruct_overlap_type), pointer :: send(:)=>NULL()
end type unstruct_pass_type
type domainUG
private
type(unstruct_axis_spec) :: compute, global
type(unstruct_domain_spec), pointer :: list(:)=>NULL()
type(domainUG), pointer :: io_domain=>NULL()
type(unstruct_pass_type) :: SG2UG
type(unstruct_pass_type) :: UG2SG
integer, pointer :: grid_index(:) => NULL() ! on current pe
type(domain2d), pointer :: SG_domain => NULL()
integer :: pe
integer :: pos
integer :: ntiles
integer :: tile_id
integer :: tile_root_pe
integer :: tile_npes
integer :: npes_io_group
integer(4) :: io_layout
end type domainUG
type domain_axis_spec !type used to specify index limits along an axis of a domain
private
integer :: begin, end, size, max_size !start, end of domain axis, size, max size in set
logical :: is_global !TRUE if domain axis extent covers global domain
end type domain_axis_spec
type domain1D
private
type(domain_axis_spec) :: compute, data, global, memory
logical :: cyclic
type(domain1D), pointer :: list(:) =>NULL()
integer :: pe !PE to which this domain is assigned
integer :: pos !position of this PE within link list, i.e domain%list(pos)%pe = pe
integer :: goffset, loffset !needed for global sum
end type domain1D
type domain1D_spec
private
type(domain_axis_spec) :: compute
integer :: pos
end type domain1D_spec
type domain2D_spec
private
type(domain1D_spec), pointer :: x(:) => NULL() ! x-direction domain decomposition
type(domain1D_spec), pointer :: y(:) => NULL() ! y-direction domain decomposition
integer, pointer :: tile_id(:) => NULL() ! tile id of each tile
integer :: pe ! PE to which this domain is assigned
integer :: pos ! position of this PE within link list
integer :: tile_root_pe ! root pe of tile.
end type domain2D_spec
type overlap_type
private
integer :: count = 0 ! number of overlapping regions
integer :: pe
integer :: start_pos ! start position in the buffer
integer :: totsize ! total message size
integer , pointer :: msgsize(:) => NULL() ! overlapping msgsize to be sent or received
integer, pointer :: tileMe(:) => NULL() ! my tile id for this overlap
integer, pointer :: tileNbr(:) => NULL() ! neighbor tile id for this overlap
integer, pointer :: is(:) => NULL() ! starting i-index
integer, pointer :: ie(:) => NULL() ! ending i-index
integer, pointer :: js(:) => NULL() ! starting j-index
integer, pointer :: je(:) => NULL() ! ending j-index
integer, pointer :: dir(:) => NULL() ! direction ( value 1,2,3,4 = E,S,W,N)
integer, pointer :: rotation(:) => NULL() ! rotation angle.
integer, pointer :: index(:) => NULL() ! for refinement
logical, pointer :: from_contact(:) => NULL() ! indicate if the overlap is computed from define_contact_overlap
end type overlap_type
type overlapSpec
private
integer :: whalo, ehalo, shalo, nhalo ! halo size
integer :: xbegin, xend, ybegin, yend
integer :: nsend, nrecv
integer :: sendsize, recvsize
type(overlap_type), pointer :: send(:) => NULL()
type(overlap_type), pointer :: recv(:) => NULL()
type(overlapSpec), pointer :: next => NULL()
end type overlapSpec
type tile_type
integer :: xbegin, xend, ybegin, yend
end type tile_type
!domaintypes of higher rank can be constructed from type domain1D
!typically we only need 1 and 2D, but could need higher (e.g 3D LES)
!some elements are repeated below if they are needed once per domain, not once per axis
type domain2D
private
character(len=NAME_LENGTH) :: name='unnamed' ! name of the domain, default is "unnamed"
integer(8) :: id
integer :: pe ! PE to which this domain is assigned
integer :: fold
integer :: pos ! position of this PE within link list
logical :: symmetry ! indicate the domain is symmetric or non-symmetric.
integer :: whalo, ehalo ! halo size in x-direction
integer :: shalo, nhalo ! halo size in y-direction
integer :: ntiles ! number of tiles within mosaic
integer :: max_ntile_pe ! maximum value in the pelist of number of tiles on each pe.
integer :: ncontacts ! number of contact region within mosaic.
logical :: rotated_ninety ! indicate if any contact rotate NINETY or MINUS_NINETY
logical :: initialized=.FALSE. ! indicate if the overlapping is computed or not.
integer :: tile_root_pe ! root pe of current tile.
integer :: io_layout(2) ! io_layout, will be set through mpp_define_io_domain
! default = domain layout
integer, pointer :: pearray(:,:) => NULL() ! pe of each layout position
integer, pointer :: tile_id(:) => NULL() ! tile id of each tile
type(domain1D), pointer :: x(:) => NULL() ! x-direction domain decomposition
type(domain1D), pointer :: y(:) => NULL() ! y-direction domain decomposition
type(domain2D_spec),pointer :: list(:) => NULL() ! domain decomposition on pe list
type(tile_type), pointer :: tileList(:) => NULL() ! store tile information
type(overlapSpec), pointer :: check_C => NULL() ! send and recv information for boundary consistency check of C-cell
type(overlapSpec), pointer :: check_E => NULL() ! send and recv information for boundary consistency check of E-cell
type(overlapSpec), pointer :: check_N => NULL() ! send and recv information for boundary consistency check of N-cell
type(overlapSpec), pointer :: bound_C => NULL() ! send information for getting boundary value for symmetry domain.
type(overlapSpec), pointer :: bound_E => NULL() ! send information for getting boundary value for symmetry domain.
type(overlapSpec), pointer :: bound_N => NULL() ! send information for getting boundary value for symmetry domain.
type(overlapSpec), pointer :: update_T => NULL() ! send and recv information for halo update of T-cell.
type(overlapSpec), pointer :: update_E => NULL() ! send and recv information for halo update of E-cell.
type(overlapSpec), pointer :: update_C => NULL() ! send and recv information for halo update of C-cell.
type(overlapSpec), pointer :: update_N => NULL() ! send and recv information for halo update of N-cell.
type(domain2d), pointer :: io_domain => NULL() ! domain for IO, will be set through calling mpp_set_io_domain ( this will be changed).
end type domain2D
!--- the following type is used to represent the contact between tiles.
!--- this type will only be used in mpp_domains_define.inc
type contact_type
private
integer :: ncontact ! number of neighbor tile.
integer, pointer :: tile(:) =>NULL() ! neighbor tile
integer, pointer :: align1(:)=>NULL(), align2(:)=>NULL() ! alignment of me and neighbor
real, pointer :: refine1(:)=>NULL(), refine2(:)=>NULL() !
integer, pointer :: is1(:)=>NULL(), ie1(:)=>NULL() ! i-index of current tile representing contact
integer, pointer :: js1(:)=>NULL(), je1(:)=>NULL() ! j-index of current tile representing contact
integer, pointer :: is2(:)=>NULL(), ie2(:)=>NULL() ! i-index of neighbor tile representing contact
integer, pointer :: js2(:)=>NULL(), je2(:)=>NULL() ! j-index of neighbor tile representing contact
end type contact_type
type index_type
integer :: is_me, ie_me, js_me, je_me
integer :: is_you, ie_you, js_you, je_you
end type index_type
type nestSpec
private
integer :: xbegin, xend, ybegin, yend
type(index_type) :: west, east, south, north, center
integer :: nsend, nrecv
integer :: extra_halo
type(overlap_type), pointer :: send(:) => NULL()
type(overlap_type), pointer :: recv(:) => NULL()
type(nestSpec), pointer :: next => NULL()
end type nestSpec
type nest_domain_type
private
integer :: tile_fine, tile_coarse
integer :: istart_fine, iend_fine, jstart_fine, jend_fine
integer :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse
integer :: x_refine, y_refine
logical :: is_fine_pe, is_coarse_pe
integer, pointer :: pelist_fine(:) => NULL()
integer, pointer :: pelist_coarse(:) => NULL()
character(len=NAME_LENGTH) :: name
type(nestSpec), pointer :: C2F_T => NULL()
type(nestSpec), pointer :: C2F_C => NULL()
type(nestSpec), pointer :: C2F_E => NULL()
type(nestSpec), pointer :: C2F_N => NULL()
type(nestSpec), pointer :: F2C_T => NULL()
type(nestSpec), pointer :: F2C_C => NULL()
type(nestSpec), pointer :: F2C_E => NULL()
type(nestSpec), pointer :: F2C_N => NULL()
type(domain2d), pointer :: domain_fine => NULL()
type(domain2d), pointer :: domain_coarse => NULL()
end type nest_domain_type
type DomainCommunicator2D
private
logical :: initialized=.false.
integer(8) :: id=-9999
integer(8) :: l_addr =-9999
integer(8) :: l_addrx =-9999
integer(8) :: l_addry =-9999
type(domain2D), pointer :: domain =>NULL()
type(domain2D), pointer :: domain_in =>NULL()
type(domain2D), pointer :: domain_out =>NULL()
type(overlapSpec), pointer :: send(:,:,:,:) => NULL()
type(overlapSpec), pointer :: recv(:,:,:,:) => NULL()
integer, dimension(:,:), allocatable :: sendis
integer, dimension(:,:), allocatable :: sendie
integer, dimension(:,:), allocatable :: sendjs
integer, dimension(:,:), allocatable :: sendje
integer, dimension(:,:), allocatable :: recvis
integer, dimension(:,:), allocatable :: recvie
integer, dimension(:,:), allocatable :: recvjs
integer, dimension(:,:), allocatable :: recvje
logical, dimension(:), allocatable :: S_do_buf
logical, dimension(:), allocatable :: R_do_buf
integer, dimension(:), allocatable :: cto_pe
integer, dimension(:), allocatable :: cfrom_pe
integer, dimension(:), allocatable :: S_msize
integer, dimension(:), allocatable :: R_msize
integer :: Slist_size=0, Rlist_size=0
integer :: isize=0, jsize=0, ke=0
integer :: isize_in=0, jsize_in=0
integer :: isize_out=0, jsize_out=0
integer :: isize_max=0, jsize_max=0
integer :: gf_ioff=0, gf_joff=0
! Remote data
integer, dimension(:) , allocatable :: isizeR
integer, dimension(:) , allocatable :: jsizeR
integer, dimension(:,:), allocatable :: sendisR
integer, dimension(:,:), allocatable :: sendjsR
integer(8), dimension(:), allocatable :: rem_addr
integer(8), dimension(:), allocatable :: rem_addrx
integer(8), dimension(:), allocatable :: rem_addry
integer(8), dimension(:,:), allocatable :: rem_addrl
integer(8), dimension(:,:), allocatable :: rem_addrlx
integer(8), dimension(:,:), allocatable :: rem_addrly
integer :: position ! data location. T, E, C, or N.
end type DomainCommunicator2D
integer, parameter :: MAX_REQUEST = 100
type nonblock_type
integer :: recv_pos
integer :: send_pos
integer :: recv_msgsize
integer :: send_msgsize
integer :: update_flags
integer :: update_position
integer :: update_gridtype
integer :: update_whalo
integer :: update_ehalo
integer :: update_shalo
integer :: update_nhalo
integer :: request_send_count
integer :: request_recv_count
integer, dimension(MAX_REQUEST) :: request_send
integer, dimension(MAX_REQUEST) :: request_recv
integer, dimension(MAX_REQUEST) :: size_recv
integer, dimension(MAX_REQUEST) :: type_recv
integer, dimension(MAX_REQUEST) :: buffer_pos_send
integer, dimension(MAX_REQUEST) :: buffer_pos_recv
integer(8) :: field_addrs(MAX_DOMAIN_FIELDS)
integer(8) :: field_addrs2(MAX_DOMAIN_FIELDS)
integer :: nfields
end type nonblock_type
type mpp_group_update_type
private
logical :: initialized = .FALSE.
logical :: k_loop_inside = .TRUE.
logical :: nonsym_edge = .FALSE.
integer :: nscalar = 0
integer :: nvector = 0
integer :: flags_s=0, flags_v=0
integer :: whalo_s=0, ehalo_s=0, shalo_s=0, nhalo_s=0
integer :: isize_s=0, jsize_s=0, ksize_s=1
integer :: whalo_v=0, ehalo_v=0, shalo_v=0, nhalo_v=0
integer :: isize_x=0, jsize_x=0, ksize_v=1
integer :: isize_y=0, jsize_y=0
integer :: position=0, gridtype=0
logical :: recv_s(8), recv_x(8), recv_y(8)
integer :: is_s=0, ie_s=0, js_s=0, je_s=0
integer :: is_x=0, ie_x=0, js_x=0, je_x=0
integer :: is_y=0, ie_y=0, js_y=0, je_y=0
integer :: nrecv=0, nsend=0
integer :: npack=0, nunpack=0
integer :: reset_index_s = 0
integer :: reset_index_v = 0
integer :: tot_msgsize = 0
integer :: from_pe(MAXOVERLAP)
integer :: to_pe(MAXOVERLAP)
integer :: recv_size(MAXOVERLAP)
integer :: send_size(MAXOVERLAP)
integer :: buffer_pos_recv(MAXOVERLAP)
integer :: buffer_pos_send(MAXOVERLAP)
integer :: pack_type(MAXOVERLAP)
integer :: pack_buffer_pos(MAXOVERLAP)
integer :: pack_rotation(MAXOVERLAP)
integer :: pack_size(MAXOVERLAP)
integer :: pack_is(MAXOVERLAP)
integer :: pack_ie(MAXOVERLAP)
integer :: pack_js(MAXOVERLAP)
integer :: pack_je(MAXOVERLAP)
integer :: unpack_type(MAXOVERLAP)
integer :: unpack_buffer_pos(MAXOVERLAP)
integer :: unpack_rotation(MAXOVERLAP)
integer :: unpack_size(MAXOVERLAP)
integer :: unpack_is(MAXOVERLAP)
integer :: unpack_ie(MAXOVERLAP)
integer :: unpack_js(MAXOVERLAP)
integer :: unpack_je(MAXOVERLAP)
integer(8) :: addrs_s(MAX_DOMAIN_FIELDS)
integer(8) :: addrs_x(MAX_DOMAIN_FIELDS)
integer(8) :: addrs_y(MAX_DOMAIN_FIELDS)
integer :: buffer_start_pos = -1
integer :: request_send(MAX_REQUEST)
integer :: request_recv(MAX_REQUEST)
integer :: type_recv(MAX_REQUEST)
end type mpp_group_update_type
!#######################################################################
!***********************************************************************
!
! module variables
!
!***********************************************************************
integer :: pe
logical :: module_is_initialized = .false.
logical :: debug = .FALSE.
logical :: verbose=.FALSE.
logical :: mosaic_defined = .false.
integer :: mpp_domains_stack_size=0
integer :: mpp_domains_stack_hwm=0
type(domain1D),save :: NULL_DOMAIN1D
type(domain2D),save :: NULL_DOMAIN2D
type(domainUG),save :: NULL_DOMAINUG
integer :: current_id_update = 0
integer :: num_update = 0
integer :: num_nonblock_group_update = 0
integer :: nonblock_buffer_pos = 0
integer :: nonblock_group_buffer_pos = 0
logical :: start_update = .true.
logical :: complete_update = .false.
type(nonblock_type), allocatable :: nonblock_data(:)
integer, parameter :: MAX_NONBLOCK_UPDATE = 100
integer :: group_update_buffer_pos = 0
logical :: complete_group_update_on = .false.
!-------- The following variables are used in mpp_domains_comm.h
integer, parameter :: MAX_ADDRS=512
integer(8),dimension(MAX_ADDRS),save :: addrs_sorted=-9999 ! list of sorted local addrs
integer, dimension(-1:MAX_ADDRS),save :: addrs_idx=-9999 ! idx of addr associated w/ d_comm
integer, dimension(MAX_ADDRS),save :: a_salvage=-9999 ! freed idx list of addr
integer, save :: a_sort_len=0 ! len sorted memory list
integer, save :: n_addrs=0 ! num memory addresses used
integer(8), parameter :: ADDR2_BASE=Z'0000000000010000'
integer, parameter :: MAX_ADDRS2=128
integer(8),dimension(MAX_ADDRS2),save :: addrs2_sorted=-9999 ! list of sorted local addrs
integer, dimension(-1:MAX_ADDRS2),save :: addrs2_idx=-9999 ! idx of addr2 associated w/ d_comm
integer, dimension(MAX_ADDRS2),save :: a2_salvage=-9999 ! freed indices of addr2
integer, save :: a2_sort_len=0 ! len sorted memory list
integer, save :: n_addrs2=0 ! num memory addresses used
integer, parameter :: MAX_DOM_IDS=128
integer(8),dimension(MAX_DOM_IDS),save :: ids_sorted=-9999 ! list of sorted domain identifiers
integer, dimension(-1:MAX_DOM_IDS),save :: ids_idx=-9999 ! idx of d_comm associated w/ sorted addr
integer, save :: i_sort_len=0 ! len sorted domain ids list
integer, save :: n_ids=0 ! num domain ids used (=i_sort_len; dom ids never removed)
integer, parameter :: MAX_FIELDS=1024
integer(8), dimension(MAX_FIELDS),save :: dcKey_sorted=-9999 ! list of sorted local addrs
! Not sure why static d_comm fails during deallocation of derived type members; allocatable works
! type(DomainCommunicator2D),dimension(MAX_FIELDS),save,target :: d_comm ! domain communicators
type(DomainCommunicator2D),dimension(:),allocatable,save,target :: d_comm ! domain communicators
integer, dimension(-1:MAX_FIELDS),save :: d_comm_idx=-9999 ! idx of d_comm associated w/ sorted addr
integer, dimension(MAX_FIELDS),save :: dc_salvage=-9999 ! freed indices of d_comm
integer, save :: dc_sort_len=0 ! len sorted comm keys (=num active communicators)
integer, save :: n_comm=0 ! num communicators used
! integer(8), parameter :: GT_BASE=2**8
integer(8), parameter :: GT_BASE=Z'0000000000000100' ! Workaround for 64bit int init problem
! integer(8), parameter :: KE_BASE=2**48
integer(8), parameter :: KE_BASE=Z'0001000000000000' ! Workaround for 64bit int init problem
integer(8) :: domain_cnt=0
!--- the following variables are used in mpp_domains_misc.h
logical :: domain_clocks_on=.FALSE.
integer :: send_clock=0, recv_clock=0, unpk_clock=0
integer :: wait_clock=0, pack_clock=0
integer :: send_pack_clock_nonblock=0, recv_clock_nonblock=0, unpk_clock_nonblock=0
integer :: wait_clock_nonblock=0
integer :: nest_send_clock=0, nest_recv_clock=0, nest_unpk_clock=0
integer :: nest_wait_clock=0, nest_pack_clock=0
integer :: group_recv_clock=0, group_send_clock=0, group_pack_clock=0, group_unpk_clock=0, group_wait_clock=0
integer :: nonblock_group_recv_clock=0, nonblock_group_send_clock=0, nonblock_group_pack_clock=0
integer :: nonblock_group_unpk_clock=0, nonblock_group_wait_clock=0
!--- namelist interface
!
!
! When debug_update_domain = "none", no debugging is done. When debug_update_domain is set to "fatal",
! the run exits with a fatal error message. When it is set to
! "warning", the run outputs a warning message. When it is set to
! "note", the run outputs a note message. The consistency on the boundary between
! processors/tiles is checked when updating the domain for a symmetric domain, as is the consistency
! on the north folded edge.
!
!
! Set true to always do overflow_check when doing EFP bitwise mpp_global_sum.
!
!
! Determines the loop order for packing and unpacking. When the number of threads is greater than nthread_control_loop,
! the k-loop is moved outside and combined with the number of packs and unpacks. When the number of threads is less
! than or equal to nthread_control_loop, the k-loop is moved inside, but still outside of the j,i loops.
!
!
character(len=32) :: debug_update_domain = "none"
logical :: debug_message_passing = .false.
integer :: nthread_control_loop = 8
logical :: efp_sum_overflow_check = .false.
logical :: use_alltoallw = .false.
namelist /mpp_domains_nml/ debug_update_domain, domain_clocks_on, debug_message_passing, nthread_control_loop, &
efp_sum_overflow_check, use_alltoallw
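!
! An illustrative namelist entry (a sketch using the default values declared above):
!
!   &mpp_domains_nml
!       debug_update_domain = "none"
!       domain_clocks_on = .false.
!       nthread_control_loop = 8
!   /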
!***********************************************************************
integer, parameter :: NO_CHECK = -1
integer :: debug_update_level = NO_CHECK
!***********************************************************************
!
! public interface from mpp_domains_define.h
!
!***********************************************************************
!
!
! Retrieve layout associated with a domain decomposition.
!
!
! Given a global 2D domain and the number of divisions in the
! decomposition (ndivs: usually the PE count unless some
! domains are masked) this call returns a 2D domain layout.
!
! By default, mpp_define_layout will attempt to divide the
! 2D index space into domains that maintain the aspect ratio of the
! global domain. If this cannot be done, the algorithm favours domains
! that are longer in x than y, a preference that could
! improve vector performance.
!
!
! call mpp_define_layout( global_indices, ndivs, layout )
!
!
!
!
!
interface mpp_define_layout
module procedure mpp_define_layout2D
end interface
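!
! A minimal usage sketch (the 360x180 global index range and the use of
! mpp_npes() for ndivs are illustrative assumptions):
!
!   integer :: layout(2)
!   call mpp_define_layout( (/1,360,1,180/), mpp_npes(), layout )
!   call mpp_define_domains( (/1,360,1,180/), layout, domain )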
!
!
! Set up a domain decomposition.
!
!
! There are two forms for the mpp_define_domains call. The 2D
! version is generally to be used but is built by repeated calls to the
! 1D version, also provided.
!
!
! call mpp_define_domains( global_indices, ndivs, domain, &
! pelist, flags, halo, extent, maskmap )
!
!
! call mpp_define_domains( global_indices, layout, domain, pelist, &
! xflags, yflags, xhalo, yhalo, &
! xextent, yextent, maskmap, name )
!
!
! global_indices: defines the global domain.
!
!
! ndivs: the number of domain divisions required.
!
!
! domain: holds the resulting domain decomposition.
!
!
! pelist: list of PEs to which the domains are to be assigned.
!
!
! flags: an optional flag to pass additional information
! about the desired domain topology. Useful flags in a 1D decomposition
! include GLOBAL_DATA_DOMAIN and
! CYCLIC_GLOBAL_DOMAIN. Flags are integers: multiple flags may
! be added together. The flag values are public parameters available by
! use association.
!
!
! halo: width of the halo.
!
!
! extent: normally mpp_define_domains attempts
! an even division of the global domain across ndivs
! domains. The extent array can be used by the user to pass a
! custom domain division. The extent array has ndivs
! elements and holds the compute domain widths, which should add up to
! cover the global domain exactly.
!
!
! maskmap: some divisions may be masked
! (maskmap=.FALSE.) to exclude them from the computation (e.g
! for ocean model domains that are all land). The maskmap array
! is dimensioned ndivs and contains .TRUE. values for
! any domain that must be included in the computation (default
! all). The pelist array length should match the number of
! domains included in the computation.
!
!
!
!
!
!
!
! For example:
!
!
!
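! (an illustrative reconstruction of the call, consistent with the
!  description that follows; the original documented values may differ)
!
!   call mpp_define_domains( (/1,100/), 10, domain, &
!                            flags=GLOBAL_DATA_DOMAIN+CYCLIC_GLOBAL_DOMAIN, halo=2 )
!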
! defines 10 compute domains spanning the range [1,100] of the global
! domain. The compute domains are non-overlapping blocks of 10. All the data
! domains are global, and with a halo of 2 span the range [-1:102]. And
! since the global domain has been declared to be cyclic,
! domain(9)%next => domain(0) and domain(0)%prev =>
! domain(9). A field is allocated on the data domain, and computations proceed on
! the compute domain. A call to mpp_update_domains would fill in
! the values in the halo region:
!
! call mpp_get_data_domain( domain, isd, ied ) !returns -1 and 102
! call mpp_get_compute_domain( domain, is, ie ) !returns (1,10) on PE 0 ...
! allocate( a(isd:ied) )
! do i = is,ie
! a(i) = <perform computations>
! end do
! call mpp_update_domains( a, domain )
!
! The call to mpp_update_domains fills in the regions outside
! the compute domain. Since the global domain is cyclic, the values at
! i=(-1,0) are the same as at i=(99,100); and
! i=(101,102) are the same as i=(1,2).
!
! The 2D version is just an extension of this syntax to two
! dimensions.
!
! The 2D version of the above should generally be used in
! codes, including 1D-decomposed ones, if there is a possibility of
! future evolution toward 2D decomposition. The arguments are similar to
! the 1D case, except that now we have optional arguments
! flags, halo, extent and maskmap
! along two axes.
!
! flags can now take an additional possible value to fold
! one or more edges. This is done by using flags
! FOLD_WEST_EDGE, FOLD_EAST_EDGE,
! FOLD_SOUTH_EDGE or FOLD_NORTH_EDGE. When a fold
! exists (e.g cylindrical domain), vector fields reverse sign upon
! crossing the fold. This parity reversal is performed only in the
! vector version of mpp_update_domains. In
! addition, shift operations may need to be applied to vector fields on
! staggered grids, also described in the vector interface to
! mpp_update_domains.
!
! name is the name associated with the decomposition,
! e.g 'Ocean model'. If this argument is present,
! mpp_define_domains will print the domain decomposition
! generated to stdlog.
!
! Examples:
!
!
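! (an illustrative 2D call; the 100x100 range, the 2x2 layout and the halo
!  widths are assumptions, not the original documented values)
!
!   call mpp_define_domains( (/1,100,1,100/), (/2,2/), domain, &
!                            xhalo=2, yhalo=2, name='Ocean model' )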
!
! Again, we allocate arrays on the data domain, perform computations
! on the compute domain, and call mpp_update_domains to update
! the halo region.
!
! If we wished to perform a 1D decomposition along Y
! on the same global domain, we could use:
!
!
!
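! (again illustrative; ndivs here is an assumed variable holding the number
!  of divisions, and a single division in X yields a 1D decomposition in Y)
!
!   call mpp_define_domains( (/1,100,1,100/), (/1,ndivs/), domain, xhalo=2 )
!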
interface mpp_define_domains
module procedure mpp_define_domains1D
module procedure mpp_define_domains2D
end interface
interface mpp_define_null_domain
module procedure mpp_define_null_domain1D
module procedure mpp_define_null_domain2D
end interface
interface mpp_copy_domain
module procedure mpp_copy_domain1D
module procedure mpp_copy_domain2D
end interface mpp_copy_domain
interface mpp_deallocate_domain
module procedure mpp_deallocate_domain1D
module procedure mpp_deallocate_domain2D
end interface
!
!
! modifies the extents (compute, data and global) of domain
!
!
! The source domain.
!
!
! Halo size of the returned 1D domain. Default value is 0.
!
!
! Axis specifications associated with the compute domain of the returned 1D domain.
!
!
! Axis specifications associated with the global domain of the returned 1D domain.
!
!
! Zonal axis specifications associated with the compute domain of the returned 2D domain.
!
!
! Meridional axis specifications associated with the compute domain of the returned 2D domain.
!
!
! Zonal axis specifications associated with the global domain of the returned 2D domain.
!
!
! Meridional axis specifications associated with the global domain of the returned 2D domain.
!
!
! Halo size of the returned 2D domain. Default value is 0.
!
!
! The returned domain.
!
!
interface mpp_modify_domain
module procedure mpp_modify_domain1D
module procedure mpp_modify_domain2D
end interface
!***********************************************************************
!
! public interface from mpp_domains_misc.h
!
!***********************************************************************
!
!
! Halo updates.
!
!
! mpp_update_domains is used to perform a halo update of a
! domain-decomposed array on each PE. MPP_TYPE_ can be of type
! complex, integer, logical or real;
! of 4-byte or 8-byte kind; of rank up to 5. The vector version (with
! two input data fields) is only present for real types.
!
! For 2D domain updates, if there are halos present along both
! x and y, we can choose to update one only, by
! specifying flags=XUPDATE or flags=YUPDATE. In
! addition, one-sided updates can be performed by setting flags
! to any combination of WUPDATE, EUPDATE,
! SUPDATE and NUPDATE, to update the west, east, north
! and south halos respectively. Any combination of halos may be used by
! adding the requisite flags, e.g: flags=XUPDATE+SUPDATE or
! flags=EUPDATE+WUPDATE+SUPDATE will update the east, west and
! south halos.
!
! If a call to mpp_update_domains involves at least one E-W
! halo and one N-S halo, the corners involved will also be updated, i.e,
! in the example above, the SE and SW corners will be updated.
!
! If flags is not supplied, that is
! equivalent to flags=XUPDATE+YUPDATE.
!
! The vector version is passed the x and y
! components of a vector field in tandem, and both are updated upon
! return. They are passed together to treat parity issues on various
! grids. For example, on a cubic sphere projection, the x and
! y components may be interchanged when passing from an
! equatorial cube face to a polar face. For grids with folds, vector
! components change sign on crossing the fold. Paired scalar quantities
! can also be passed with the vector version if flags=SCALAR_PAIR, in which
! case components are appropriately interchanged, but signs are not.
!
! Special treatment at boundaries such as folds is also required for
! staggered grids. The following types of staggered grids are
! recognized:
!
! 1) AGRID: values are at grid centers.
! 2) BGRID_NE: vector fields are at the NE vertex of a grid
! cell, i.e: the array elements u(i,j) and v(i,j) are
! actually at (i+½,j+½) with respect to the grid centers.
! 3) BGRID_SW: vector fields are at the SW vertex of a grid
! cell, i.e: the array elements u(i,j) and v(i,j) are
! actually at (i-½,j-½) with respect to the grid centers.
! 4) CGRID_NE: vector fields are at the N and E faces of a
! grid cell, i.e: the array elements u(i,j) and v(i,j)
! are actually at (i+½,j) and (i,j+½) with respect to the
! grid centers.
! 5) CGRID_SW: vector fields are at the S and W faces of a
! grid cell, i.e: the array elements u(i,j) and v(i,j)
! are actually at (i-½,j) and (i,j-½) with respect to the
! grid centers.
!
! The gridtypes listed above are all available by use association as
! integer parameters. The scalar version of mpp_update_domains
! assumes that the values of a scalar field are always at AGRID
! locations, and no special boundary treatment is required. If vector
! fields are at staggered locations, the optional argument
! gridtype must be appropriately set for correct treatment at
! boundaries.
!
! It is safe to apply vector field updates to the appropriate arrays
! irrespective of the domain topology: if the topology requires no
! special treatment of vector fields, specifying gridtype will
! do no harm.
!
! mpp_update_domains internally buffers the data being sent
! and received into single messages for efficiency. A tunable internal
! buffer area in memory is provided for this purpose by
! mpp_domains_mod. The size of this buffer area can be set by
! the user by calling
! mpp_domains_set_stack_size.
!
!
! call mpp_update_domains( field, domain, flags )
!
!
! call mpp_update_domains( fieldx, fieldy, domain, flags, gridtype )
!
!
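!
! A minimal usage sketch (assumes domain is a previously defined
! domain2D and the array is allocated on its data domain):
!
! real, allocatable :: field(:,:)
! integer :: isd, ied, jsd, jed
! call mpp_get_data_domain( domain, isd, ied, jsd, jed )
! allocate( field(isd:ied,jsd:jed) )
! ... ( fill the compute portion of field ) ...
! call mpp_update_domains( field, domain )                       ! full x+y halo update
! call mpp_update_domains( field, domain, flags=EUPDATE+WUPDATE+SUPDATE )
!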
interface mpp_update_domains
module procedure mpp_update_domain2D_r8_2d
module procedure mpp_update_domain2D_r8_3d
module procedure mpp_update_domain2D_r8_4d
module procedure mpp_update_domain2D_r8_5d
module procedure mpp_update_domain2D_r8_2dv
module procedure mpp_update_domain2D_r8_3dv
module procedure mpp_update_domain2D_r8_4dv
module procedure mpp_update_domain2D_r8_5dv
# 1082
module procedure mpp_update_domain2D_i8_2d
module procedure mpp_update_domain2D_i8_3d
module procedure mpp_update_domain2D_i8_4d
module procedure mpp_update_domain2D_i8_5d
module procedure mpp_update_domain2D_r4_2d
module procedure mpp_update_domain2D_r4_3d
module procedure mpp_update_domain2D_r4_4d
module procedure mpp_update_domain2D_r4_5d
module procedure mpp_update_domain2D_r4_2dv
module procedure mpp_update_domain2D_r4_3dv
module procedure mpp_update_domain2D_r4_4dv
module procedure mpp_update_domain2D_r4_5dv
# 1104
module procedure mpp_update_domain2D_i4_2d
module procedure mpp_update_domain2D_i4_3d
module procedure mpp_update_domain2D_i4_4d
module procedure mpp_update_domain2D_i4_5d
end interface
!
!
! Interface to start halo updates.
!
!
! mpp_start_update_domains is used to start a halo update of a
! domain-decomposed array on each PE. MPP_TYPE_ can be of type
! complex, integer, logical or real;
! of 4-byte or 8-byte kind; of rank up to 5. The vector version (with
! two input data fields) is only present for real types.
!
! mpp_start_update_domains must be paired with
! mpp_complete_update_domains. In mpp_start_update_domains,
! receive buffers are pre-posted (non-blocking receives) and data on the
! compute domain are packed and sent (non-blocking sends) to the other
! processors. In mpp_complete_update_domains, the buffers are unpacked
! to fill the halos, and mpp_sync_self is called at the last call of
! mpp_complete_update_domains to ensure the communication is safe.
!
! Each mpp_update_domains call can be replaced by the combination of mpp_start_update_domains
! and mpp_complete_update_domains. The arguments of mpp_start_update_domains
! and mpp_complete_update_domains should be exactly the same as in the
! mpp_update_domains call being replaced, except that the optional argument "complete"
! is not present. The following are examples of how to replace mpp_update_domains with
! mpp_start_update_domains/mpp_complete_update_domains.
!
! Example 1: Replace one scalar mpp_update_domains.
!
! Replace
!
! call mpp_update_domains(data, domain, flags=update_flags)
!
! with
!
! id_update = mpp_start_update_domains(data, domain, flags=update_flags)
! ...( doing some computation )
! call mpp_complete_update_domains(id_update, data, domain, flags=update_flags)
!
! Example 2: Replace group scalar mpp_update_domains,
!
! Replace
!
! call mpp_update_domains(data_1, domain, flags=update_flags, complete=.false.)
! .... ( other n-2 call mpp_update_domains with complete = .false. )
! call mpp_update_domains(data_n, domain, flags=update_flags, complete=.true. )
!
! With
!
! id_up_1 = mpp_start_update_domains(data_1, domain, flags=update_flags)
! .... ( other n-2 call mpp_start_update_domains )
! id_up_n = mpp_start_update_domains(data_n, domain, flags=update_flags)
!
! ..... ( doing some computation )
!
! call mpp_complete_update_domains(id_up_1, data_1, domain, flags=update_flags)
! .... ( other n-2 call mpp_complete_update_domains )
! call mpp_complete_update_domains(id_up_n, data_n, domain, flags=update_flags)
!
! Example 3: Replace group CGRID_NE vector, mpp_update_domains
!
! Replace
!
! call mpp_update_domains(u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE, complete=.false.)
! .... ( other n-2 call mpp_update_domains with complete = .false. )
! call mpp_update_domains(u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE, complete=.true. )
!
! with
!
! id_up_1 = mpp_start_update_domains(u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE)
! .... ( other n-2 call mpp_start_update_domains )
! id_up_n = mpp_start_update_domains(u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE)
!
! ..... ( doing some computation )
!
! call mpp_complete_update_domains(id_up_1, u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE)
! .... ( other n-2 call mpp_complete_update_domains )
! call mpp_complete_update_domains(id_up_n, u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE)
!
! For 2D domain updates, if there are halos present along both
! x and y, we can choose to update one only, by
! specifying flags=XUPDATE or flags=YUPDATE. In
! addition, one-sided updates can be performed by setting flags
! to any combination of WUPDATE, EUPDATE,
! SUPDATE and NUPDATE, to update the west, east, north
! and south halos respectively. Any combination of halos may be used by
! adding the requisite flags, e.g: flags=XUPDATE+SUPDATE or
! flags=EUPDATE+WUPDATE+SUPDATE will update the east, west and
! south halos.
!
! If a call to mpp_start_update_domains/mpp_complete_update_domains involves at least one E-W
! halo and one N-S halo, the corners involved will also be updated, i.e.,
! in the example above, the SE and SW corners will be updated.
!
! If flags is not supplied, that is
! equivalent to flags=XUPDATE+YUPDATE.
!
! The vector version is passed the x and y
! components of a vector field in tandem, and both are updated upon
! return. They are passed together to treat parity issues on various
! grids. For example, on a cubic sphere projection, the x and
! y components may be interchanged when passing from an
! equatorial cube face to a polar face. For grids with folds, vector
! components change sign on crossing the fold. Paired scalar quantities
! can also be passed with the vector version if flags=SCALAR_PAIR, in which
! case components are appropriately interchanged, but signs are not.
!
! Special treatment at boundaries such as folds is also required for
! staggered grids. The following types of staggered grids are
! recognized:
!
! 1) AGRID: values are at grid centers.
! 2) BGRID_NE: vector fields are at the NE vertex of a grid
! cell, i.e: the array elements u(i,j) and v(i,j) are
! actually at (i+½,j+½) with respect to the grid centers.
! 3) BGRID_SW: vector fields are at the SW vertex of a grid
! cell, i.e: the array elements u(i,j) and v(i,j) are
! actually at (i-½,j-½) with respect to the grid centers.
! 4) CGRID_NE: vector fields are at the N and E faces of a
! grid cell, i.e: the array elements u(i,j) and v(i,j)
! are actually at (i+½,j) and (i,j+½) with respect to the
! grid centers.
! 5) CGRID_SW: vector fields are at the S and W faces of a
! grid cell, i.e: the array elements u(i,j) and v(i,j)
! are actually at (i-½,j) and (i,j-½) with respect to the
! grid centers.
!
! The gridtypes listed above are all available by use association as
! integer parameters. If vector fields are at staggered locations, the
! optional argument gridtype must be appropriately set for
! correct treatment at boundaries.
!
! It is safe to apply vector field updates to the appropriate arrays
! irrespective of the domain topology: if the topology requires no
! special treatment of vector fields, specifying gridtype will
! do no harm.
!
! mpp_start_update_domains/mpp_complete_update_domains internally
! buffers the data being sent and received into single messages for efficiency.
! A tunable internal buffer area in memory is provided for this purpose by
! mpp_domains_mod. The size of this buffer area can be set by
! the user by calling
! mpp_domains_set_stack_size.
!
!
! id_update = mpp_start_update_domains( field, domain, flags )
! call mpp_complete_update_domains( id_update, field, domain, flags )
!
!
interface mpp_start_update_domains
module procedure mpp_start_update_domain2D_r8_2d
module procedure mpp_start_update_domain2D_r8_3d
module procedure mpp_start_update_domain2D_r8_4d
module procedure mpp_start_update_domain2D_r8_5d
module procedure mpp_start_update_domain2D_r8_2dv
module procedure mpp_start_update_domain2D_r8_3dv
module procedure mpp_start_update_domain2D_r8_4dv
module procedure mpp_start_update_domain2D_r8_5dv
# 1275
module procedure mpp_start_update_domain2D_i8_2d
module procedure mpp_start_update_domain2D_i8_3d
module procedure mpp_start_update_domain2D_i8_4d
module procedure mpp_start_update_domain2D_i8_5d
module procedure mpp_start_update_domain2D_r4_2d
module procedure mpp_start_update_domain2D_r4_3d
module procedure mpp_start_update_domain2D_r4_4d
module procedure mpp_start_update_domain2D_r4_5d
module procedure mpp_start_update_domain2D_r4_2dv
module procedure mpp_start_update_domain2D_r4_3dv
module procedure mpp_start_update_domain2D_r4_4dv
module procedure mpp_start_update_domain2D_r4_5dv
# 1297
module procedure mpp_start_update_domain2D_i4_2d
module procedure mpp_start_update_domain2D_i4_3d
module procedure mpp_start_update_domain2D_i4_4d
module procedure mpp_start_update_domain2D_i4_5d
end interface
interface mpp_complete_update_domains
module procedure mpp_complete_update_domain2D_r8_2d
module procedure mpp_complete_update_domain2D_r8_3d
module procedure mpp_complete_update_domain2D_r8_4d
module procedure mpp_complete_update_domain2D_r8_5d
module procedure mpp_complete_update_domain2D_r8_2dv
module procedure mpp_complete_update_domain2D_r8_3dv
module procedure mpp_complete_update_domain2D_r8_4dv
module procedure mpp_complete_update_domain2D_r8_5dv
# 1318
module procedure mpp_complete_update_domain2D_i8_2d
module procedure mpp_complete_update_domain2D_i8_3d
module procedure mpp_complete_update_domain2D_i8_4d
module procedure mpp_complete_update_domain2D_i8_5d
module procedure mpp_complete_update_domain2D_r4_2d
module procedure mpp_complete_update_domain2D_r4_3d
module procedure mpp_complete_update_domain2D_r4_4d
module procedure mpp_complete_update_domain2D_r4_5d
module procedure mpp_complete_update_domain2D_r4_2dv
module procedure mpp_complete_update_domain2D_r4_3dv
module procedure mpp_complete_update_domain2D_r4_4dv
module procedure mpp_complete_update_domain2D_r4_5dv
# 1340
module procedure mpp_complete_update_domain2D_i4_2d
module procedure mpp_complete_update_domain2D_i4_3d
module procedure mpp_complete_update_domain2D_i4_4d
module procedure mpp_complete_update_domain2D_i4_5d
end interface
interface mpp_start_do_update
module procedure mpp_start_do_update_r8_3d
module procedure mpp_start_do_update_r8_3dv
# 1352
module procedure mpp_start_do_update_i8_3d
module procedure mpp_start_do_update_r4_3d
module procedure mpp_start_do_update_r4_3dv
# 1362
module procedure mpp_start_do_update_i4_3d
end interface
interface mpp_complete_do_update
module procedure mpp_complete_do_update_r8_3d
module procedure mpp_complete_do_update_r8_3dv
# 1371
module procedure mpp_complete_do_update_i8_3d
module procedure mpp_complete_do_update_r4_3d
module procedure mpp_complete_do_update_r4_3dv
# 1381
module procedure mpp_complete_do_update_i4_3d
end interface
interface mpp_create_group_update
module procedure mpp_create_group_update_r4_2d
module procedure mpp_create_group_update_r4_3d
module procedure mpp_create_group_update_r4_4d
module procedure mpp_create_group_update_r4_2dv
module procedure mpp_create_group_update_r4_3dv
module procedure mpp_create_group_update_r4_4dv
module procedure mpp_create_group_update_r8_2d
module procedure mpp_create_group_update_r8_3d
module procedure mpp_create_group_update_r8_4d
module procedure mpp_create_group_update_r8_2dv
module procedure mpp_create_group_update_r8_3dv
module procedure mpp_create_group_update_r8_4dv
end interface mpp_create_group_update
interface mpp_do_group_update
module procedure mpp_do_group_update_r4
module procedure mpp_do_group_update_r8
end interface mpp_do_group_update
interface mpp_start_group_update
module procedure mpp_start_group_update_r4
module procedure mpp_start_group_update_r8
end interface mpp_start_group_update
interface mpp_complete_group_update
module procedure mpp_complete_group_update_r4
module procedure mpp_complete_group_update_r8
end interface mpp_complete_group_update
interface mpp_reset_group_update_field
module procedure mpp_reset_group_update_field_r4_2d
module procedure mpp_reset_group_update_field_r4_3d
module procedure mpp_reset_group_update_field_r4_4d
module procedure mpp_reset_group_update_field_r4_2dv
module procedure mpp_reset_group_update_field_r4_3dv
module procedure mpp_reset_group_update_field_r4_4dv
module procedure mpp_reset_group_update_field_r8_2d
module procedure mpp_reset_group_update_field_r8_3d
module procedure mpp_reset_group_update_field_r8_4d
module procedure mpp_reset_group_update_field_r8_2dv
module procedure mpp_reset_group_update_field_r8_3dv
module procedure mpp_reset_group_update_field_r8_4dv
end interface mpp_reset_group_update_field
!
!
! Set up a domain to pass data between the coarse and fine grids of a nested model.
!
!
! Set up a domain to pass data between the coarse and fine grids of a nested model.
! Currently it only supports one fine nest region over the coarse grid region.
! It supports both serial and concurrent nesting. In serial nesting, the
! coarse and fine grids are on exactly the same processor list. In concurrent
! nesting, the coarse and fine grids are on separate, non-overlapping
! processor lists. The coarse and fine grid domains need to be defined before
! calling mpp_define_nest_domains. For concurrent nesting, mpp_broadcast
! needs to be called to broadcast both the fine and coarse grid domains onto
! all the processors.
!
!
!
!
! mpp_update_nest_coarse is used to pass data from the fine grid to the coarse grid compute domain.
! mpp_update_nest_fine is used to pass data from the coarse grid to the fine grid halo.
! You may call mpp_get_C2F_index before calling mpp_update_nest_fine to get the indices for
! passing data from coarse to fine. You may call mpp_get_F2C_index before calling
! mpp_update_nest_coarse to get the indices for passing data from fine to coarse.
!
!
!
!
! NOTE: The following tests are done in test_mpp_domains: the coarse grid is a cubic sphere
! grid and the fine grid is a regular lat-lon grid (symmetric domain) nested inside
! face 3 of the cubic sphere grid. Tests are done for data at T, E, C, and N-cell centers.
!
! Below is an example of passing data between the fine and coarse grids. (More details on how to
! use the nesting domain update are available in routine test_update_nest_domain of
! shared/mpp/test_mpp_domains.F90.)
!
!
!
!
! call mpp_define_nest_domains(nest_domain, domain_fine, domain_coarse, tile_fine, tile_coarse,
! istart_fine, iend_fine, jstart_fine, jend_fine,
! istart_coarse, iend_coarse, jstart_coarse, jend_coarse,
! pelist, extra_halo, name)
!
!
!
! Holds the information to pass data between fine and coarse grid.
!
!
! domain for fine grid.
!
!
! domain for coarse grid.
!
!
! tile number of the fine grid. Currently this value should be 1.
!
!
! tile number of the coarse grid.
!
!
! index in the fine grid of the nested region
!
!
! index in the coarse grid of the nested region
!
!
! List of PEs to which the domains are to be assigned.
!
!
! optional argument. Extra halo for passing data from the coarse grid to the fine grid.
! Default is 0 and currently only extra_halo = 0 is supported.
!
!
! optional argument. Name of the nest domain.
!
!
!
!
! Get the index of the data passed from coarse grid to fine grid.
!
!
! Get the index of the data passed from coarse grid to fine grid.
!
!
! call mpp_get_C2F_index(nest_domain, is_fine, ie_fine, js_fine, je_fine,
! is_coarse, ie_coarse, js_coarse, je_coarse, dir, position)
!
!
!
! Holds the information to pass data between fine and coarse grid.
!
!
! index in the fine grid of the nested region
!
!
! index in the coarse grid of the nested region
!
!
! direction of the halo update. Its value should be WEST, EAST, SOUTH or NORTH.
!
!
! Cell position. Its value should be CENTER, EAST, NORTH or SOUTH.
!
!
!
!
! Get the index of the data passed from fine grid to coarse grid.
!
!
! Get the index of the data passed from fine grid to coarse grid.
!
!
! call mpp_get_F2C_index(nest_domain, is_coarse, ie_coarse, js_coarse, je_coarse,
! is_fine, ie_fine, js_fine, je_fine, position)
!
!
!
! Holds the information to pass data between fine and coarse grid.
!
!
! index in the fine grid of the nested region
!
!
! index in the coarse grid of the nested region
!
!
! Cell position. Its value should be CENTER, EAST, NORTH or SOUTH.
!
!
!
!
! Pass the data from coarse grid to fill the buffer to be ready to be interpolated
! onto fine grid.
!
!
! Pass the data from coarse grid to fill the buffer to be ready to be interpolated
! onto fine grid.
!
!
! call mpp_update_nest_fine(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer,
! flags, complete, position, extra_halo, name, tile_count)
!
!
!
! field on the model grid.
!
!
! Holds the information to pass data between fine and coarse grid.
!
!
! west side buffer to be filled with data on coarse grid.
!
!
! east side buffer to be filled with data on coarse grid.
!
!
! south side buffer to be filled with data on coarse grid.
!
!
! north side buffer to be filled with data on coarse grid.
!
!
! optional argument. Specifies the direction of the fine grid halo buffer to be filled.
! Default value is XUPDATE+YUPDATE.
!
!
! optional argument. When true, do the buffer filling. Default value is true.
!
!
! Cell position. Its value should be CENTER, EAST, NORTH or SOUTH. Default is CENTER.
!
!
! optional argument. Extra halo for passing data from the coarse grid to the fine grid.
! Default is 0 and currently only extra_halo = 0 is supported.
!
!
! optional argument. Name of the nest domain.
!
!
! optional argument. Used to support multiple-tile-per-pe. Default is 1 and currently
! only tile_count = 1 is supported.
!
!
!
!
! Pass the data from fine grid to fill the buffer to be ready to be interpolated
! onto coarse grid.
!
!
! Pass the data from fine grid to fill the buffer to be ready to be interpolated
! onto coarse grid.
!
!
! call mpp_update_nest_coarse(field, nest_domain, buffer, complete, position, name, tile_count)
!
!
!
! field on the model grid.
!
!
! Holds the information to pass data between fine and coarse grid.
!
!
! buffer to be filled with data on coarse grid.
!
!
! optional argument. When true, do the buffer filling. Default value is true.
!
!
! Cell position. Its value should be CENTER, EAST, NORTH or SOUTH. Default is CENTER.
!
!
! optional argument. Name of the nest domain.
!
!
! optional argument. Used to support multiple-tile-per-pe. Default is 1 and currently
! only tile_count = 1 is supported.
!
!
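!
! A schematic workflow sketch (variable names such as x and the four halo
! buffers are illustrative; their extents come from mpp_get_C2F_index):
!
! call mpp_define_nest_domains(nest_domain, domain_fine, domain_coarse, tile_fine, tile_coarse, &
!                              istart_fine, iend_fine, jstart_fine, jend_fine,                  &
!                              istart_coarse, iend_coarse, jstart_coarse, jend_coarse)
! call mpp_get_C2F_index(nest_domain, is_fine, ie_fine, js_fine, je_fine,                       &
!                        is_coarse, ie_coarse, js_coarse, je_coarse, WEST, position=CENTER)
! ... ( allocate wbuffer(is_coarse:ie_coarse,js_coarse:je_coarse); repeat for the
!       EAST, SOUTH and NORTH directions to size the other buffers ) ...
! call mpp_update_nest_fine(x, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer)
! call mpp_update_nest_coarse(x, nest_domain, buffer)
!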
interface mpp_update_nest_fine
module procedure mpp_update_nest_fine_r8_2d
module procedure mpp_update_nest_fine_r8_3d
module procedure mpp_update_nest_fine_r8_4d
# 1689
module procedure mpp_update_nest_fine_i8_2d
module procedure mpp_update_nest_fine_i8_3d
module procedure mpp_update_nest_fine_i8_4d
module procedure mpp_update_nest_fine_r4_2d
module procedure mpp_update_nest_fine_r4_3d
module procedure mpp_update_nest_fine_r4_4d
# 1704
module procedure mpp_update_nest_fine_i4_2d
module procedure mpp_update_nest_fine_i4_3d
module procedure mpp_update_nest_fine_i4_4d
end interface
interface mpp_do_update_nest_fine
module procedure mpp_do_update_nest_fine_r8_3d
# 1714
module procedure mpp_do_update_nest_fine_i8_3d
module procedure mpp_do_update_nest_fine_r4_3d
# 1723
module procedure mpp_do_update_nest_fine_i4_3d
end interface
interface mpp_update_nest_coarse
module procedure mpp_update_nest_coarse_r8_2d
module procedure mpp_update_nest_coarse_r8_3d
module procedure mpp_update_nest_coarse_r8_4d
# 1735
module procedure mpp_update_nest_coarse_i8_2d
module procedure mpp_update_nest_coarse_i8_3d
module procedure mpp_update_nest_coarse_i8_4d
module procedure mpp_update_nest_coarse_r4_2d
module procedure mpp_update_nest_coarse_r4_3d
module procedure mpp_update_nest_coarse_r4_4d
# 1750
module procedure mpp_update_nest_coarse_i4_2d
module procedure mpp_update_nest_coarse_i4_3d
module procedure mpp_update_nest_coarse_i4_4d
end interface
interface mpp_do_update_nest_coarse
module procedure mpp_do_update_nest_coarse_r8_3d
# 1760
module procedure mpp_do_update_nest_coarse_i8_3d
module procedure mpp_do_update_nest_coarse_r4_3d
# 1769
module procedure mpp_do_update_nest_coarse_i4_3d
end interface
interface mpp_broadcast_domain
module procedure mpp_broadcast_domain_1
module procedure mpp_broadcast_domain_2
module procedure mpp_broadcast_domain_ug
end interface
!--------------------------------------------------------------
! for adjoint update
!--------------------------------------------------------------
interface mpp_update_domains_ad
module procedure mpp_update_domains_ad_2D_r8_2d
module procedure mpp_update_domains_ad_2D_r8_3d
module procedure mpp_update_domains_ad_2D_r8_4d
module procedure mpp_update_domains_ad_2D_r8_5d
module procedure mpp_update_domains_ad_2D_r8_2dv
module procedure mpp_update_domains_ad_2D_r8_3dv
module procedure mpp_update_domains_ad_2D_r8_4dv
module procedure mpp_update_domains_ad_2D_r8_5dv
module procedure mpp_update_domains_ad_2D_r4_2d
module procedure mpp_update_domains_ad_2D_r4_3d
module procedure mpp_update_domains_ad_2D_r4_4d
module procedure mpp_update_domains_ad_2D_r4_5d
module procedure mpp_update_domains_ad_2D_r4_2dv
module procedure mpp_update_domains_ad_2D_r4_3dv
module procedure mpp_update_domains_ad_2D_r4_4dv
module procedure mpp_update_domains_ad_2D_r4_5dv
end interface
!
interface mpp_do_update
module procedure mpp_do_update_r8_3d
module procedure mpp_do_update_r8_3dv
# 1810
module procedure mpp_do_update_i8_3d
module procedure mpp_do_update_r4_3d
module procedure mpp_do_update_r4_3dv
# 1820
module procedure mpp_do_update_i4_3d
end interface
interface mpp_do_check
module procedure mpp_do_check_r8_3d
module procedure mpp_do_check_r8_3dv
# 1829
module procedure mpp_do_check_i8_3d
module procedure mpp_do_check_r4_3d
module procedure mpp_do_check_r4_3dv
# 1839
module procedure mpp_do_check_i4_3d
end interface
interface mpp_pass_SG_to_UG
module procedure mpp_pass_SG_to_UG_r8_2d
module procedure mpp_pass_SG_to_UG_r8_3d
module procedure mpp_pass_SG_to_UG_r4_2d
module procedure mpp_pass_SG_to_UG_r4_3d
module procedure mpp_pass_SG_to_UG_i4_2d
module procedure mpp_pass_SG_to_UG_i4_3d
module procedure mpp_pass_SG_to_UG_l4_2d
module procedure mpp_pass_SG_to_UG_l4_3d
end interface
interface mpp_pass_UG_to_SG
module procedure mpp_pass_UG_to_SG_r8_2d
module procedure mpp_pass_UG_to_SG_r8_3d
module procedure mpp_pass_UG_to_SG_r4_2d
module procedure mpp_pass_UG_to_SG_r4_3d
module procedure mpp_pass_UG_to_SG_i4_2d
module procedure mpp_pass_UG_to_SG_i4_3d
module procedure mpp_pass_UG_to_SG_l4_2d
module procedure mpp_pass_UG_to_SG_l4_3d
end interface
!!$ module procedure mpp_do_update_ad_i4_3d
!!$ end interface
!
interface mpp_do_update_ad
module procedure mpp_do_update_ad_r8_3d
module procedure mpp_do_update_ad_r8_3dv
module procedure mpp_do_update_ad_r4_3d
module procedure mpp_do_update_ad_r4_3dv
end interface
!
!
!
! Get the boundary data for a symmetric domain when the data is at C, E, or N-cell center
!
!
! mpp_get_boundary is used to get the boundary data for a symmetric domain
! when the data is at C, E, or N-cell center. For the cubic grid, the data should
! always be at C-cell center.
!
!
! call mpp_get_boundary
!
!
! call mpp_get_boundary
!
!
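!
! A schematic example (a sketch only; the buffer names, keywords and the
! CORNER position are illustrative and assume the buffers are sized for the
! corresponding boundaries):
!
! call mpp_get_boundary( field, domain, ebuffer=ebuf, sbuffer=sbuf, &
!                        wbuffer=wbuf, nbuffer=nbuf, position=CORNER )
!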
interface mpp_get_boundary
module procedure mpp_get_boundary_r8_2d
module procedure mpp_get_boundary_r8_3d
! module procedure mpp_get_boundary_r8_4d
! module procedure mpp_get_boundary_r8_5d
module procedure mpp_get_boundary_r8_2dv
module procedure mpp_get_boundary_r8_3dv
! module procedure mpp_get_boundary_r8_4dv
! module procedure mpp_get_boundary_r8_5dv
module procedure mpp_get_boundary_r4_2d
module procedure mpp_get_boundary_r4_3d
! module procedure mpp_get_boundary_r4_4d
! module procedure mpp_get_boundary_r4_5d
module procedure mpp_get_boundary_r4_2dv
module procedure mpp_get_boundary_r4_3dv
! module procedure mpp_get_boundary_r4_4dv
! module procedure mpp_get_boundary_r4_5dv
end interface
interface mpp_get_boundary_ad
module procedure mpp_get_boundary_ad_r8_2d
module procedure mpp_get_boundary_ad_r8_3d
module procedure mpp_get_boundary_ad_r8_2dv
module procedure mpp_get_boundary_ad_r8_3dv
module procedure mpp_get_boundary_ad_r4_2d
module procedure mpp_get_boundary_ad_r4_3d
module procedure mpp_get_boundary_ad_r4_2dv
module procedure mpp_get_boundary_ad_r4_3dv
end interface
interface mpp_do_get_boundary
module procedure mpp_do_get_boundary_r8_3d
module procedure mpp_do_get_boundary_r8_3dv
module procedure mpp_do_get_boundary_r4_3d
module procedure mpp_do_get_boundary_r4_3dv
end interface
interface mpp_do_get_boundary_ad
module procedure mpp_do_get_boundary_ad_r8_3d
module procedure mpp_do_get_boundary_ad_r8_3dv
module procedure mpp_do_get_boundary_ad_r4_3d
module procedure mpp_do_get_boundary_ad_r4_3dv
end interface
!
!
! Reorganization of distributed global arrays.
!
!
! mpp_redistribute is used to reorganize a distributed
! array. MPP_TYPE_ can be of type integer,
! complex, or real; of 4-byte or 8-byte kind; of rank
! up to 5.
!
!
! call mpp_redistribute( domain_in, field_in, domain_out, field_out )
!
!
! field_in is dimensioned on the data domain of domain_in.
!
!
! field_out is dimensioned on the data domain of domain_out.
!
!
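!
! A brief sketch (assumes domain_in and domain_out are already defined and
! each field is allocated on the data domain of its own decomposition):
!
! real, allocatable :: field_in(:,:), field_out(:,:)
! ... ( allocate field_in on domain_in's data domain and
!       field_out on domain_out's data domain ) ...
! call mpp_redistribute( domain_in, field_in, domain_out, field_out )
!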
interface mpp_redistribute
module procedure mpp_redistribute_r8_2D
module procedure mpp_redistribute_r8_3D
module procedure mpp_redistribute_r8_4D
module procedure mpp_redistribute_r8_5D
# 1982
module procedure mpp_redistribute_i8_2D
module procedure mpp_redistribute_i8_3D
module procedure mpp_redistribute_i8_4D
module procedure mpp_redistribute_i8_5D
!!$ module procedure mpp_redistribute_l8_2D
!!$ module procedure mpp_redistribute_l8_3D
!!$ module procedure mpp_redistribute_l8_4D
!!$ module procedure mpp_redistribute_l8_5D
module procedure mpp_redistribute_r4_2D
module procedure mpp_redistribute_r4_3D
module procedure mpp_redistribute_r4_4D
module procedure mpp_redistribute_r4_5D
# 2004
module procedure mpp_redistribute_i4_2D
module procedure mpp_redistribute_i4_3D
module procedure mpp_redistribute_i4_4D
module procedure mpp_redistribute_i4_5D
!!$ module procedure mpp_redistribute_l4_2D
!!$ module procedure mpp_redistribute_l4_3D
!!$ module procedure mpp_redistribute_l4_4D
!!$ module procedure mpp_redistribute_l4_5D
end interface
interface mpp_do_redistribute
module procedure mpp_do_redistribute_r8_3D
# 2019
module procedure mpp_do_redistribute_i8_3D
module procedure mpp_do_redistribute_l8_3D
module procedure mpp_do_redistribute_r4_3D
# 2029
module procedure mpp_do_redistribute_i4_3D
module procedure mpp_do_redistribute_l4_3D
end interface
!
!
! Parallel checking between two ensembles which run
! on different sets of PEs at the same time.
!
!
! There are two forms for the mpp_check_field call. The 2D
! version is generally to be used and the 3D version is built by repeated calls to the
! 2D version.
!
!
! call mpp_check_field(field_in, pelist1, pelist2, domain, mesg, &
! w_halo, s_halo, e_halo, n_halo, force_abort )
!
!
! Field to be checked
!
!
! Pelist of the two ensembles to be compared
!
!
! Domain of current pe
!
!
! Message to be printed out
!
!
! Halo size to be checked. Default value is 0.
!
!
! When true, abort the program when any difference is found. Default value is false.
!
!
interface mpp_check_field
module procedure mpp_check_field_2D
module procedure mpp_check_field_3D
end interface
!***********************************************************************
!
! public interface from mpp_domains_reduce.h
!
!***********************************************************************
!
!
! Fill in a global array from domain-decomposed arrays.
!
!
! mpp_global_field is used to get an entire
! domain-decomposed array on each PE. MPP_TYPE_ can be of type
! complex, integer, logical or real;
! of 4-byte or 8-byte kind; of rank up to 5.
!
! All PEs in a domain decomposition must call
! mpp_global_field, and each will have a complete global field
! at the end. Please note that a global array of rank 3 or higher could
! occupy a lot of memory.
!
!
! call mpp_global_field( domain, local, global, flags )
!
!
!
! local is dimensioned on either the compute domain or the
! data domain of domain.
!
!
! global is dimensioned on the corresponding global domain.
!
!
! flags can be given the value XONLY or
! YONLY, to specify a globalization on one axis only.
!
!
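!
! A minimal sketch (assumes domain is a domain2D; extents are obtained from
! the domain query routines described later in this module):
!
! real, allocatable :: local(:,:), global(:,:)
! integer :: isd, ied, jsd, jed, isg, ieg, jsg, jeg
! call mpp_get_data_domain  ( domain, isd, ied, jsd, jed )
! call mpp_get_global_domain( domain, isg, ieg, jsg, jeg )
! allocate( local(isd:ied,jsd:jed), global(isg:ieg,jsg:jeg) )
! call mpp_global_field( domain, local, global )
!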
interface mpp_global_field
module procedure mpp_global_field2D_r8_2d
module procedure mpp_global_field2D_r8_3d
module procedure mpp_global_field2D_r8_4d
module procedure mpp_global_field2D_r8_5d
# 2121
module procedure mpp_global_field2D_i8_2d
module procedure mpp_global_field2D_i8_3d
module procedure mpp_global_field2D_i8_4d
module procedure mpp_global_field2D_i8_5d
module procedure mpp_global_field2D_l8_2d
module procedure mpp_global_field2D_l8_3d
module procedure mpp_global_field2D_l8_4d
module procedure mpp_global_field2D_l8_5d
module procedure mpp_global_field2D_r4_2d
module procedure mpp_global_field2D_r4_3d
module procedure mpp_global_field2D_r4_4d
module procedure mpp_global_field2D_r4_5d
# 2143
module procedure mpp_global_field2D_i4_2d
module procedure mpp_global_field2D_i4_3d
module procedure mpp_global_field2D_i4_4d
module procedure mpp_global_field2D_i4_5d
module procedure mpp_global_field2D_l4_2d
module procedure mpp_global_field2D_l4_3d
module procedure mpp_global_field2D_l4_4d
module procedure mpp_global_field2D_l4_5d
end interface
interface mpp_global_field_ad
module procedure mpp_global_field2D_r8_2d_ad
module procedure mpp_global_field2D_r8_3d_ad
module procedure mpp_global_field2D_r8_4d_ad
module procedure mpp_global_field2D_r8_5d_ad
# 2164
module procedure mpp_global_field2D_i8_2d_ad
module procedure mpp_global_field2D_i8_3d_ad
module procedure mpp_global_field2D_i8_4d_ad
module procedure mpp_global_field2D_i8_5d_ad
module procedure mpp_global_field2D_l8_2d_ad
module procedure mpp_global_field2D_l8_3d_ad
module procedure mpp_global_field2D_l8_4d_ad
module procedure mpp_global_field2D_l8_5d_ad
module procedure mpp_global_field2D_r4_2d_ad
module procedure mpp_global_field2D_r4_3d_ad
module procedure mpp_global_field2D_r4_4d_ad
module procedure mpp_global_field2D_r4_5d_ad
# 2186
module procedure mpp_global_field2D_i4_2d_ad
module procedure mpp_global_field2D_i4_3d_ad
module procedure mpp_global_field2D_i4_4d_ad
module procedure mpp_global_field2D_i4_5d_ad
module procedure mpp_global_field2D_l4_2d_ad
module procedure mpp_global_field2D_l4_3d_ad
module procedure mpp_global_field2D_l4_4d_ad
module procedure mpp_global_field2D_l4_5d_ad
end interface
interface mpp_do_global_field
module procedure mpp_do_global_field2D_r8_3d
# 2201
module procedure mpp_do_global_field2D_i8_3d
module procedure mpp_do_global_field2D_l8_3d
module procedure mpp_do_global_field2D_r4_3d
# 2211
module procedure mpp_do_global_field2D_i4_3d
module procedure mpp_do_global_field2D_l4_3d
end interface
interface mpp_do_global_field_a2a
module procedure mpp_do_global_field2D_a2a_r8_3d
# 2220
module procedure mpp_do_global_field2D_a2a_i8_3d
module procedure mpp_do_global_field2D_a2a_l8_3d
module procedure mpp_do_global_field2D_a2a_r4_3d
# 2230
module procedure mpp_do_global_field2D_a2a_i4_3d
module procedure mpp_do_global_field2D_a2a_l4_3d
end interface
interface mpp_global_field_ug
module procedure mpp_global_field2D_ug_r8_2d
module procedure mpp_global_field2D_ug_r8_3d
module procedure mpp_global_field2D_ug_r8_4d
module procedure mpp_global_field2D_ug_r8_5d
module procedure mpp_global_field2D_ug_i8_2d
module procedure mpp_global_field2D_ug_i8_3d
module procedure mpp_global_field2D_ug_i8_4d
module procedure mpp_global_field2D_ug_i8_5d
module procedure mpp_global_field2D_ug_r4_2d
module procedure mpp_global_field2D_ug_r4_3d
module procedure mpp_global_field2D_ug_r4_4d
module procedure mpp_global_field2D_ug_r4_5d
module procedure mpp_global_field2D_ug_i4_2d
module procedure mpp_global_field2D_ug_i4_3d
module procedure mpp_global_field2D_ug_i4_4d
module procedure mpp_global_field2D_ug_i4_5d
end interface
interface mpp_do_global_field_ad
module procedure mpp_do_global_field2D_r8_3d_ad
# 2262
module procedure mpp_do_global_field2D_i8_3d_ad
module procedure mpp_do_global_field2D_l8_3d_ad
module procedure mpp_do_global_field2D_r4_3d_ad
# 2272
module procedure mpp_do_global_field2D_i4_3d_ad
module procedure mpp_do_global_field2D_l4_3d_ad
end interface
!
!
! Global max/min of domain-decomposed arrays.
!
!
! mpp_global_max is used to get the maximum value of a
! domain-decomposed array on each PE. MPP_TYPE_ can be of type
! integer or real; of 4-byte or 8-byte kind; of rank
! up to 5. The dimension of locus must equal the rank of
! field.
!
! All PEs in a domain decomposition must call
! mpp_global_max, and each will have the result upon exit.
!
! The function mpp_global_min, with an identical syntax, is
! also available.
!
!
! mpp_global_max( domain, field, locus )
!
!
!
! field is dimensioned on either the compute domain or the
! data domain of domain.
!
!
! locus, if present, can be used to retrieve the location of
! the maximum (as in the MAXLOC intrinsic of f90).
!
!
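!
! A brief sketch (field is dimensioned on the compute or data domain; the
! optional locus returns the global indices of the extremum):
!
! integer :: locus(2)
! real    :: gmax, gmin
! gmax = mpp_global_max( domain, field, locus )
! gmin = mpp_global_min( domain, field )
!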
interface mpp_global_max
module procedure mpp_global_max_r8_2d
module procedure mpp_global_max_r8_3d
module procedure mpp_global_max_r8_4d
module procedure mpp_global_max_r8_5d
module procedure mpp_global_max_r4_2d
module procedure mpp_global_max_r4_3d
module procedure mpp_global_max_r4_4d
module procedure mpp_global_max_r4_5d
module procedure mpp_global_max_i8_2d
module procedure mpp_global_max_i8_3d
module procedure mpp_global_max_i8_4d
module procedure mpp_global_max_i8_5d
module procedure mpp_global_max_i4_2d
module procedure mpp_global_max_i4_3d
module procedure mpp_global_max_i4_4d
module procedure mpp_global_max_i4_5d
end interface
interface mpp_global_min
module procedure mpp_global_min_r8_2d
module procedure mpp_global_min_r8_3d
module procedure mpp_global_min_r8_4d
module procedure mpp_global_min_r8_5d
module procedure mpp_global_min_r4_2d
module procedure mpp_global_min_r4_3d
module procedure mpp_global_min_r4_4d
module procedure mpp_global_min_r4_5d
module procedure mpp_global_min_i8_2d
module procedure mpp_global_min_i8_3d
module procedure mpp_global_min_i8_4d
module procedure mpp_global_min_i8_5d
module procedure mpp_global_min_i4_2d
module procedure mpp_global_min_i4_3d
module procedure mpp_global_min_i4_4d
module procedure mpp_global_min_i4_5d
end interface
!
!
! Global sum of domain-decomposed arrays.
!
!
! mpp_global_sum is used to get the sum of a
! domain-decomposed array on each PE. MPP_TYPE_ can be of type
! integer, complex, or real; of 4-byte or
! 8-byte kind; of rank up to 5.
!
!
! call mpp_global_sum( domain, field, flags )
!
!
!
! field is dimensioned on either the compute domain or the
! data domain of domain.
!
!
! flags, if present, must have the value
! BITWISE_EXACT_SUM. This produces a sum that is guaranteed to
! produce the identical result irrespective of how the domain is
! decomposed. This method does the sum first along the ranks beyond 2,
! and then calls mpp_global_field to produce a
! global 2D array which is then summed. The default method, which is
! considerably faster, does a local sum followed by mpp_sum across the domain
! decomposition.
!
!
! All PEs in a domain decomposition must call
! mpp_global_sum, and each will have the result upon exit.
!
!
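!
! A brief sketch contrasting the default (fast) sum with the
! decomposition-independent sum:
!
! gsum = mpp_global_sum( domain, field )                          ! fast; may vary with layout
! gsum = mpp_global_sum( domain, field, flags=BITWISE_EXACT_SUM ) ! reproducible across layouts
!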
interface mpp_global_sum
module procedure mpp_global_sum_r8_2d
module procedure mpp_global_sum_r8_3d
module procedure mpp_global_sum_r8_4d
module procedure mpp_global_sum_r8_5d
# 2400
module procedure mpp_global_sum_r4_2d
module procedure mpp_global_sum_r4_3d
module procedure mpp_global_sum_r4_4d
module procedure mpp_global_sum_r4_5d
# 2412
module procedure mpp_global_sum_i8_2d
module procedure mpp_global_sum_i8_3d
module procedure mpp_global_sum_i8_4d
module procedure mpp_global_sum_i8_5d
module procedure mpp_global_sum_i4_2d
module procedure mpp_global_sum_i4_3d
module procedure mpp_global_sum_i4_4d
module procedure mpp_global_sum_i4_5d
end interface
!gag
interface mpp_global_sum_tl
module procedure mpp_global_sum_tl_r8_2d
module procedure mpp_global_sum_tl_r8_3d
module procedure mpp_global_sum_tl_r8_4d
module procedure mpp_global_sum_tl_r8_5d
# 2436
module procedure mpp_global_sum_tl_r4_2d
module procedure mpp_global_sum_tl_r4_3d
module procedure mpp_global_sum_tl_r4_4d
module procedure mpp_global_sum_tl_r4_5d
# 2448
module procedure mpp_global_sum_tl_i8_2d
module procedure mpp_global_sum_tl_i8_3d
module procedure mpp_global_sum_tl_i8_4d
module procedure mpp_global_sum_tl_i8_5d
module procedure mpp_global_sum_tl_i4_2d
module procedure mpp_global_sum_tl_i4_3d
module procedure mpp_global_sum_tl_i4_4d
module procedure mpp_global_sum_tl_i4_5d
end interface
!gag
!bnc
interface mpp_global_sum_ad
module procedure mpp_global_sum_ad_r8_2d
module procedure mpp_global_sum_ad_r8_3d
module procedure mpp_global_sum_ad_r8_4d
module procedure mpp_global_sum_ad_r8_5d
# 2473
module procedure mpp_global_sum_ad_r4_2d
module procedure mpp_global_sum_ad_r4_3d
module procedure mpp_global_sum_ad_r4_4d
module procedure mpp_global_sum_ad_r4_5d
# 2485
module procedure mpp_global_sum_ad_i8_2d
module procedure mpp_global_sum_ad_i8_3d
module procedure mpp_global_sum_ad_i8_4d
module procedure mpp_global_sum_ad_i8_5d
module procedure mpp_global_sum_ad_i4_2d
module procedure mpp_global_sum_ad_i4_3d
module procedure mpp_global_sum_ad_i4_4d
module procedure mpp_global_sum_ad_i4_5d
end interface
!bnc
!***********************************************************************
!
! public interface from mpp_domain_util.h
!
!***********************************************************************
!
!
! Retrieve PE number of a neighboring domain.
!
!
! Given a 1-D or 2-D domain decomposition, this call allows users to retrieve
! the PE number of an adjacent PE-domain while taking into account that the
! domain may have holes (masked) and/or have cyclic boundary conditions and/or a
! folded edge. Which PE-domain will be retrieved will depend on "direction":
! +1 (right) or -1 (left) for a 1-D domain decomposition and either NORTH, SOUTH,
! EAST, WEST, NORTH_EAST, SOUTH_EAST, SOUTH_WEST, or NORTH_WEST for a 2-D
! decomposition. If no neighboring domain exists (masked domain), then the
! returned "pe" value will be set to NULL_PE.
!
!
! call mpp_get_neighbor_pe( domain1d, direction=+1 , pe)
! call mpp_get_neighbor_pe( domain2d, direction=NORTH, pe)
!
!
interface mpp_get_neighbor_pe
module procedure mpp_get_neighbor_pe_1d
module procedure mpp_get_neighbor_pe_2d
end interface
!
!
! Equality/inequality operators for domaintypes.
!
!
! The module provides public operators to check for
! equality/inequality of domaintypes, e.g:
!
!
! type(domain1D) :: a, b
! type(domain2D) :: c, d
! ...
! if( a.NE.b )then
! ...
! end if
! if( c==d )then
! ...
! end if
!
!
! Domains are considered equal if and only if the start and end
! indices of each of their component global, data and compute domains
! are equal.
!
!
interface operator(.EQ.)
module procedure mpp_domain1D_eq
module procedure mpp_domain2D_eq
module procedure mpp_domainUG_eq
end interface
interface operator(.NE.)
module procedure mpp_domain1D_ne
module procedure mpp_domain2D_ne
module procedure mpp_domainUG_ne
end interface
!
!
! These routines retrieve the axis specifications associated with the compute domains.
!
!
! The domain is a derived type with private elements. These routines
! retrieve the axis specifications associated with the compute domains.
! The 2D version of these is a simple extension of 1D.
!
!
! call mpp_get_compute_domain
!
!
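!
! Typical usage (the returned indices delimit the compute domain):
!
! call mpp_get_compute_domain( domain1d, is, ie )
! call mpp_get_compute_domain( domain2d, is, ie, js, je )
!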
interface mpp_get_compute_domain
module procedure mpp_get_compute_domain1D
module procedure mpp_get_compute_domain2D
end interface
!
!
! Retrieve the entire array of compute domain extents associated with a decomposition.
!
!
! Retrieve the entire array of compute domain extents associated with a decomposition.
!
!
! call mpp_get_compute_domains( domain, xbegin, xend, xsize, &
! ybegin, yend, ysize )
!
!
!
!
!
!
interface mpp_get_compute_domains
module procedure mpp_get_compute_domains1D
module procedure mpp_get_compute_domains2D
end interface
!
!
! These routines retrieve the axis specifications associated with the data domains.
!
!
! The domain is a derived type with private elements. These routines
! retrieve the axis specifications associated with the data domains.
! The 2D version of these is a simple extension of 1D.
!
!
! call mpp_get_data_domain
!
!
interface mpp_get_data_domain
module procedure mpp_get_data_domain1D
module procedure mpp_get_data_domain2D
end interface
!
!
! These routines retrieve the axis specifications associated with the global domains.
!
!
! The domain is a derived type with private elements. These routines
! retrieve the axis specifications associated with the global domains.
! The 2D version of these is a simple extension of 1D.
!
!
! call mpp_get_global_domain
!
!
interface mpp_get_global_domain
module procedure mpp_get_global_domain1D
module procedure mpp_get_global_domain2D
end interface
!
!
! These routines retrieve the axis specifications associated with the memory domains.
!
!
! The domain is a derived type with private elements. These routines
! retrieve the axis specifications associated with the memory domains.
! The 2D version of these is a simple extension of 1D.
!
!
! call mpp_get_memory_domain
!
!
interface mpp_get_memory_domain
module procedure mpp_get_memory_domain1D
module procedure mpp_get_memory_domain2D
end interface
interface mpp_get_domain_extents
module procedure mpp_get_domain_extents1D
module procedure mpp_get_domain_extents2D
end interface
!
!
! These routines set the axis specifications associated with the compute domains.
!
!
! The domain is a derived type with private elements. These routines
! set the axis specifications associated with the compute domains.
! The 2D version of these is a simple extension of 1D.
!
!
! call mpp_set_compute_domain
!
!
interface mpp_set_compute_domain
module procedure mpp_set_compute_domain1D
module procedure mpp_set_compute_domain2D
end interface
!
!
! These routines set the axis specifications associated with the data domains.
!
!
! The domain is a derived type with private elements. These routines
! set the axis specifications associated with the data domains.
! The 2D version of these is a simple extension of 1D.
!
!
! call mpp_set_data_domain
!
!
interface mpp_set_data_domain
module procedure mpp_set_data_domain1D
module procedure mpp_set_data_domain2D
end interface
!
!
! These routines set the axis specifications associated with the global domains.
!
!
! The domain is a derived type with private elements. These routines
! set the axis specifications associated with the global domains.
! The 2D version of these is a simple extension of 1D.
!
!
! call mpp_set_global_domain
!
!
interface mpp_set_global_domain
module procedure mpp_set_global_domain1D
module procedure mpp_set_global_domain2D
end interface
!
!
! Retrieve list of PEs associated with a domain decomposition.
!
!
! The 1D version of this call returns an array of the PEs assigned to this 1D domain
! decomposition. In addition the optional argument pos may be
! used to retrieve the 0-based position of the domain local to the
! calling PE, i.e. domain%list(pos)%pe is the local PE,
! as returned by mpp_pe().
! The 2D version of this call is identical to the 1D version.
!
!
!
!
!
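!
! A brief sketch (pes must be allocated to the number of domains in the
! decomposition, e.g. using the layout returned by mpp_get_layout):
!
! integer :: pos
! integer, allocatable :: pes(:)
! call mpp_get_pelist( domain, pes, pos )
!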
interface mpp_get_pelist
module procedure mpp_get_pelist1D
module procedure mpp_get_pelist2D
end interface
!
!
! Retrieve layout associated with a domain decomposition.
!
!
! The 1D version of this call returns the number of divisions that was assigned to this
! decomposition axis. The 2D version of this call returns an array of
! dimension 2 holding the results on two axes.
!
!
! call mpp_get_layout( domain, layout )
!
!
!
!
interface mpp_get_layout
module procedure mpp_get_layout1D
module procedure mpp_get_layout2D
end interface
!
!
! nullify domain list.
!
!
! Nullify domain list. This interface is needed in mpp_domains_test.
! 1-D case can be added in if needed.
!
!
! call mpp_nullify_domain_list( domain)
!
!
!
interface mpp_nullify_domain_list
module procedure nullify_domain2d_list
end interface
! Include variable "version" to be written to log file.
# 1 "../include/file_version.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
# 23
character(len=*), parameter :: version = 'unknown'
# 2778 "../mpp/mpp_domains.F90" 2
public version
contains
# 1 "../mpp/include/mpp_define_nest_domains.inc" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
!#############################################################################
! Currently the contact will be limited to overlap contact.
subroutine mpp_define_nest_domains(nest_domain, domain_fine, domain_coarse, tile_fine, tile_coarse, &
istart_fine, iend_fine, jstart_fine, jend_fine, &
istart_coarse, iend_coarse, jstart_coarse, jend_coarse, &
pelist, extra_halo, name)
type(nest_domain_type), intent(inout) :: nest_domain
type(domain2D), target, intent(in ) :: domain_fine, domain_coarse
integer, intent(in ) :: tile_fine, tile_coarse
integer, intent(in ) :: istart_fine, iend_fine, jstart_fine, jend_fine
integer, intent(in ) :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse
integer, optional, intent(in ) :: pelist(:)
integer, optional, intent(in ) :: extra_halo
character(len=*), optional, intent(in ) :: name
logical :: concurrent
integer :: n
integer :: nx_coarse, ny_coarse
integer :: nx_fine, ny_fine
integer :: x_refine, y_refine
integer :: npes, npes_fine, npes_coarse
integer :: extra_halo_local
integer, allocatable :: pes(:)
integer, allocatable :: pes_coarse(:)
integer, allocatable :: pes_fine(:)
if(PRESENT(name)) then
if(len_trim(name) > NAME_LENGTH) then
call mpp_error(FATAL, "mpp_domains_define.inc(mpp_define_nest_domain): "// &
"the len_trim of optional argument name ="//trim(name)// &
" is greater than NAME_LENGTH, change the argument name or increase NAME_LENGTH")
endif
nest_domain%name = name
endif
extra_halo_local = 0
if(present(extra_halo)) then
if(extra_halo .NE. 0) call mpp_error(FATAL, "mpp_define_nest_domains.inc: only support extra_halo=0, contact developer")
extra_halo_local = extra_halo
endif
nest_domain%tile_fine = tile_fine
nest_domain%tile_coarse = tile_coarse
nest_domain%istart_fine = istart_fine
nest_domain%iend_fine = iend_fine
nest_domain%jstart_fine = jstart_fine
nest_domain%jend_fine = jend_fine
nest_domain%istart_coarse = istart_coarse
nest_domain%iend_coarse = iend_coarse
nest_domain%jstart_coarse = jstart_coarse
nest_domain%jend_coarse = jend_coarse
! since it is overlap contact, ie_fine > is_fine, je_fine > js_fine
! and ie_coarse>is_coarse, je_coarse>js_coarse
! if( tile_fine .NE. 1 ) call mpp_error(FATAL, "mpp_define_nest_domains.inc: only support tile_fine = 1, contact developer")
if( iend_fine .LE. istart_fine .OR. jend_fine .LE. jstart_fine ) then
call mpp_error(FATAL, "mpp_define_nest_domains.inc: ie_fine <= is_fine or je_fine <= js_fine "// &
" for domain "//trim(nest_domain%name) )
endif
if( iend_coarse .LE. istart_coarse .OR. jend_coarse .LE. jstart_coarse ) then
call mpp_error(FATAL, "mpp_define_nest_domains.inc: ie_coarse <= is_coarse or je_coarse <= js_coarse "// &
" for nest domain "//trim(nest_domain%name) )
endif
!--- check the pelist. Either domain_coarse%pelist = pelist or
!--- domain_coarse%pelist + domain_fine%pelist = pelist
if( PRESENT(pelist) )then
allocate( pes(size(pelist(:))) )
pes = pelist
else
allocate( pes(mpp_npes()) )
call mpp_get_current_pelist(pes)
end if
npes = size(pes)
npes_coarse = size(domain_coarse%list(:))
npes_fine = size(domain_fine%list(:))
!--- pes_fine and pes_coarse should be subsets of pelist
allocate( pes_coarse(npes_coarse) )
allocate( pes_fine (npes_fine ) )
do n = 1, npes_coarse
pes_coarse(n) = domain_coarse%list(n-1)%pe
if( .NOT. ANY(pes(:) == pes_coarse(n)) ) then
call mpp_error(FATAL, "mpp_domains_define.inc: pelist_coarse is not subset of pelist")
endif
enddo
do n = 1, npes_fine
pes_fine(n) = domain_fine%list(n-1)%pe
if( .NOT. ANY(pes(:) == pes_fine(n)) ) then
call mpp_error(FATAL, "mpp_domains_define.inc: pelist_fine is not subset of pelist")
endif
enddo
allocate(nest_domain%pelist_fine(npes_fine))
allocate(nest_domain%pelist_coarse(npes_coarse))
nest_domain%pelist_fine = pes_fine
nest_domain%pelist_coarse = pes_coarse
nest_domain%is_fine_pe = ANY(pes_fine(:) == mpp_pe())
nest_domain%is_coarse_pe = ANY(pes_coarse(:) == mpp_pe())
!--- We are assuming the fine grid is fully overlapped with coarse grid.
if( nest_domain%is_fine_pe ) then
if( iend_fine - istart_fine + 1 .NE. domain_fine%x(1)%global%size .OR. &
jend_fine - jstart_fine + 1 .NE. domain_fine%y(1)%global%size ) then
call mpp_error(FATAL, "mpp_domains_define.inc: The fine global domain is not covered by coarse domain")
endif
endif
! First compute the send and recv information from fine to coarse.
if( npes == npes_coarse ) then
concurrent = .false.
else if( npes_fine + npes_coarse == npes ) then
concurrent = .true.
else
call mpp_error(FATAL, "mpp_domains_define.inc: size(pelist_coarse) .NE. size(pelist) and "// &
"size(pelist_coarse)+size(pelist_fine) .NE. size(pelist)")
endif
!--- to confirm integer refinement.
nx_coarse = iend_coarse - istart_coarse + 1
ny_coarse = jend_coarse - jstart_coarse + 1
nx_fine = iend_fine - istart_fine + 1
ny_fine = jend_fine - jstart_fine + 1
if( mod(nx_fine,nx_coarse) .NE. 0 ) call mpp_error(FATAL, &
"mpp_domains_define.inc: The refinement in x-direction is not integer for nest domain"//trim(nest_domain%name) )
x_refine = nx_fine/nx_coarse
if( mod(ny_fine,ny_coarse) .NE. 0 ) call mpp_error(FATAL, &
"mpp_domains_define.inc: The refinement in y-direction is not integer for nest domain"//trim(nest_domain%name) )
y_refine = ny_fine/ny_coarse
!--- coarse grid and fine grid should both be symmetric or non-symmetric.
if(domain_coarse%symmetry .AND. .NOT. domain_fine%symmetry) then
call mpp_error(FATAL, "mpp_domains_define.inc: coarse grid domain is symmetric, fine grid domain is not")
endif
if(.NOT. domain_coarse%symmetry .AND. domain_fine%symmetry) then
call mpp_error(FATAL, "mpp_domains_define.inc: fine grid domain is symmetric, coarse grid domain is not")
endif
nest_domain%x_refine = x_refine
nest_domain%y_refine = y_refine
nest_domain%domain_fine => domain_fine
nest_domain%domain_coarse => domain_coarse
allocate( nest_domain%C2F_T, nest_domain%C2F_C, nest_domain%C2F_E, nest_domain%C2F_N )
nest_domain%C2F_T%next => NULL()
nest_domain%C2F_C%next => NULL()
nest_domain%C2F_N%next => NULL()
nest_domain%C2F_E%next => NULL()
allocate( nest_domain%F2C_T, nest_domain%F2C_C, nest_domain%F2C_E, nest_domain%F2C_N )
call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_T, CENTER, trim(nest_domain%name)//" T-cell")
call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_E, EAST, trim(nest_domain%name)//" E-cell")
call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_C, CORNER, trim(nest_domain%name)//" C-cell")
call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_N, NORTH, trim(nest_domain%name)//" N-cell")
call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_T, extra_halo_local, CENTER, trim(nest_domain%name)//" T-cell")
call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_E, extra_halo_local, EAST, trim(nest_domain%name)//" E-cell")
call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_C, extra_halo_local, CORNER, trim(nest_domain%name)//" C-cell")
call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_N, extra_halo_local, NORTH, trim(nest_domain%name)//" N-cell")
deallocate(pes, pes_fine, pes_coarse)
end subroutine mpp_define_nest_domains
!###############################################################################
subroutine compute_overlap_coarse_to_fine(nest_domain, overlap, extra_halo, position, name)
type(nest_domain_type), intent(inout) :: nest_domain
type(nestSpec), intent(inout) :: overlap
integer, intent(in ) :: extra_halo
integer, intent(in ) :: position
character(len=*), intent(in ) :: name
type(domain2D), pointer :: domain_fine =>NULL()
type(domain2D), pointer :: domain_coarse=>NULL()
type(overlap_type), allocatable :: overlapList(:)
logical :: is_first
integer :: tile_fine, tile_coarse
integer :: istart_fine, iend_fine, jstart_fine, jend_fine
integer :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse
integer :: whalo, ehalo, shalo, nhalo
integer :: npes, npes_fine, npes_coarse, n, m
integer :: isg_fine, ieg_fine, jsg_fine, jeg_fine
integer :: isc_coarse, iec_coarse, jsc_coarse, jec_coarse
integer :: is_coarse, ie_coarse, js_coarse, je_coarse
integer :: isc_fine, iec_fine, jsc_fine, jec_fine
integer :: isd_fine, ied_fine, jsd_fine, jed_fine
integer :: isc_east, iec_east, jsc_east, jec_east
integer :: isc_west, iec_west, jsc_west, jec_west
integer :: isc_south, iec_south, jsc_south, jec_south
integer :: isc_north, iec_north, jsc_north, jec_north
integer :: x_refine, y_refine, ishift, jshift
integer :: nsend, nrecv, dir, from_pe, l
integer :: is, ie, js, je, msgsize
integer, allocatable :: msg1(:), msg2(:)
integer, allocatable :: isl_coarse(:), iel_coarse(:), jsl_coarse(:), jel_coarse(:)
integer, allocatable :: isl_fine(:), iel_fine(:), jsl_fine(:), jel_fine(:)
integer :: outunit
outunit = stdout()
domain_fine => nest_domain%domain_fine
domain_coarse => nest_domain%domain_coarse
call mpp_get_domain_shift (domain_coarse, ishift, jshift, position)
tile_fine = nest_domain%tile_fine
tile_coarse = nest_domain%tile_coarse
istart_fine = nest_domain%istart_fine
iend_fine = nest_domain%iend_fine
jstart_fine = nest_domain%jstart_fine
jend_fine = nest_domain%jend_fine
istart_coarse = nest_domain%istart_coarse
iend_coarse = nest_domain%iend_coarse + ishift
jstart_coarse = nest_domain%jstart_coarse
jend_coarse = nest_domain%jend_coarse + jshift
x_refine = nest_domain%x_refine
y_refine = nest_domain%y_refine
npes = mpp_npes()
npes_fine = size(nest_domain%pelist_fine(:))
npes_coarse = size(nest_domain%pelist_coarse(:))
whalo = domain_fine%whalo + extra_halo
ehalo = domain_fine%ehalo + extra_halo
shalo = domain_fine%shalo + extra_halo
nhalo = domain_fine%nhalo + extra_halo
allocate(isl_coarse(npes_coarse), iel_coarse(npes_coarse))
allocate(jsl_coarse(npes_coarse), jel_coarse(npes_coarse))
allocate(isl_fine (npes_fine ), iel_fine (npes_fine ))
allocate(jsl_fine (npes_fine ), jel_fine (npes_fine ))
call mpp_get_global_domain (domain_fine, xbegin=isg_fine, xend=ieg_fine, &
ybegin=jsg_fine, yend=jeg_fine, position=position)
call mpp_get_compute_domain (domain_coarse, xbegin=isc_coarse, xend=iec_coarse, &
ybegin=jsc_coarse, yend=jec_coarse, position=position)
call mpp_get_compute_domain (domain_fine, xbegin=isc_fine, xend=iec_fine, &
ybegin=jsc_fine, yend=jec_fine, position=position)
call mpp_get_compute_domains(domain_coarse, xbegin=isl_coarse, xend=iel_coarse, &
ybegin=jsl_coarse, yend=jel_coarse, position=position)
call mpp_get_compute_domains(domain_fine, xbegin=isl_fine, xend=iel_fine, &
ybegin=jsl_fine, yend=jel_fine, position=position)
overlap%extra_halo = extra_halo
if( nest_domain%is_coarse_pe ) then
overlap%xbegin = isc_coarse - domain_coarse%whalo
overlap%xend = iec_coarse + domain_coarse%ehalo
overlap%ybegin = jsc_coarse - domain_coarse%shalo
overlap%yend = jec_coarse + domain_coarse%nhalo
else
overlap%xbegin = isc_fine - domain_fine%whalo
overlap%xend = iec_fine + domain_fine%ehalo
overlap%ybegin = jsc_fine - domain_fine%shalo
overlap%yend = jec_fine + domain_fine%nhalo
endif
isd_fine = isc_fine - whalo
ied_fine = iec_fine + ehalo
jsd_fine = jsc_fine - shalo
jed_fine = jec_fine + nhalo
overlap%nsend = 0
overlap%nrecv = 0
call init_index_type(overlap%west)
call init_index_type(overlap%east)
call init_index_type(overlap%south)
call init_index_type(overlap%north)
!--- first compute the halo region and corresponding index in coarse grid.
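!--- A minimal index-mapping sketch (illustrative numbers only, not from any
!--- actual configuration): the code below maps a fine compute index i to the
!--- coarse index  istart_coarse + ( i - isg_fine )/x_refine  (and similarly
!--- for j with y_refine).  For example, assuming isg_fine=1, isc_fine=25,
!--- x_refine=3 and istart_coarse=10, the fine point i=25 maps to coarse
!--- index 10 + (25-1)/3 = 18; the resulting halo box is then widened by
!--- whalo/ehalo (or shalo/nhalo) coarse points.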
if( nest_domain%is_fine_pe ) then
if( ieg_fine == iec_fine .AND. domain_fine%tile_id(1) == tile_fine ) then ! east halo
is_coarse = iend_coarse
ie_coarse = iend_coarse + ehalo
js_coarse = jstart_coarse + ( jsc_fine - jsg_fine )/y_refine
je_coarse = jstart_coarse + ( jec_fine - jsg_fine )/y_refine
js_coarse = js_coarse - shalo
je_coarse = je_coarse + nhalo
overlap%east%is_me = iec_fine + 1
overlap%east%ie_me = ied_fine
overlap%east%js_me = jsd_fine
overlap%east%je_me = jed_fine
overlap%east%is_you = is_coarse
overlap%east%ie_you = ie_coarse
overlap%east%js_you = js_coarse
overlap%east%je_you = je_coarse
endif
if( jsg_fine == jsc_fine .AND. domain_fine%tile_id(1) == tile_fine) then ! south
is_coarse = istart_coarse + ( isc_fine - isg_fine )/x_refine
ie_coarse = istart_coarse + ( iec_fine - isg_fine )/x_refine
is_coarse = is_coarse - whalo
ie_coarse = ie_coarse + ehalo
js_coarse = jstart_coarse - shalo
je_coarse = jstart_coarse
overlap%south%is_me = isd_fine
overlap%south%ie_me = ied_fine
overlap%south%js_me = jsd_fine
overlap%south%je_me = jsc_fine-1
overlap%south%is_you = is_coarse
overlap%south%ie_you = ie_coarse
overlap%south%js_you = js_coarse
overlap%south%je_you = je_coarse
endif
if( isg_fine == isc_fine .AND. domain_fine%tile_id(1) == tile_fine) then ! west
is_coarse = istart_coarse - whalo
ie_coarse = istart_coarse
js_coarse = jstart_coarse + ( jsc_fine - jsg_fine )/y_refine
je_coarse = jstart_coarse + ( jec_fine - jsg_fine )/y_refine
js_coarse = js_coarse - shalo
je_coarse = je_coarse + nhalo
overlap%west%is_me = isd_fine
overlap%west%ie_me = isc_fine-1
overlap%west%js_me = jsd_fine
overlap%west%je_me = jed_fine
overlap%west%is_you = is_coarse
overlap%west%ie_you = ie_coarse
overlap%west%js_you = js_coarse
overlap%west%je_you = je_coarse
endif
if( jeg_fine == jec_fine .AND. domain_fine%tile_id(1) == tile_fine) then ! north
is_coarse = istart_coarse + ( isc_fine - isg_fine )/x_refine
ie_coarse = istart_coarse + ( iec_fine - isg_fine )/x_refine
is_coarse = is_coarse - whalo
ie_coarse = ie_coarse + ehalo
js_coarse = jend_coarse
je_coarse = jend_coarse + nhalo
overlap%north%is_me = isd_fine
overlap%north%ie_me = ied_fine
overlap%north%js_me = jec_fine+1
overlap%north%je_me = jed_fine
overlap%north%is_you = is_coarse
overlap%north%ie_you = ie_coarse
overlap%north%js_you = js_coarse
overlap%north%je_you = je_coarse
endif
allocate(overLaplist(npes_coarse))
!-------------------------------------------------------------------------
!
! Receiving
!
!-------------------------------------------------------------------------
!--- loop through coarse pelist
nrecv = 0
do n = 1, npes_coarse
if( domain_coarse%list(n-1)%tile_id(1) .NE. tile_coarse ) cycle
is_first = .true.
!--- east halo receiving
is_coarse = overlap%east%is_you
ie_coarse = overlap%east%ie_you
js_coarse = overlap%east%js_you
je_coarse = overlap%east%je_you
if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
dir = 1
is_coarse = max( is_coarse, isl_coarse(n) )
ie_coarse = min( ie_coarse, iel_coarse(n) )
js_coarse = max( js_coarse, jsl_coarse(n) )
je_coarse = min( je_coarse, jel_coarse(n) )
if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
if(is_first) then
nrecv = nrecv + 1
call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
is_first = .false.
endif
call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
endif
endif
!--- south halo receiving
is_coarse = overlap%south%is_you
ie_coarse = overlap%south%ie_you
js_coarse = overlap%south%js_you
je_coarse = overlap%south%je_you
if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
dir = 3
is_coarse = max( is_coarse, isl_coarse(n) )
ie_coarse = min( ie_coarse, iel_coarse(n) )
js_coarse = max( js_coarse, jsl_coarse(n) )
je_coarse = min( je_coarse, jel_coarse(n) )
if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
if(is_first) then
nrecv = nrecv + 1
call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
is_first = .false.
endif
call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
endif
endif
!--- west halo receiving
is_coarse = overlap%west%is_you
ie_coarse = overlap%west%ie_you
js_coarse = overlap%west%js_you
je_coarse = overlap%west%je_you
if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
dir = 5
is_coarse = max( is_coarse, isl_coarse(n) )
ie_coarse = min( ie_coarse, iel_coarse(n) )
js_coarse = max( js_coarse, jsl_coarse(n) )
je_coarse = min( je_coarse, jel_coarse(n) )
if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
if(is_first) then
nrecv = nrecv + 1
call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
is_first = .false.
endif
call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
endif
endif
!--- north halo receiving
is_coarse = overlap%north%is_you
ie_coarse = overlap%north%ie_you
js_coarse = overlap%north%js_you
je_coarse = overlap%north%je_you
if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
dir = 7
is_coarse = max( is_coarse, isl_coarse(n) )
ie_coarse = min( ie_coarse, iel_coarse(n) )
js_coarse = max( js_coarse, jsl_coarse(n) )
je_coarse = min( je_coarse, jel_coarse(n) )
if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
if(is_first) then
nrecv = nrecv + 1
call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
is_first = .false.
endif
call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
endif
endif
enddo
!--- copy the overlap information into the nest_domain data.
overlap%nrecv = nrecv
if( nrecv > 0 ) then
allocate(overlap%recv(nrecv))
do n = 1, nrecv
call copy_nest_overlap( overlap%recv(n), overLaplist(n) )
call deallocate_nest_overlap( overLaplist(n) )
enddo
endif
if(allocated(overlaplist))deallocate(overlapList)
endif
!-----------------------------------------------------------------------
!
! Sending
!
!-----------------------------------------------------------------------
if( nest_domain%is_coarse_pe ) then
nsend = 0
if(domain_coarse%tile_id(1) == tile_coarse) then
isc_east = iend_coarse
iec_east = iend_coarse + ehalo
jsc_east = jstart_coarse - shalo
jec_east = jend_coarse + nhalo
isc_east = max(isc_coarse, isc_east)
iec_east = min(iec_coarse, iec_east)
jsc_east = max(jsc_coarse, jsc_east)
jec_east = min(jec_coarse, jec_east)
isc_south = istart_coarse - whalo
iec_south = iend_coarse + ehalo
jsc_south = jstart_coarse - shalo
jec_south = jstart_coarse
isc_south = max(isc_coarse, isc_south)
iec_south = min(iec_coarse, iec_south)
jsc_south = max(jsc_coarse, jsc_south)
jec_south = min(jec_coarse, jec_south)
isc_west = istart_coarse - whalo
iec_west = istart_coarse
jsc_west = jstart_coarse - shalo
jec_west = jend_coarse + nhalo
isc_west = max(isc_coarse, isc_west)
iec_west = min(iec_coarse, iec_west)
jsc_west = max(jsc_coarse, jsc_west)
jec_west = min(jec_coarse, jec_west)
isc_north = istart_coarse - whalo
iec_north = iend_coarse + ehalo
jsc_north = jend_coarse
jec_north = jend_coarse + nhalo
isc_north = max(isc_coarse, isc_north)
iec_north = min(iec_coarse, iec_north)
jsc_north = max(jsc_coarse, jsc_north)
jec_north = min(jec_coarse, jec_north)
else
isc_west = 0; iec_west = -1; jsc_west = 0; jec_west = -1
isc_east = 0; iec_east = -1; jsc_east = 0; jec_east = -1
isc_south = 0; iec_south = -1; jsc_south = 0; jec_south = -1
isc_north = 0; iec_north = -1; jsc_north = 0; jec_north = -1
endif
allocate(overLaplist(npes_fine))
do n = 1, npes_fine
if( domain_fine%list(n-1)%tile_id(1) .NE. tile_fine ) cycle
is_first = .true.
!--- to_pe's east
if( ieg_fine == iel_fine(n) ) then
dir = 1
if( iec_east .GE. isc_east .AND. jec_east .GE. jsc_east ) then
is_coarse = iend_coarse
ie_coarse = iend_coarse + ehalo
js_coarse = jstart_coarse + ( jsl_fine(n) - jsg_fine )/y_refine
je_coarse = jstart_coarse + ( jel_fine(n) - jsg_fine )/y_refine
js_coarse = js_coarse - shalo
je_coarse = je_coarse + nhalo
is_coarse = max(isc_east, is_coarse)
ie_coarse = min(iec_east, ie_coarse)
js_coarse = max(jsc_east, js_coarse)
je_coarse = min(jec_east, je_coarse)
if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
if(is_first) then
nsend = nsend + 1
call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
is_first = .false.
endif
call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
endif
endif
endif
!--- to_pe's south
if( jsg_fine == jsl_fine(n) ) then
dir = 3
if( iec_south .GE. isc_south .AND. jec_south .GE. jsc_south ) then
is_coarse = istart_coarse + ( isl_fine(n) - isg_fine )/x_refine
ie_coarse = istart_coarse + ( iel_fine(n) - isg_fine )/x_refine
is_coarse = is_coarse - whalo   !--- use whalo/ehalo to match the receive-side south-halo box
ie_coarse = ie_coarse + ehalo
js_coarse = jstart_coarse - shalo
je_coarse = jstart_coarse
is_coarse = max(isc_south, is_coarse)
ie_coarse = min(iec_south, ie_coarse)
js_coarse = max(jsc_south, js_coarse)
je_coarse = min(jec_south, je_coarse)
if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
if(is_first) then
nsend = nsend + 1
call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
is_first = .false.
endif
call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
endif
endif
endif
!--- to_pe's west
if( isg_fine == isl_fine(n) ) then
dir = 5
if( iec_west .GE. isc_west .AND. jec_west .GE. jsc_west ) then
is_coarse = istart_coarse - whalo
ie_coarse = istart_coarse
js_coarse = jstart_coarse + ( jsl_fine(n) - jsg_fine )/y_refine
je_coarse = jstart_coarse + ( jel_fine(n) - jsg_fine )/y_refine
js_coarse = js_coarse - shalo
je_coarse = je_coarse + nhalo
is_coarse = max(isc_west, is_coarse)
ie_coarse = min(iec_west, ie_coarse)
js_coarse = max(jsc_west, js_coarse)
je_coarse = min(jec_west, je_coarse)
if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
if(is_first) then
nsend = nsend + 1
call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
is_first = .false.
endif
call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
endif
endif
endif
!--- to_pe's north
if( jeg_fine == jel_fine(n) ) then
dir = 7
if( iec_north .GE. isc_north .AND. jec_north .GE. jsc_north ) then
is_coarse = istart_coarse + ( isl_fine(n) - isg_fine )/x_refine
ie_coarse = istart_coarse + ( iel_fine(n) - isg_fine )/x_refine
is_coarse = is_coarse - whalo   !--- use whalo/ehalo to match the receive-side north-halo box
ie_coarse = ie_coarse + ehalo
js_coarse = jend_coarse
je_coarse = jend_coarse + nhalo
is_coarse = max(isc_north, is_coarse)
ie_coarse = min(iec_north, ie_coarse)
js_coarse = max(jsc_north, js_coarse)
je_coarse = min(jec_north, je_coarse)
if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
if(is_first) then
nsend = nsend + 1
call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
is_first = .false.
endif
call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
endif
endif
endif
enddo
!--- copy the overlap information into the nest_domain data.
overlap%nsend = nsend
if( nsend > 0 ) then
allocate(overlap%send(nsend))
do n = 1, nsend
call copy_nest_overlap( overlap%send(n), overLaplist(n) )
call deallocate_nest_overlap( overLaplist(n) )
enddo
endif
if(allocated(overlaplist))deallocate(overLaplist)
endif
deallocate(isl_coarse, iel_coarse, jsl_coarse, jel_coarse)
deallocate(isl_fine, iel_fine, jsl_fine, jel_fine)
if(debug_message_passing) then
allocate(msg1(0:npes-1), msg2(0:npes-1) )
msg1 = 0
msg2 = 0
do m = 1, overlap%nrecv
msgsize = 0
do n = 1, overlap%recv(m)%count
is = overlap%recv(m)%is(n); ie = overlap%recv(m)%ie(n)
js = overlap%recv(m)%js(n); je = overlap%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
from_pe = overlap%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(l) = msgsize
enddo
do m = 1, overlap%nsend
msgsize = 0
do n = 1, overlap%send(m)%count
is = overlap%send(m)%is(n); ie = overlap%send(m)%ie(n)
js = overlap%send(m)%js(n); je = overlap%send(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
call mpp_send( msgsize, plen=1, to_pe=overlap%send(m)%pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, npes-1
if(msg1(m) .NE. msg2(m)) then
print*, "compute_overlap_coarse_to_fine: My pe = ", mpp_pe(), ",name =", trim(name),", from pe=", &
m+mpp_root_pe(), ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_compute_overlap_coarse_to_fine: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from compute_overlap_coarse_to_fine: "// &
"message sizes are matched between send and recv for "//trim(name)
deallocate(msg1, msg2)
endif
end subroutine compute_overlap_coarse_to_fine
!###############################################################################
!-- This routine computes the send and recv information between the overlapped nesting
!-- regions. The data is assumed to be on the T-cell center.
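!-- A minimal index-mapping sketch (illustrative numbers only): the code below
!-- expands each coarse cell of the nest region into x_refine by y_refine fine
!-- cells,
!--    is_fine = istart_fine + (is_coarse - istart_coarse  ) * x_refine
!--    ie_fine = istart_fine + (ie_coarse - istart_coarse+1) * x_refine - 1
!-- so, assuming istart_fine=1, istart_coarse=10 and x_refine=3, the coarse
!-- range 12..13 maps to the fine range 7..12 (2 coarse cells -> 6 fine cells).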
subroutine compute_overlap_fine_to_coarse(nest_domain, overlap, position, name)
type(nest_domain_type), intent(inout) :: nest_domain
type(nestSpec), intent(inout) :: overlap
integer, intent(in ) :: position
character(len=*), intent(in ) :: name
!--- local variables
type(domain2D), pointer :: domain_fine =>NULL()
type(domain2D), pointer :: domain_coarse=>NULL()
type(overlap_type), allocatable :: overlapList(:)
logical :: is_first
integer :: tile_fine, tile_coarse
integer :: istart_fine, iend_fine, jstart_fine, jend_fine
integer :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse
integer :: whalo, ehalo, shalo, nhalo
integer :: npes, npes_fine, npes_coarse, n, m
integer :: isg_fine, ieg_fine, jsg_fine, jeg_fine
integer :: isc_coarse, iec_coarse, jsc_coarse, jec_coarse
integer :: is_coarse, ie_coarse, js_coarse, je_coarse
integer :: is_fine, ie_fine, js_fine, je_fine
integer :: isc_fine, iec_fine, jsc_fine, jec_fine
integer :: is_you, ie_you, js_you, je_you
integer :: x_refine, y_refine, ishift, jshift
integer :: nsend, nrecv, dir, from_pe, l
integer :: is, ie, js, je, msgsize
integer, allocatable :: msg1(:), msg2(:)
integer, allocatable :: isl_coarse(:), iel_coarse(:), jsl_coarse(:), jel_coarse(:)
integer, allocatable :: isl_fine(:), iel_fine(:), jsl_fine(:), jel_fine(:)
integer :: outunit
outunit = stdout()
domain_fine => nest_domain%domain_fine
domain_coarse => nest_domain%domain_coarse
tile_fine = nest_domain%tile_fine
tile_coarse = nest_domain%tile_coarse
istart_fine = nest_domain%istart_fine
iend_fine = nest_domain%iend_fine
jstart_fine = nest_domain%jstart_fine
jend_fine = nest_domain%jend_fine
istart_coarse = nest_domain%istart_coarse
iend_coarse = nest_domain%iend_coarse
jstart_coarse = nest_domain%jstart_coarse
jend_coarse = nest_domain%jend_coarse
x_refine = nest_domain%x_refine
y_refine = nest_domain%y_refine
npes = mpp_npes()
npes_fine = size(nest_domain%pelist_fine(:))
npes_coarse = size(nest_domain%pelist_coarse(:))
allocate(isl_coarse(npes_coarse), iel_coarse(npes_coarse) )
allocate(jsl_coarse(npes_coarse), jel_coarse(npes_coarse) )
allocate(isl_fine(npes_fine), iel_fine(npes_fine) )
allocate(jsl_fine(npes_fine), jel_fine(npes_fine) )
call mpp_get_compute_domain (domain_coarse, xbegin=isc_coarse, xend=iec_coarse, ybegin=jsc_coarse, yend=jec_coarse)
call mpp_get_compute_domain (domain_fine, xbegin=isc_fine, xend=iec_fine, ybegin=jsc_fine, yend=jec_fine)
call mpp_get_compute_domains(domain_coarse, xbegin=isl_coarse, xend=iel_coarse, ybegin=jsl_coarse, yend=jel_coarse)
call mpp_get_compute_domains(domain_fine, xbegin=isl_fine, xend=iel_fine, ybegin=jsl_fine, yend=jel_fine)
call mpp_get_domain_shift (domain_coarse, ishift, jshift, position)
overlap%center%is_you = 0; overlap%center%ie_you = -1
overlap%center%js_you = 0; overlap%center%je_you = -1
if( nest_domain%is_fine_pe ) then
overlap%xbegin = isc_fine - domain_fine%whalo
overlap%xend = iec_fine + domain_fine%ehalo + ishift
overlap%ybegin = jsc_fine - domain_fine%shalo
overlap%yend = jec_fine + domain_fine%nhalo + jshift
else
overlap%xbegin = isc_coarse - domain_coarse%whalo
overlap%xend = iec_coarse + domain_coarse%ehalo + ishift
overlap%ybegin = jsc_coarse - domain_coarse%shalo
overlap%yend = jec_coarse + domain_coarse%nhalo + jshift
endif
overlap%nsend = 0
overlap%nrecv = 0
call init_index_type(overlap%center)
!-----------------------------------------------------------------------------------------
!
! Sending from fine to coarse.
! Compute the send information from the fine grid to the coarse grid. Only the
! interior of the fine grid needs to be sent to the coarse grid.
!-----------------------------------------------------------------------------------------
nsend = 0
if( nest_domain%is_fine_pe ) then
allocate(overLaplist(npes_coarse))
do n = 1, npes_coarse
if(domain_coarse%list(n-1)%tile_id(1) == tile_coarse) then
is_coarse = max( istart_coarse, isl_coarse(n) )
ie_coarse = min( iend_coarse, iel_coarse(n) )
js_coarse = max( jstart_coarse, jsl_coarse(n) )
je_coarse = min( jend_coarse, jel_coarse(n) )
if(ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
is_fine = istart_fine + (is_coarse - istart_coarse) * x_refine
ie_fine = istart_fine + (ie_coarse - istart_coarse + 1) * x_refine - 1
js_fine = jstart_fine + (js_coarse - jstart_coarse) * y_refine
je_fine = jstart_fine + (je_coarse - jstart_coarse + 1) * y_refine - 1
dir = 0
is_fine = max(isc_fine, is_fine)
ie_fine = min(iec_fine, ie_fine)
js_fine = max(jsc_fine, js_fine)
je_fine = min(jec_fine, je_fine)
if( ie_fine .GE. is_fine .AND. je_fine .GE. js_fine ) then
nsend = nsend + 1
call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_coarse(n), &
is_fine, ie_fine+ishift, js_fine, je_fine+jshift, dir, ZERO)
endif
endif
endif
enddo
overlap%nsend = nsend
if(nsend > 0) then
allocate(overlap%send(nsend))
do n = 1, nsend
call copy_nest_overlap(overlap%send(n), overlaplist(n) )
call deallocate_nest_overlap(overlaplist(n))
enddo
endif
if(allocated(overlaplist))deallocate(overlaplist)
endif
!--------------------------------------------------------------------------------
! Compute the recv information on the coarse grid for data coming from the fine
! grid. Only the interior of the fine grid needs to be received.
!--------------------------------------------------------------------------------
if( nest_domain%is_coarse_pe ) then
nrecv = 0
if(domain_coarse%tile_id(1) == tile_coarse) then
is_coarse = max( istart_coarse, isc_coarse )
ie_coarse = min( iend_coarse, iec_coarse )
js_coarse = max( jstart_coarse, jsc_coarse )
je_coarse = min( jend_coarse, jec_coarse )
if(ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
is_fine = istart_fine + (is_coarse - istart_coarse) * x_refine
ie_fine = istart_fine + (ie_coarse - istart_coarse + 1) * x_refine - 1
js_fine = jstart_fine + (js_coarse - jstart_coarse) * y_refine
je_fine = jstart_fine + (je_coarse - jstart_coarse + 1) * y_refine - 1
overlap%center%is_me = is_coarse; overlap%center%ie_me = ie_coarse + ishift
overlap%center%js_me = js_coarse; overlap%center%je_me = je_coarse + jshift
overlap%center%is_you = is_fine; overlap%center%ie_you = ie_fine + ishift
overlap%center%js_you = js_fine; overlap%center%je_you = je_fine + jshift
dir = 0
allocate(overLaplist(npes_fine))
do n = 1, npes_fine
is_you = max(isl_fine(n), is_fine)
ie_you = min(iel_fine(n), ie_fine)
js_you = max(jsl_fine(n), js_fine)
je_you = min(jel_fine(n), je_fine)
if( ie_you .GE. is_you .AND. je_you .GE. js_you ) then
nrecv = nrecv + 1
call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_fine(n), &
is_you, ie_you+ishift, js_you, je_you+jshift , dir, ZERO)
endif
enddo
endif
endif
overlap%nrecv = nrecv
if(nrecv > 0) then
allocate(overlap%recv(nrecv))
do n = 1, nrecv
call copy_nest_overlap(overlap%recv(n), overlaplist(n) )
call deallocate_nest_overlap( overLaplist(n) )
enddo
endif
if(allocated(overlaplist))deallocate(overlaplist)
endif
deallocate(isl_coarse, iel_coarse, jsl_coarse, jel_coarse)
deallocate(isl_fine, iel_fine, jsl_fine, jel_fine)
if(debug_message_passing) then
allocate(msg1(0:npes-1), msg2(0:npes-1) )
msg1 = 0
msg2 = 0
do m = 1, overlap%nrecv
msgsize = 0
do n = 1, overlap%recv(m)%count
is = overlap%recv(m)%is(n); ie = overlap%recv(m)%ie(n)
js = overlap%recv(m)%js(n); je = overlap%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
from_pe = overlap%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2)
msg2(l) = msgsize
enddo
do m = 1, overlap%nsend
msgsize = 0
do n = 1, overlap%send(m)%count
is = overlap%send(m)%is(n); ie = overlap%send(m)%ie(n)
js = overlap%send(m)%js(n); je = overlap%send(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
call mpp_send( msgsize, plen=1, to_pe=overlap%send(m)%pe, tag=COMM_TAG_2)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, npes-1
if(msg1(m) .NE. msg2(m)) then
print*, "compute_overlap_fine_to_coarse: My pe = ", mpp_pe(), ",name =", trim(name),", from pe=", &
m+mpp_root_pe(), ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_compute_overlap_coarse_to_fine: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from compute_overlap_fine_to_coarse: "// &
"message sizes are matched between send and recv for "//trim(name)
deallocate(msg1, msg2)
endif
end subroutine compute_overlap_fine_to_coarse
!!$subroutine set_overlap_fine_to_coarse(nest_domain, position)
!!$ type(nest_domain_type), intent(inout) :: nest_domain
!!$ integer, intent(in ) :: position
!!$
!!$
!!$ call mpp_get_domain_shift(domain, ishift, jshift, position)
!!$ update_in => nest_domain%F2C_T
!!$ select case(position)
!!$ case (CORNER)
!!$ update_out => nest_domain%F2C_C
!!$ case (EAST)
!!$ update_out => nest_domain%F2C_E
!!$ case (NORTH)
!!$ update_out => nest_domain%F2C_N
!!$ case default
!!$ call mpp_error(FATAL, "mpp_domains_define.inc(set_overlap_fine_to_coarse): the position should be CORNER, EAST or NORTH")
!!$ end select
!!$
!!$ nsend = update_in%nsend
!!$ nrecv = update_in%nrecv
!!$ update_out%pe = update_in%pe
!!$ update_out%nsend = nsend
!!$ update_out%nrecv = nrecv
!!$
!!$ if( nsend > 0 ) then
!!$ allocate(update_out%send(nsend))
!!$ do n = 1, nsend
!!$ count = update_in%send(n)%count
!!$ call allocate_overlap_type(update_out%send(n), update_in%count, overlap_in%type)
!!$ do m = 1, count
!!$ update_out%send(n)%is (count) = update_in%send(n)%is (count)
!!$ update_out%send(n)%ie (count) = update_in%send(n)%ie (count) + ishift
!!$ update_out%send(n)%js (count) = update_in%send(n)%js (count)
!!$ update_out%send(n)%je (count) = update_in%send(n)%je (count) + jshift
!!$ update_out%send(n)%tileMe (count) = update_in%send(n)%tileMe (count)
!!$ update_out%send(n)%dir (count) = update_in%send(n)%dir (count)
!!$ update_out%send(n)%rotation(count) = update_in%send(n)%rotation(count)
!!$ enddo
!!$ enddo
!!$ endif
!!$
!!$
!!$ if( nrecv > 0 ) then
!!$ allocate(update_out%recv(nrecv))
!!$ do n = 1, nrecv
!!$ count = update_in%recv(n)%count
!!$ call allocate_overlap_type(update_out%recv(n), update_in%count, overlap_in%type)
!!$ do m = 1, count
!!$ update_out%recv(n)%is (count) = update_in%recv(n)%is (count)
!!$ update_out%recv(n)%ie (count) = update_in%recv(n)%ie (count) + ishift
!!$ update_out%recv(n)%js (count) = update_in%recv(n)%js (count)
!!$ update_out%recv(n)%je (count) = update_in%recv(n)%je (count) + jshift
!!$ update_out%recv(n)%tileMe (count) = update_in%recv(n)%tileMe (count)
!!$ update_out%recv(n)%dir (count) = update_in%recv(n)%dir (count)
!!$ update_out%recv(n)%rotation(count) = update_in%recv(n)%rotation(count)
!!$ enddo
!!$ enddo
!!$ endif
!!$
!!$end subroutine set_overlap_fine_to_coarse
!###############################################################################
subroutine init_index_type (indexData )
type(index_type), intent(inout) :: indexData
indexData%is_me = 0
indexData%ie_me = -1
indexData%js_me = 0
indexData%je_me = -1
indexData%is_you = 0
indexData%ie_you = -1
indexData%js_you = 0
indexData%je_you = -1
end subroutine init_index_type
subroutine allocate_nest_overlap(overlap, count)
type(overlap_type), intent(inout) :: overlap
integer, intent(in ) :: count
overlap%count = 0
overlap%pe = NULL_PE
if( ASSOCIATED(overlap%is) ) call mpp_error(FATAL, &
"mpp_define_nest_domains.inc: overlap is already been allocated")
allocate(overlap%is (count) )
allocate(overlap%ie (count) )
allocate(overlap%js (count) )
allocate(overlap%je (count) )
allocate(overlap%dir (count) )
allocate(overlap%rotation (count) )
allocate(overlap%msgsize (count) )
end subroutine allocate_nest_overlap
!##############################################################################
subroutine deallocate_nest_overlap(overlap)
type(overlap_type), intent(inout) :: overlap
overlap%count = 0
overlap%pe = NULL_PE
deallocate(overlap%is)
deallocate(overlap%ie)
deallocate(overlap%js)
deallocate(overlap%je)
deallocate(overlap%dir)
deallocate(overlap%rotation)
deallocate(overlap%msgsize)
end subroutine deallocate_nest_overlap
!##############################################################################
subroutine insert_nest_overlap(overlap, pe, is, ie, js, je, dir, rotation)
type(overlap_type), intent(inout) :: overlap
integer, intent(in ) :: pe
integer, intent(in ) :: is, ie, js, je
integer, intent(in ) :: dir, rotation
integer :: count
if( overlap%count == 0 ) then
overlap%pe = pe
else
if(overlap%pe .NE. pe) call mpp_error(FATAL, &
"mpp_define_nest_domains.inc: mismatch on pe")
endif
overlap%count = overlap%count+1
count = overlap%count
if(count > size(overlap%is(:))) call mpp_error(FATAL, &
"mpp_define_nest_domains.inc: overlap%count > size(overlap%is), contact developer")
overlap%is (count) = is
overlap%ie (count) = ie
overlap%js (count) = js
overlap%je (count) = je
overlap%dir (count) = dir
overlap%rotation (count) = rotation
overlap%msgsize (count) = (ie-is+1)*(je-js+1)
end subroutine insert_nest_overlap
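!--- The helpers above are used together by the overlap-computation routines in
!--- this file.  A minimal lifecycle sketch (this mirrors the existing usage, it
!--- is not a separate API; tmp is an illustrative name):
!---
!---    call allocate_nest_overlap(tmp, MAXOVERLAP)              ! reserve space
!---    call insert_nest_overlap(tmp, pe, is, ie, js, je, dir, ZERO)
!---    call copy_nest_overlap(overlap%recv(n), tmp)             ! trims to tmp%count
!---    call deallocate_nest_overlap(tmp)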
!#########################################################
subroutine copy_nest_overlap(overlap_out, overlap_in)
type(overlap_type), intent(inout) :: overlap_out
type(overlap_type), intent(in) :: overlap_in
if(overlap_in%count == 0) call mpp_error(FATAL, &
"mpp_define_nest_domains.inc: overlap_in%count is 0")
if(associated(overlap_out%is)) call mpp_error(FATAL, &
"mpp_define_nest_domains.inc: overlap_out is already been allocated")
call allocate_nest_overlap(overlap_out, overlap_in%count)
overlap_out%count = overlap_in%count
overlap_out%pe = overlap_in%pe
overlap_out%is(:) = overlap_in%is(1:overlap_in%count)
overlap_out%ie(:) = overlap_in%ie(1:overlap_in%count)
overlap_out%js(:) = overlap_in%js(1:overlap_in%count)
overlap_out%je(:) = overlap_in%je(1:overlap_in%count)
overlap_out%dir(:) = overlap_in%dir(1:overlap_in%count)
overlap_out%rotation(:) = overlap_in%rotation(1:overlap_in%count)
overlap_out%msgsize(:) = overlap_in%msgsize(1:overlap_in%count)
end subroutine copy_nest_overlap
!#######################################################################
! This routine finds the C2F nestSpec whose extra halo size matches the input
! extra_halo; if none exists yet, a new one is computed and appended to the list.
function search_C2F_nest_overlap(nest_domain, extra_halo, position)
type(nest_domain_type), intent(inout) :: nest_domain
integer, intent(in) :: extra_halo
integer, intent(in) :: position
type(nestSpec), pointer :: search_C2F_nest_overlap
type(nestSpec), pointer :: update_ref
character(len=128) :: name
select case(position)
case (CENTER)
name = trim(nest_domain%name)//" T-cell"
update_ref => nest_domain%C2F_T
case (CORNER)
name = trim(nest_domain%name)//" C-cell"
update_ref => nest_domain%C2F_C
case (NORTH)
name = trim(nest_domain%name)//" N-cell"
update_ref => nest_domain%C2F_N
case (EAST)
name = trim(nest_domain%name)//" E-cell"
update_ref => nest_domain%C2F_E
case default
call mpp_error(FATAL,"mpp_define_nest_domains.inc(search_C2F_nest_overlap): position should be CENTER|CORNER|EAST|NORTH")
end select
search_C2F_nest_overlap => update_ref
do
if(extra_halo == search_C2F_nest_overlap%extra_halo) then
exit ! found domain
endif
!--- if not found, switch to next
if(.NOT. ASSOCIATED(search_C2F_nest_overlap%next)) then
allocate(search_C2F_nest_overlap%next)
search_C2F_nest_overlap => search_C2F_nest_overlap%next
call compute_overlap_coarse_to_fine(nest_domain, search_C2F_nest_overlap, extra_halo, position, name)
exit
else
search_C2F_nest_overlap => search_C2F_nest_overlap%next
end if
end do
update_ref => NULL()
end function search_C2F_nest_overlap
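!--- A minimal usage sketch (assumed caller context; update is an illustrative
!--- pointer name): the C2F update code asks for the nestSpec matching its halo
!--- width, and a new one is computed and linked in the first time that width
!--- is requested:
!---
!---    update => search_C2F_nest_overlap(nest_domain, extra_halo, CENTER)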
!#######################################################################
! This routine returns the F2C nestSpec corresponding to the given grid position
! (CENTER, CORNER, EAST or NORTH).
function search_F2C_nest_overlap(nest_domain, position)
type(nest_domain_type), intent(inout) :: nest_domain
integer, intent(in) :: position
type(nestSpec), pointer :: search_F2C_nest_overlap
select case(position)
case (CENTER)
search_F2C_nest_overlap => nest_domain%F2C_T
case (CORNER)
search_F2C_nest_overlap => nest_domain%F2C_C
case (NORTH)
search_F2C_nest_overlap => nest_domain%F2C_N
case (EAST)
search_F2C_nest_overlap => nest_domain%F2C_E
case default
call mpp_error(FATAL,"mpp_define_nest_domains.inc(search_F2C_nest_overlap): position should be CENTER|CORNER|EAST|NORTH")
end select
end function search_F2C_nest_overlap
!################################################################
subroutine mpp_get_C2F_index(nest_domain, is_fine, ie_fine, js_fine, je_fine, &
is_coarse, ie_coarse, js_coarse, je_coarse, dir, position)
type(nest_domain_type), intent(in ) :: nest_domain
integer, intent(out) :: is_fine, ie_fine, js_fine, je_fine
integer, intent(out) :: is_coarse, ie_coarse, js_coarse, je_coarse
integer, intent(in ) :: dir
integer, optional, intent(in ) :: position
integer :: update_position
type(nestSpec), pointer :: update => NULL()
update_position = CENTER
if(present(position)) update_position = position
select case(update_position)
case (CENTER)
update => nest_domain%C2F_T
case (EAST)
update => nest_domain%C2F_E
case (CORNER)
update => nest_domain%C2F_C
case (NORTH)
update => nest_domain%C2F_N
case default
call mpp_error(FATAL, "mpp_define_nest_domains.inc(mpp_get_C2F_index): invalid option argument position")
end select
select case(dir)
case(WEST)
is_fine = update%west%is_me
ie_fine = update%west%ie_me
js_fine = update%west%js_me
je_fine = update%west%je_me
is_coarse = update%west%is_you
ie_coarse = update%west%ie_you
js_coarse = update%west%js_you
je_coarse = update%west%je_you
case(EAST)
is_fine = update%east%is_me
ie_fine = update%east%ie_me
js_fine = update%east%js_me
je_fine = update%east%je_me
is_coarse = update%east%is_you
ie_coarse = update%east%ie_you
js_coarse = update%east%js_you
je_coarse = update%east%je_you
case(SOUTH)
is_fine = update%south%is_me
ie_fine = update%south%ie_me
js_fine = update%south%js_me
je_fine = update%south%je_me
is_coarse = update%south%is_you
ie_coarse = update%south%ie_you
js_coarse = update%south%js_you
je_coarse = update%south%je_you
case(NORTH)
is_fine = update%north%is_me
ie_fine = update%north%ie_me
js_fine = update%north%js_me
je_fine = update%north%je_me
is_coarse = update%north%is_you
ie_coarse = update%north%ie_you
js_coarse = update%north%js_you
je_coarse = update%north%je_you
case default
call mpp_error(FATAL, "mpp_define_nest_domains.inc: invalid value for argument dir")
end select
end subroutine mpp_get_C2F_index
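!--- A minimal usage sketch, assuming a nest_domain previously set up by
!--- mpp_define_nest_domains (wbuffer and nz are illustrative names): retrieve
!--- the fine/coarse index boxes of the west-side coarse-to-fine halo and size
!--- a buffer from the coarse box:
!---
!---    call mpp_get_C2F_index(nest_domain, is_f, ie_f, js_f, je_f, &
!---                           is_c, ie_c, js_c, je_c, WEST, position=CENTER)
!---    allocate( wbuffer(is_c:ie_c, js_c:je_c, nz) )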
!################################################################
subroutine mpp_get_F2C_index(nest_domain, is_coarse, ie_coarse, js_coarse, je_coarse, &
is_fine, ie_fine, js_fine, je_fine, position)
type(nest_domain_type), intent(in ) :: nest_domain
integer, intent(out) :: is_fine, ie_fine, js_fine, je_fine
integer, intent(out) :: is_coarse, ie_coarse, js_coarse, je_coarse
integer, optional, intent(in ) :: position
integer :: update_position
type(nestSpec), pointer :: update => NULL()
update_position = CENTER
if(present(position)) update_position = position
select case(update_position)
case (CENTER)
update => nest_domain%F2C_T
case (EAST)
update => nest_domain%F2C_E
case (CORNER)
update => nest_domain%F2C_C
case (NORTH)
update => nest_domain%F2C_N
case default
call mpp_error(FATAL, "mpp_define_nest_domains.inc(mpp_get_F2C_index): invalid option argument position")
end select
is_fine = update%center%is_you
ie_fine = update%center%ie_you
js_fine = update%center%js_you
je_fine = update%center%je_you
is_coarse = update%center%is_me
ie_coarse = update%center%ie_me
js_coarse = update%center%js_me
je_coarse = update%center%je_me
end subroutine mpp_get_F2C_index
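!--- A minimal usage sketch (illustrative names only): retrieve the coarse-grid
!--- box that receives fine-grid data together with the matching fine-grid box;
!--- the fine box can be used, for instance, to size a receive buffer on the
!--- coarse PEs:
!---
!---    call mpp_get_F2C_index(nest_domain, is_c, ie_c, js_c, je_c, &
!---                           is_f, ie_f, js_f, je_f, position=CENTER)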
# 2784 "../mpp/mpp_domains.F90" 2
# 1 "../mpp/include/mpp_domains_util.inc" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
!
!
! Set user stack size.
!
!
! This sets the size of an array that is used for internal storage by
! mpp_domains. This array is used, for instance, to buffer the
! data sent and received in halo updates.
!
! This call has implied global synchronization. It should be
! placed somewhere where all PEs can call it.
!
!
! call mpp_domains_set_stack_size(n)
!
!
!
subroutine mpp_domains_set_stack_size(n)
!set the mpp_domains_stack variable to be at least n LONG words long
integer, intent(in) :: n
character(len=8) :: text
if( n.LE.mpp_domains_stack_size )return
# 48
if( allocated(mpp_domains_stack) )deallocate(mpp_domains_stack)
allocate( mpp_domains_stack(n) )
if( allocated(mpp_domains_stack_nonblock) )deallocate(mpp_domains_stack_nonblock)
allocate( mpp_domains_stack_nonblock(n) )
mpp_domains_stack_size = n
write( text,'(i8)' )n
if( mpp_pe().EQ.mpp_root_pe() )call mpp_error( NOTE, 'MPP_DOMAINS_SET_STACK_SIZE: stack size set to '//text//'.' )
return
end subroutine mpp_domains_set_stack_size
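! A minimal usage sketch: the argument is the number of words of internal
! workspace, and the call has implied global synchronization, so every PE
! must reach it (32768 below is an arbitrary illustrative value, not a
! recommendation):
!
!    call mpp_domains_set_stack_size(32768)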
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_DOMAINS: overloaded operators (==, /=) !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
function mpp_domain1D_eq( a, b )
logical :: mpp_domain1D_eq
type(domain1D), intent(in) :: a, b
mpp_domain1D_eq = ( a%compute%begin.EQ.b%compute%begin .AND. &
a%compute%end .EQ.b%compute%end .AND. &
a%data%begin .EQ.b%data%begin .AND. &
a%data%end .EQ.b%data%end .AND. &
a%global%begin .EQ.b%global%begin .AND. &
a%global%end .EQ.b%global%end )
!compare pelists
! if( mpp_domain1D_eq )mpp_domain1D_eq = ASSOCIATED(a%list) .AND. ASSOCIATED(b%list)
! if( mpp_domain1D_eq )mpp_domain1D_eq = size(a%list(:)).EQ.size(b%list(:))
! if( mpp_domain1D_eq )mpp_domain1D_eq = ALL(a%list%pe.EQ.b%list%pe)
return
end function mpp_domain1D_eq
function mpp_domain1D_ne( a, b )
logical :: mpp_domain1D_ne
type(domain1D), intent(in) :: a, b
mpp_domain1D_ne = .NOT. ( a.EQ.b )
return
end function mpp_domain1D_ne
function mpp_domain2D_eq( a, b )
logical :: mpp_domain2D_eq
type(domain2D), intent(in) :: a, b
integer :: nt, n
mpp_domain2d_eq = size(a%x(:)) .EQ. size(b%x(:))
nt = size(a%x(:))
do n = 1, nt
if(mpp_domain2d_eq) mpp_domain2D_eq = a%x(n).EQ.b%x(n) .AND. a%y(n).EQ.b%y(n)
end do
if( mpp_domain2D_eq .AND. ((a%pe.EQ.NULL_PE).OR.(b%pe.EQ.NULL_PE)) )return !NULL_DOMAIN2D
!compare pelists
if( mpp_domain2D_eq )mpp_domain2D_eq = ASSOCIATED(a%list) .AND. ASSOCIATED(b%list)
if( mpp_domain2D_eq )mpp_domain2D_eq = size(a%list(:)).EQ.size(b%list(:))
if( mpp_domain2D_eq )mpp_domain2D_eq = ALL(a%list%pe.EQ.b%list%pe)
if( mpp_domain2D_eq )mpp_domain2D_eq = ALL(a%io_layout .EQ. b%io_layout)
if( mpp_domain2D_eq )mpp_domain2D_eq = a%symmetry .eqv. b%symmetry
return
end function mpp_domain2D_eq
!#####################################################################
function mpp_domain2D_ne( a, b )
logical :: mpp_domain2D_ne
type(domain2D), intent(in) :: a, b
mpp_domain2D_ne = .NOT. ( a.EQ.b )
return
end function mpp_domain2D_ne
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
!     MPP_GET and SET routines: retrieve various components of domains         !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine mpp_get_compute_domain1D( domain, begin, end, size, max_size, is_global )
type(domain1D), intent(in) :: domain
integer, intent(out), optional :: begin, end, size, max_size
logical, intent(out), optional :: is_global
if( PRESENT(begin) )begin = domain%compute%begin
if( PRESENT(end) )end = domain%compute%end
if( PRESENT(size) )size = domain%compute%size
if( PRESENT(max_size) )max_size = domain%compute%max_size
if( PRESENT(is_global) )is_global = domain%compute%is_global
return
end subroutine mpp_get_compute_domain1D
!#####################################################################
subroutine mpp_get_data_domain1D( domain, begin, end, size, max_size, is_global )
type(domain1D), intent(in) :: domain
integer, intent(out), optional :: begin, end, size, max_size
logical, intent(out), optional :: is_global
if( PRESENT(begin) )begin = domain%data%begin
if( PRESENT(end) )end = domain%data%end
if( PRESENT(size) )size = domain%data%size
if( PRESENT(max_size) )max_size = domain%data%max_size
if( PRESENT(is_global) )is_global = domain%data%is_global
return
end subroutine mpp_get_data_domain1D
!#####################################################################
subroutine mpp_get_global_domain1D( domain, begin, end, size, max_size )
type(domain1D), intent(in) :: domain
integer, intent(out), optional :: begin, end, size, max_size
if( PRESENT(begin) )begin = domain%global%begin
if( PRESENT(end) )end = domain%global%end
if( PRESENT(size) )size = domain%global%size
if( PRESENT(max_size) )max_size = domain%global%max_size
return
end subroutine mpp_get_global_domain1D
!#####################################################################
subroutine mpp_get_memory_domain1D( domain, begin, end, size, max_size, is_global )
type(domain1D), intent(in) :: domain
integer, intent(out), optional :: begin, end, size, max_size
logical, intent(out), optional :: is_global
if( PRESENT(begin) )begin = domain%memory%begin
if( PRESENT(end) )end = domain%memory%end
if( PRESENT(size) )size = domain%memory%size
if( PRESENT(max_size) )max_size = domain%memory%max_size
if( PRESENT(is_global) )is_global = domain%memory%is_global
return
end subroutine mpp_get_memory_domain1D
!#####################################################################
subroutine mpp_get_compute_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
x_is_global, y_is_global, tile_count, position )
type(domain2D), intent(in) :: domain
integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
logical, intent(out), optional :: x_is_global, y_is_global
integer, intent(in), optional :: tile_count, position
integer :: tile, ishift, jshift
tile = 1
if(present(tile_count)) tile = tile_count
call mpp_get_compute_domain( domain%x(tile), xbegin, xend, xsize, xmax_size, x_is_global )
call mpp_get_compute_domain( domain%y(tile), ybegin, yend, ysize, ymax_size, y_is_global )
call mpp_get_domain_shift( domain, ishift, jshift, position )
if( PRESENT(xend) ) xend = xend + ishift
if( PRESENT(yend) ) yend = yend + jshift
if( PRESENT(xsize)) xsize = xsize + ishift
if( PRESENT(ysize)) ysize = ysize + jshift
if(PRESENT(xmax_size))xmax_size = xmax_size + ishift
if(PRESENT(ymax_size))ymax_size = ymax_size + jshift
return
end subroutine mpp_get_compute_domain2D
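!--- A minimal usage sketch (isc/iec/jsc/jec and field are illustrative names):
!--- the compute-domain bounds returned here are typically used to dimension
!--- local arrays:
!---
!---    call mpp_get_compute_domain(domain, isc, iec, jsc, jec)
!---    allocate( field(isc:iec, jsc:jec) )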
!#####################################################################
subroutine mpp_get_data_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
x_is_global, y_is_global, tile_count, position )
type(domain2D), intent(in) :: domain
integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
logical, intent(out), optional :: x_is_global, y_is_global
integer, intent(in), optional :: tile_count, position
integer :: tile, ishift, jshift
tile = 1
if(present(tile_count)) tile = tile_count
call mpp_get_data_domain( domain%x(tile), xbegin, xend, xsize, xmax_size, x_is_global )
call mpp_get_data_domain( domain%y(tile), ybegin, yend, ysize, ymax_size, y_is_global )
call mpp_get_domain_shift( domain, ishift, jshift, position )
if( PRESENT(xend) ) xend = xend + ishift
if( PRESENT(yend) ) yend = yend + jshift
if( PRESENT(xsize)) xsize = xsize + ishift
if( PRESENT(ysize)) ysize = ysize + jshift
if(PRESENT(xmax_size))xmax_size = xmax_size + ishift
if(PRESENT(ymax_size))ymax_size = ymax_size + jshift
return
end subroutine mpp_get_data_domain2D
!#####################################################################
subroutine mpp_get_global_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
tile_count, position )
type(domain2D), intent(in) :: domain
integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
integer, intent(in), optional :: tile_count, position
integer :: tile, ishift, jshift
tile = 1
if(present(tile_count)) tile = tile_count
call mpp_get_global_domain( domain%x(tile), xbegin, xend, xsize, xmax_size )
call mpp_get_global_domain( domain%y(tile), ybegin, yend, ysize, ymax_size )
call mpp_get_domain_shift( domain, ishift, jshift, position )
if( PRESENT(xend) ) xend = xend + ishift
if( PRESENT(yend) ) yend = yend + jshift
if( PRESENT(xsize)) xsize = xsize + ishift
if( PRESENT(ysize)) ysize = ysize + jshift
if(PRESENT(xmax_size))xmax_size = xmax_size + ishift
if(PRESENT(ymax_size))ymax_size = ymax_size + jshift
return
end subroutine mpp_get_global_domain2D
!#####################################################################
subroutine mpp_get_memory_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
x_is_global, y_is_global, position)
type(domain2D), intent(in) :: domain
integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
logical, intent(out), optional :: x_is_global, y_is_global
integer, intent(in), optional :: position
integer :: tile, ishift, jshift
tile = 1
call mpp_get_memory_domain( domain%x(tile), xbegin, xend, xsize, xmax_size, x_is_global )
call mpp_get_memory_domain( domain%y(tile), ybegin, yend, ysize, ymax_size, y_is_global )
call mpp_get_domain_shift( domain, ishift, jshift, position )
if( PRESENT(xend) ) xend = xend + ishift
if( PRESENT(yend) ) yend = yend + jshift
if( PRESENT(xsize)) xsize = xsize + ishift
if( PRESENT(ysize)) ysize = ysize + jshift
if(PRESENT(xmax_size))xmax_size = xmax_size + ishift
if(PRESENT(ymax_size))ymax_size = ymax_size + jshift
return
end subroutine mpp_get_memory_domain2D
!#####################################################################
subroutine mpp_set_compute_domain1D( domain, begin, end, size, is_global )
type(domain1D), intent(inout) :: domain
integer, intent(in), optional :: begin, end, size
logical, intent(in), optional :: is_global
if(present(begin)) domain%compute%begin = begin
if(present(end)) domain%compute%end = end
if(present(size)) domain%compute%size = size
if(present(is_global)) domain%compute%is_global = is_global
end subroutine mpp_set_compute_domain1D
!#####################################################################
subroutine mpp_set_compute_domain2D( domain, xbegin, xend, ybegin, yend, xsize, ysize, &
x_is_global, y_is_global, tile_count )
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: xbegin, xend, ybegin, yend, xsize, ysize
logical, intent(in), optional :: x_is_global, y_is_global
integer, intent(in), optional :: tile_count
integer :: tile
tile = 1
if(present(tile_count)) tile = tile_count
call mpp_set_compute_domain(domain%x(tile), xbegin, xend, xsize, x_is_global)
call mpp_set_compute_domain(domain%y(tile), ybegin, yend, ysize, y_is_global)
end subroutine mpp_set_compute_domain2D
!#####################################################################
subroutine mpp_set_data_domain1D( domain, begin, end, size, is_global )
type(domain1D), intent(inout) :: domain
integer, intent(in), optional :: begin, end, size
logical, intent(in), optional :: is_global
if(present(begin)) domain%data%begin = begin
if(present(end)) domain%data%end = end
if(present(size)) domain%data%size = size
if(present(is_global)) domain%data%is_global = is_global
end subroutine mpp_set_data_domain1D
!#####################################################################
subroutine mpp_set_data_domain2D( domain, xbegin, xend, ybegin, yend, xsize, ysize, &
x_is_global, y_is_global, tile_count )
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: xbegin, xend, ybegin, yend, xsize, ysize
logical, intent(in), optional :: x_is_global, y_is_global
integer, intent(in), optional :: tile_count
integer :: tile
tile = 1
if(present(tile_count)) tile = tile_count
call mpp_set_data_domain(domain%x(tile), xbegin, xend, xsize, x_is_global)
call mpp_set_data_domain(domain%y(tile), ybegin, yend, ysize, y_is_global)
end subroutine mpp_set_data_domain2D
!#####################################################################
subroutine mpp_set_global_domain1D( domain, begin, end, size)
type(domain1D), intent(inout) :: domain
integer, intent(in), optional :: begin, end, size
if(present(begin)) domain%global%begin = begin
if(present(end)) domain%global%end = end
if(present(size)) domain%global%size = size
end subroutine mpp_set_global_domain1D
!#####################################################################
subroutine mpp_set_global_domain2D( domain, xbegin, xend, ybegin, yend, xsize, ysize, tile_count )
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: xbegin, xend, ybegin, yend, xsize, ysize
integer, intent(in), optional :: tile_count
integer :: tile
tile = 1
if(present(tile_count)) tile = tile_count
call mpp_set_global_domain(domain%x(tile), xbegin, xend, xsize)
call mpp_set_global_domain(domain%y(tile), ybegin, yend, ysize)
end subroutine mpp_set_global_domain2D
!#####################################################################
!
!
! Retrieve 1D components of 2D decomposition.
!
!
! It is sometimes necessary to have direct recourse to the domain1D types
! that compose a domain2D object. This call retrieves them.
!
!
! call mpp_get_domain_components( domain, x, y )
!
!
!
!
subroutine mpp_get_domain_components( domain, x, y, tile_count )
type(domain2D), intent(in) :: domain
type(domain1D), intent(inout), optional :: x, y
integer, intent(in), optional :: tile_count
integer :: tile
tile = 1
if(present(tile_count)) tile = tile_count
if( PRESENT(x) )x = domain%x(tile)
if( PRESENT(y) )y = domain%y(tile)
return
end subroutine mpp_get_domain_components
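! A minimal usage sketch (xdom and ydom are illustrative names): pull out the
! 1D decompositions and query them with the domain1D accessors:
!
!    call mpp_get_domain_components(domain, xdom, ydom)
!    call mpp_get_compute_domain(xdom, isc, iec)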
!#####################################################################
subroutine mpp_get_compute_domains1D( domain, begin, end, size )
type(domain1D), intent(in) :: domain
integer, intent(out), optional, dimension(:) :: begin, end, size
if( .NOT.module_is_initialized ) &
call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: must first call mpp_domains_init.' )
!we use shape instead of size for error checks because size is used as an argument
if( PRESENT(begin) )then
if( any(shape(begin).NE.shape(domain%list)) ) &
call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: begin array size does not match domain.' )
begin(:) = domain%list(:)%compute%begin
end if
if( PRESENT(end) )then
if( any(shape(end).NE.shape(domain%list)) ) &
call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: end array size does not match domain.' )
end(:) = domain%list(:)%compute%end
end if
if( PRESENT(size) )then
if( any(shape(size).NE.shape(domain%list)) ) &
call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: size array size does not match domain.' )
size(:) = domain%list(:)%compute%size
end if
return
end subroutine mpp_get_compute_domains1D
!#####################################################################
subroutine mpp_get_compute_domains2D( domain, xbegin, xend, xsize, ybegin, yend, ysize, position )
type(domain2D), intent(in) :: domain
integer, intent(out), optional, dimension(:) :: xbegin, xend, xsize, ybegin, yend, ysize
integer, intent(in ), optional :: position
integer :: i, ishift, jshift
call mpp_get_domain_shift( domain, ishift, jshift, position )
if( .NOT.module_is_initialized ) &
call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: must first call mpp_domains_init.' )
if( PRESENT(xbegin) )then
if( size(xbegin(:)).NE.size(domain%list(:)) ) &
call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: xbegin array size does not match domain.' )
do i = 1, size(xbegin(:))
xbegin(i) = domain%list(i-1)%x(1)%compute%begin
end do
end if
if( PRESENT(xend) )then
if( size(xend(:)).NE.size(domain%list(:)) ) &
call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: xend array size does not match domain.' )
do i = 1, size(xend(:))
xend(i) = domain%list(i-1)%x(1)%compute%end + ishift
end do
end if
if( PRESENT(xsize) )then
if( size(xsize(:)).NE.size(domain%list(:)) ) &
call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: xsize array size does not match domain.' )
do i = 1, size(xsize(:))
xsize(i) = domain%list(i-1)%x(1)%compute%size + ishift
end do
end if
if( PRESENT(ybegin) )then
if( size(ybegin(:)).NE.size(domain%list(:)) ) &
call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: ybegin array size does not match domain.' )
do i = 1, size(ybegin(:))
ybegin(i) = domain%list(i-1)%y(1)%compute%begin
end do
end if
if( PRESENT(yend) )then
if( size(yend(:)).NE.size(domain%list(:)) ) &
call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: yend array size does not match domain.' )
do i = 1, size(yend(:))
yend(i) = domain%list(i-1)%y(1)%compute%end + jshift
end do
end if
if( PRESENT(ysize) )then
if( size(ysize(:)).NE.size(domain%list(:)) ) &
call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: ysize array size does not match domain.' )
do i = 1, size(ysize(:))
ysize(i) = domain%list(i-1)%y(1)%compute%size + jshift
end do
end if
return
end subroutine mpp_get_compute_domains2D
!#####################################################################
subroutine mpp_get_domain_extents1D(domain, xextent, yextent)
type(domain2d), intent(in) :: domain
integer, dimension(0:), intent(inout) :: xextent, yextent
integer :: n
if(domain%ntiles .NE. 1) call mpp_error(FATAL,"mpp_domains_util.inc(mpp_get_domain_extents1D): "// &
"ntiles is more than 1, please use mpp_get_domain_extents2D")
if(size(xextent) .NE. size(domain%x(1)%list(:))) call mpp_error(FATAL,"mpp_domains_util.inc(mpp_get_domain_extents1D): "// &
"size(xextent) does not equal to size(domain%x(1)%list(:)))")
if(size(yextent) .NE. size(domain%y(1)%list(:))) call mpp_error(FATAL,"mpp_domains_util.inc(mpp_get_domain_extents1D): "// &
"size(yextent) does not equal to size(domain%y(1)%list(:)))")
do n = 0, size(domain%x(1)%list(:))-1
xextent(n) = domain%x(1)%list(n)%compute%size
enddo
do n = 0, size(domain%y(1)%list(:))-1
yextent(n) = domain%y(1)%list(n)%compute%size
enddo
end subroutine mpp_get_domain_extents1D
!#####################################################################
! This will return xextent and yextent for each tile
subroutine mpp_get_domain_extents2D(domain, xextent, yextent)
type(domain2d), intent(in) :: domain
integer, dimension(:,:), intent(inout) :: xextent, yextent
integer :: ntile, nlist, n, m, ndivx, ndivy, tile, pos
ntile = domain%ntiles
nlist = size(domain%list(:))
if(size(xextent,2) .ne. ntile .or. size(yextent,2) .ne. ntile) call mpp_error(FATAL, &
"mpp_domains_utile.inc: the second dimension size of xextent/yextent is not correct")
ndivx = size(xextent,1); ndivy = size(yextent,1)
do n = 0, nlist-1
if(ANY(domain%list(n)%x(:)%pos>ndivx-1) ) call mpp_error(FATAL, &
"mpp_domains_utile.inc: first dimension size of xextent is less than the x-layout in some tile")
if(ANY(domain%list(n)%y(:)%pos>ndivy-1) ) call mpp_error(FATAL, &
"mpp_domains_utile.inc: first dimension size of yextent is less than the y-layout in some tile")
end do
xextent = 0; yextent=0
do n = 0, nlist-1
do m = 1, size(domain%list(n)%tile_id(:))
tile = domain%list(n)%tile_id(m)
pos = domain%list(n)%x(m)%pos+1
if(xextent(pos, tile) == 0) xextent(pos,tile) = domain%list(n)%x(m)%compute%size
pos = domain%list(n)%y(m)%pos+1
if(yextent(pos, tile) == 0) yextent(pos,tile) = domain%list(n)%y(m)%compute%size
end do
end do
end subroutine mpp_get_domain_extents2D
!#####################################################################
function mpp_get_domain_pe(domain)
type(domain2d), intent(in) :: domain
integer :: mpp_get_domain_pe
mpp_get_domain_pe = domain%pe
end function mpp_get_domain_pe
function mpp_get_domain_tile_root_pe(domain)
type(domain2d), intent(in) :: domain
integer :: mpp_get_domain_tile_root_pe
mpp_get_domain_tile_root_pe = domain%tile_root_pe
end function mpp_get_domain_tile_root_pe
function mpp_get_io_domain(domain)
type(domain2d), intent(in) :: domain
type(domain2d), pointer :: mpp_get_io_domain
if(ASSOCIATED(domain%io_domain)) then
mpp_get_io_domain => domain%io_domain
else
mpp_get_io_domain => NULL()
endif
end function mpp_get_io_domain
!#####################################################################
!
!
!
!
!
subroutine mpp_get_pelist1D( domain, pelist, pos )
type(domain1D), intent(in) :: domain
integer, intent(out) :: pelist(:)
integer, intent(out), optional :: pos
integer :: ndivs
if( .NOT.module_is_initialized ) &
call mpp_error( FATAL, 'MPP_GET_PELIST: must first call mpp_domains_init.' )
ndivs = size(domain%list(:))
if( size(pelist(:)).NE.ndivs ) &
call mpp_error( FATAL, 'MPP_GET_PELIST: pelist array size does not match domain.' )
pelist(:) = domain%list(0:ndivs-1)%pe
if( PRESENT(pos) )pos = domain%pos
return
end subroutine mpp_get_pelist1D
!#####################################################################
!
!
!
!
!
subroutine mpp_get_pelist2D( domain, pelist, pos )
type(domain2D), intent(in) :: domain
integer, intent(out) :: pelist(:)
integer, intent(out), optional :: pos
if( .NOT.module_is_initialized ) &
call mpp_error( FATAL, 'MPP_GET_PELIST: must first call mpp_domains_init.' )
if( size(pelist(:)).NE.size(domain%list(:)) ) &
call mpp_error( FATAL, 'MPP_GET_PELIST: pelist array size does not match domain.' )
pelist(:) = domain%list(:)%pe
if( PRESENT(pos) )pos = domain%pos
return
end subroutine mpp_get_pelist2D
!#####################################################################
!
!
!
!
subroutine mpp_get_layout1D( domain, layout )
type(domain1D), intent(in) :: domain
integer, intent(out) :: layout
if( .NOT.module_is_initialized ) &
call mpp_error( FATAL, 'MPP_GET_LAYOUT: must first call mpp_domains_init.' )
layout = size(domain%list(:))
return
end subroutine mpp_get_layout1D
!#####################################################################
!
! Return the layout (number of divisions in x and y) of a 2-D domain decomposition.
!
subroutine mpp_get_layout2D( domain, layout )
type(domain2D), intent(in) :: domain
integer, intent(out) :: layout(2)
if( .NOT.module_is_initialized ) &
call mpp_error( FATAL, 'MPP_GET_LAYOUT: must first call mpp_domains_init.' )
layout(1) = size(domain%x(1)%list(:))
layout(2) = size(domain%y(1)%list(:))
return
end subroutine mpp_get_layout2D
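!--- Illustrative sketch (not part of the library): "layout" is a hypothetical caller
!--- array receiving the number of domain divisions in x and y.
!
!     integer :: layout(2)
!     call mpp_get_layout( domain, layout )   ! layout(1) = x-divisions, layout(2) = y-divisions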
!#####################################################################
!
! Returns the shift value in the x- and y-direction according to the domain position.
!
! When the domain is symmetric, one extra point may be needed in the
! x- and/or y-direction. This routine returns the shift value based
! on the position.
!
! Example call:
!   call mpp_get_domain_shift( domain, ishift, jshift, position )
!
! domain:          predefined data containing the 2-D domain decomposition.
! ishift, jshift:  returned shift values; each will be 0 or 1.
! position:        position of the data. Its value can be CENTER, EAST, NORTH or CORNER.
!
subroutine mpp_get_domain_shift(domain, ishift, jshift, position)
type(domain2D), intent(in) :: domain
integer, intent(out) :: ishift, jshift
integer, optional, intent(in) :: position
integer :: pos
ishift = 0 ; jshift = 0
pos = CENTER
if(present(position)) pos = position
if(domain%symmetry) then ! shift is non-zero only when the domain is symmetric.
select case(pos)
case(CORNER)
ishift = 1; jshift = 1
case(EAST)
ishift = 1
case(NORTH)
jshift = 1
end select
end if
end subroutine mpp_get_domain_shift
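!--- Illustrative sketch (not part of the library): sizing a symmetric-grid corner field
!--- with the domain shift; "u" and the compute-domain bounds are hypothetical caller
!--- variables.
!
!     call mpp_get_compute_domain( domain, isc, iec, jsc, jec )
!     call mpp_get_domain_shift( domain, ishift, jshift, position=CORNER )
!     allocate( u(isc:iec+ishift, jsc:jec+jshift) )   ! one extra point when the domain is symmetric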
!#####################################################################
subroutine mpp_get_neighbor_pe_1d(domain, direction, pe)
! Return the PE to the right/left of this PE-domain.
type(domain1D), intent(inout) :: domain
integer, intent(in) :: direction
integer, intent(out) :: pe
integer ipos, ipos2, npx
pe = NULL_PE
npx = size(domain%list(:)) ! 0..npx-1
ipos = domain%pos
select case (direction)
case (:-1)
! neighbor on the left
ipos2 = ipos - 1
if(ipos2 < 0) then
if(domain%cyclic) then
ipos2 = npx-1
else
ipos2 = -999
endif
endif
case (0)
! identity
ipos2 = ipos
case (1:)
! neighbor on the right
ipos2 = ipos + 1
if(ipos2 > npx-1) then
if(domain%cyclic) then
ipos2 = 0
else
ipos2 = -999
endif
endif
end select
if(ipos2 >= 0) pe = domain%list(ipos2)%pe
end subroutine mpp_get_neighbor_pe_1d
!#####################################################################
subroutine mpp_get_neighbor_pe_2d(domain, direction, pe)
! Return PE North/South/East/West of this PE-domain.
! direction must be NORTH, SOUTH, EAST or WEST.
type(domain2D), intent(inout) :: domain
integer, intent(in) :: direction
integer, intent(out) :: pe
integer ipos, jpos, npx, npy, ix, iy, ipos0, jpos0
pe = NULL_PE
npx = size(domain%x(1)%list(:)) ! 0..npx-1
npy = size(domain%y(1)%list(:)) ! 0..npy-1
ipos0 = domain%x(1)%pos
jpos0 = domain%y(1)%pos
select case (direction)
case (NORTH)
ix = 0
iy = 1
case (NORTH_EAST)
ix = 1
iy = 1
case (EAST)
ix = 1
iy = 0
case (SOUTH_EAST)
ix = 1
iy =-1
case (SOUTH)
ix = 0
iy =-1
case (SOUTH_WEST)
ix =-1
iy =-1
case (WEST)
ix =-1
iy = 0
case (NORTH_WEST)
ix =-1
iy = 1
case default
call mpp_error( FATAL, &
& 'MPP_GET_NEIGHBOR_PE_2D: direction must be either NORTH, ' &
& // 'SOUTH, EAST, WEST, NORTH_EAST, SOUTH_EAST, SOUTH_WEST or NORTH_WEST')
end select
ipos = ipos0 + ix
jpos = jpos0 + iy
if( (ipos < 0 .or. ipos > npx-1) .and. domain%x(1)%cyclic ) then
! E/W cyclic domain
ipos = modulo(ipos, npx)
endif
if( (ipos < 0 .and. btest(domain%fold,WEST)) .or. &
& (ipos > npx-1 .and. btest(domain%fold,EAST)) ) then
! E or W folded domain
ipos = ipos0
jpos = npy-jpos-1
endif
if( (jpos < 0 .or. jpos > npy-1) .and. domain%y(1)%cyclic ) then
! N/S cyclic
jpos = modulo(jpos, npy)
endif
if( (jpos < 0 .and. btest(domain%fold,SOUTH)) .or. &
& (jpos > npy-1 .and. btest(domain%fold,NORTH)) ) then
! N or S folded
ipos = npx-ipos-1
jpos = jpos0
endif
! get the PE number
pe = NULL_PE
if(ipos >= 0 .and. ipos <= npx-1 .and. jpos >= 0 .and. jpos <= npy-1) then
pe = domain%pearray(ipos, jpos)
endif
end subroutine mpp_get_neighbor_pe_2d
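!--- Illustrative sketch (not part of the library): finding the neighbour PE to the north
!--- and guarding against a non-cyclic boundary; "pe_north" is a hypothetical caller
!--- variable and mpp_get_neighbor_pe is assumed to be the generic name for the routines above.
!
!     call mpp_get_neighbor_pe( domain, NORTH, pe_north )
!     if( pe_north .NE. NULL_PE ) then
!        ! exchange data with pe_north
!     endif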
!#######################################################################
subroutine nullify_domain2d_list(domain)
type(domain2d), intent(inout) :: domain
domain%list =>NULL()
end subroutine nullify_domain2d_list
!#######################################################################
function mpp_domain_is_symmetry(domain)
type(domain2d), intent(in) :: domain
logical :: mpp_domain_is_symmetry
mpp_domain_is_symmetry = domain%symmetry
return
end function mpp_domain_is_symmetry
!#######################################################################
function mpp_domain_is_initialized(domain)
type(domain2d), intent(in) :: domain
logical :: mpp_domain_is_initialized
mpp_domain_is_initialized = domain%initialized
return
end function mpp_domain_is_initialized
!#######################################################################
!--- Private routine used only for mpp_update_domains. This routine compares
!--- whalo, ehalo, shalo and nhalo with the halo sizes used when defining "domain"
!--- to decide whether an update is needed. It also checks the sign of whalo, ehalo,
!--- shalo and nhalo.
function domain_update_is_needed(domain, whalo, ehalo, shalo, nhalo)
type(domain2d), intent(in) :: domain
integer, intent(in) :: whalo, ehalo, shalo, nhalo
logical :: domain_update_is_needed
domain_update_is_needed = .true.
if(whalo == 0 .AND. ehalo==0 .AND. shalo == 0 .AND. nhalo==0 ) then
domain_update_is_needed = .false.
if( debug )call mpp_error(NOTE, &
'mpp_domains_util.inc: halo sizes to be updated are all zero, no update will be done')
return
end if
if( (whalo == -domain%whalo .AND. domain%whalo .NE. 0) .or. &
(ehalo == -domain%ehalo .AND. domain%ehalo .NE. 0) .or. &
(shalo == -domain%shalo .AND. domain%shalo .NE. 0) .or. &
(nhalo == -domain%nhalo .AND. domain%nhalo .NE. 0) ) then
domain_update_is_needed = .false.
call mpp_error(NOTE, 'mpp_domains_util.inc: at least one of the w/e/s/n halo sizes to be updated '// &
'is the negative of the original halo used when defining the domain, no update will be done')
return
end if
end function domain_update_is_needed
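!--- Illustrative note (not part of the library): with the logic above, an update is skipped
!--- either when all requested halo widths are zero, or when a requested width is the exact
!--- negative of the width used to define the domain. For example, for a domain defined with
!--- whalo = ehalo = shalo = nhalo = 2, a request with whalo = -2 returns .false. and no halo
!--- exchange is performed.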
!#######################################################################
! This routine finds (or creates) the overlapSpec whose halo sizes match the input
! whalo, ehalo, shalo and nhalo at the given position.
function search_update_overlap(domain, whalo, ehalo, shalo, nhalo, position)
type(domain2d), intent(inout) :: domain
integer, intent(in) :: whalo, ehalo, shalo, nhalo
integer, intent(in) :: position
type(overlapSpec), pointer :: search_update_overlap
type(overlapSpec), pointer :: update_ref
type(overlapSpec), pointer :: check => NULL()
integer :: ishift, jshift, shift
shift = 0; if(domain%symmetry) shift = 1
select case(position)
case (CENTER)
update_ref => domain%update_T
ishift = 0; jshift = 0
case (CORNER)
update_ref => domain%update_C
ishift = shift; jshift = shift
case (NORTH)
update_ref => domain%update_N
ishift = 0; jshift = shift
case (EAST)
update_ref => domain%update_E
ishift = shift; jshift = 0
case default
call mpp_error(FATAL,"mpp_domains_util.inc(search_update_overlap): position should be CENTER|CORNER|EAST|NORTH")
end select
search_update_overlap => update_ref
do
if(whalo == search_update_overlap%whalo .AND. ehalo == search_update_overlap%ehalo .AND. &
shalo == search_update_overlap%shalo .AND. nhalo == search_update_overlap%nhalo ) then
exit ! found domain
endif
!--- if not found, switch to next
if(.NOT. ASSOCIATED(search_update_overlap%next)) then
allocate(search_update_overlap%next)
search_update_overlap => search_update_overlap%next
if(domain%fold .NE. 0) then
call compute_overlaps(domain, position, search_update_overlap, check, &
ishift, jshift, 0, 0, whalo, ehalo, shalo, nhalo)
else
call set_overlaps(domain, update_ref, search_update_overlap, whalo, ehalo, shalo, nhalo )
endif
exit
else
search_update_overlap => search_update_overlap%next
end if
end do
update_ref => NULL()
end function search_update_overlap
!#######################################################################
! This routine returns the check overlapSpec at the given position.
function search_check_overlap(domain, position)
type(domain2d), intent(in) :: domain
integer, intent(in) :: position
type(overlapSpec), pointer :: search_check_overlap
select case(position)
case (CENTER)
search_check_overlap => NULL()
case (CORNER)
search_check_overlap => domain%check_C
case (NORTH)
search_check_overlap => domain%check_N
case (EAST)
search_check_overlap => domain%check_E
case default
call mpp_error(FATAL,"mpp_domains_util.inc(search_check_overlap): position should be CENTER|CORNER|EAST|NORTH")
end select
end function search_check_overlap
!#######################################################################
! This routine returns the bound overlapSpec at the given position.
function search_bound_overlap(domain, position)
type(domain2d), intent(in) :: domain
integer, intent(in) :: position
type(overlapSpec), pointer :: search_bound_overlap
select case(position)
case (CENTER)
search_bound_overlap => NULL()
case (CORNER)
search_bound_overlap => domain%bound_C
case (NORTH)
search_bound_overlap => domain%bound_N
case (EAST)
search_bound_overlap => domain%bound_E
case default
call mpp_error(FATAL,"mpp_domains_util.inc(search_bound_overlap): position should be CENTER|CORNER|EAST|NORTH")
end select
end function search_bound_overlap
!########################################################################
! return the tile_id on current pe
function mpp_get_tile_id(domain)
type(domain2d), intent(in) :: domain
integer, dimension(size(domain%tile_id(:))) :: mpp_get_tile_id
mpp_get_tile_id = domain%tile_id
return
end function mpp_get_tile_id
!#######################################################################
! return the tile_id on current pelist. one-tile-per-pe is assumed.
subroutine mpp_get_tile_list(domain, tiles)
type(domain2d), intent(in) :: domain
integer, intent(inout) :: tiles(:)
integer :: i
if( size(tiles(:)).NE.size(domain%list(:)) ) &
call mpp_error( FATAL, 'mpp_get_tile_list: tiles array size does not match domain.' )
do i = 1, size(tiles(:))
if(size(domain%list(i-1)%tile_id(:)) > 1) call mpp_error( FATAL, &
'mpp_get_tile_list: only supports one tile per pe now, contact developer')
tiles(i) = domain%list(i-1)%tile_id(1)
end do
end subroutine mpp_get_tile_list
!########################################################################
! return number of tiles in mosaic
function mpp_get_ntile_count(domain)
type(domain2d), intent(in) :: domain
integer :: mpp_get_ntile_count
mpp_get_ntile_count = domain%ntiles
return
end function mpp_get_ntile_count
!########################################################################
! return the number of tiles on the current pe
function mpp_get_current_ntile(domain)
type(domain2d), intent(in) :: domain
integer :: mpp_get_current_ntile
mpp_get_current_ntile = size(domain%tile_id(:))
return
end function mpp_get_current_ntile
!#######################################################################
! return .true. if the current pe is the root pe of its tile
! (i.e. domain%pe equals domain%tile_root_pe), otherwise return .false.
function mpp_domain_is_tile_root_pe(domain)
type(domain2d), intent(in) :: domain
logical :: mpp_domain_is_tile_root_pe
mpp_domain_is_tile_root_pe = domain%pe == domain%tile_root_pe;
end function mpp_domain_is_tile_root_pe
!#########################################################################
! return number of processors used on current tile.
function mpp_get_tile_npes(domain)
type(domain2d), intent(in) :: domain
integer :: mpp_get_tile_npes
integer :: i, tile
!--- When there is more than one tile on this pe, we assume each of those tiles
!--- is confined entirely to this pe.
if(size(domain%tile_id(:)) > 1) then
mpp_get_tile_npes = 1
else
mpp_get_tile_npes = 0
tile = domain%tile_id(1)
do i = 0, size(domain%list(:))-1
if(tile == domain%list(i)%tile_id(1) ) mpp_get_tile_npes = mpp_get_tile_npes + 1
end do
endif
end function mpp_get_tile_npes
!########################################################################
! get the list of processors used on the current tile.
subroutine mpp_get_tile_pelist(domain, pelist)
type(domain2d), intent(in) :: domain
integer, intent(inout) :: pelist(:)
integer :: npes_on_tile
integer :: i, tile, pos
npes_on_tile = mpp_get_tile_npes(domain)
if(size(pelist(:)) .NE. npes_on_tile) call mpp_error(FATAL, &
"mpp_domains_util.inc(mpp_get_tile_pelist): size(pelist) does not equal npes on current tile")
tile = domain%tile_id(1)
pos = 0
do i = 0, size(domain%list(:))-1
if(tile == domain%list(i)%tile_id(1)) then
pos = pos+1
pelist(pos) = domain%list(i)%pe
endif
enddo
return
end subroutine mpp_get_tile_pelist
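!--- Illustrative sketch (not part of the library): sizing the tile PE list before filling it;
!--- "npes_tile" and "pelist" are hypothetical caller variables.
!
!     npes_tile = mpp_get_tile_npes( domain )
!     allocate( pelist(npes_tile) )
!     call mpp_get_tile_pelist( domain, pelist )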
!#####################################################################
subroutine mpp_get_tile_compute_domains( domain, xbegin, xend, ybegin, yend, position )
type(domain2D), intent(in) :: domain
integer, intent(out), dimension(:) :: xbegin, xend, ybegin, yend
integer, intent(in ), optional :: position
integer :: i, ishift, jshift
integer :: npes_on_tile, pos, tile
call mpp_get_domain_shift( domain, ishift, jshift, position )
if( .NOT.module_is_initialized ) &
call mpp_error( FATAL, 'mpp_get_compute_domains2D: must first call mpp_domains_init.' )
npes_on_tile = mpp_get_tile_npes(domain)
if(size(xbegin(:)) .NE. npes_on_tile) call mpp_error(FATAL, &
"mpp_domains_util.inc(mpp_get_compute_domains2D): size(xbegin) does not equal npes on current tile")
if(size(xend(:)) .NE. npes_on_tile) call mpp_error(FATAL, &
"mpp_domains_util.inc(mpp_get_compute_domains2D): size(xend) does not equal npes on current tile")
if(size(ybegin(:)) .NE. npes_on_tile) call mpp_error(FATAL, &
"mpp_domains_util.inc(mpp_get_compute_domains2D): size(ybegin) does not equal npes on current tile")
if(size(yend(:)) .NE. npes_on_tile) call mpp_error(FATAL, &
"mpp_domains_util.inc(mpp_get_compute_domains2D): size(yend) does not equal npes on current tile")
tile = domain%tile_id(1)
pos = 0
do i = 0, size(domain%list(:))-1
if(tile == domain%list(i)%tile_id(1)) then
pos = pos+1
xbegin(pos) = domain%list(i)%x(1)%compute%begin
xend (pos) = domain%list(i)%x(1)%compute%end + ishift
ybegin(pos) = domain%list(i)%y(1)%compute%begin
yend (pos) = domain%list(i)%y(1)%compute%end + jshift
endif
enddo
return
end subroutine mpp_get_tile_compute_domains
!#############################################################################
function mpp_get_num_overlap(domain, action, p, position)
type(domain2d), intent(in) :: domain
integer, intent(in) :: action
integer, intent(in) :: p
integer, optional, intent(in) :: position
integer :: mpp_get_num_overlap
type(overlapSpec), pointer :: update => NULL()
integer :: pos
pos = CENTER
if(present(position)) pos = position
select case(pos)
case (CENTER)
update => domain%update_T
case (CORNER)
update => domain%update_C
case (EAST)
update => domain%update_E
case (NORTH)
update => domain%update_N
case default
call mpp_error( FATAL, "mpp_domains_mod(mpp_get_num_overlap): invalid option of position")
end select
if(action == EVENT_SEND) then
if(p< 1 .OR. p > update%nsend) call mpp_error( FATAL, &
"mpp_domains_mod(mpp_get_num_overlap): p should be between 1 and update%nsend")
mpp_get_num_overlap = update%send(p)%count
else if(action == EVENT_RECV) then
if(p< 1 .OR. p > update%nrecv) call mpp_error( FATAL, &
"mpp_domains_mod(mpp_get_num_overlap): p should be between 1 and update%nrecv")
mpp_get_num_overlap = update%recv(p)%count
else
call mpp_error( FATAL, "mpp_domains_mod(mpp_get_num_overlap): invalid option of action")
end if
end function mpp_get_num_overlap
!#############################################################################
subroutine mpp_get_update_size(domain, nsend, nrecv, position)
type(domain2d), intent(in) :: domain
integer, intent(out) :: nsend, nrecv
integer, optional, intent(in) :: position
integer :: pos
pos = CENTER
if(present(position)) pos = position
select case(pos)
case (CENTER)
nsend = domain%update_T%nsend
nrecv = domain%update_T%nrecv
case (CORNER)
nsend = domain%update_C%nsend
nrecv = domain%update_C%nrecv
case (EAST)
nsend = domain%update_E%nsend
nrecv = domain%update_E%nrecv
case (NORTH)
nsend = domain%update_N%nsend
nrecv = domain%update_N%nrecv
case default
call mpp_error( FATAL, "mpp_domains_mod(mpp_get_update_size): invalid option of position")
end select
end subroutine mpp_get_update_size
!#############################################################################
subroutine mpp_get_update_pelist(domain, action, pelist, position)
type(domain2d), intent(in) :: domain
integer, intent(in) :: action
integer, intent(inout) :: pelist(:)
integer, optional, intent(in) :: position
type(overlapSpec), pointer :: update => NULL()
integer :: pos, p
pos = CENTER
if(present(position)) pos = position
select case(pos)
case (CENTER)
update => domain%update_T
case (CORNER)
update => domain%update_C
case (EAST)
update => domain%update_E
case (NORTH)
update => domain%update_N
case default
call mpp_error( FATAL, "mpp_domains_mod(mpp_get_update_pelist): invalid option of position")
end select
if(action == EVENT_SEND) then
if(size(pelist) .NE. update%nsend) call mpp_error( FATAL, &
"mpp_domains_mod(mpp_get_update_pelist): size of pelist does not match update%nsend")
do p = 1, update%nsend
pelist(p) = update%send(p)%pe
enddo
else if(action == EVENT_RECV) then
if(size(pelist) .NE. update%nrecv) call mpp_error( FATAL, &
"mpp_domains_mod(mpp_get_update_pelist): size of pelist does not match update%nrecv")
do p = 1, update%nrecv
pelist(p) = update%recv(p)%pe
enddo
else
call mpp_error( FATAL, "mpp_domains_mod(mpp_get_update_pelist): invalid option of action")
end if
end subroutine mpp_get_update_pelist
!#############################################################################
subroutine mpp_get_overlap(domain, action, p, is, ie, js, je, dir, rot, position)
type(domain2d), intent(in) :: domain
integer, intent(in) :: action
integer, intent(in) :: p
integer, dimension(:), intent(out) :: is, ie, js, je
integer, dimension(:), intent(out) :: dir, rot
integer, optional, intent(in) :: position
type(overlapSpec), pointer :: update => NULL()
type(overlap_type), pointer :: overlap => NULL()
integer :: count, pos
pos = CENTER
if(present(position)) pos = position
select case(pos)
case (CENTER)
update => domain%update_T
case (CORNER)
update => domain%update_C
case (EAST)
update => domain%update_E
case (NORTH)
update => domain%update_N
case default
call mpp_error( FATAL, "mpp_domains_mod(mpp_get_overlap): invalid option of position")
end select
if(action == EVENT_SEND) then
overlap => update%send(p)
else if(action == EVENT_RECV) then
overlap => update%recv(p)
else
call mpp_error( FATAL, "mpp_domains_mod(mpp_get_overlap): invalid option of action")
end if
count = overlap%count
if(size(is(:)) .NE. count .OR. size(ie (:)) .NE. count .OR. size(js (:)) .NE. count .OR. &
size(je(:)) .NE. count .OR. size(dir(:)) .NE. count .OR. size(rot(:)) .NE. count ) &
call mpp_error( FATAL, "mpp_domains_mod(mpp_get_overlap): size mismatch between number of overlap and array size")
is = overlap%is (1:count)
ie = overlap%ie (1:count)
js = overlap%js (1:count)
je = overlap%je (1:count)
dir = overlap%dir (1:count)
rot = overlap%rotation(1:count)
update => NULL()
overlap => NULL()
end subroutine mpp_get_overlap
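!--- Illustrative sketch (not part of the library): walking the send overlaps of the T-cell
!--- update. "nsend", "nrecv", "cnt", "p" and the extent arrays are hypothetical caller
!--- variables; EVENT_SEND selects the send side of the overlap.
!
!     call mpp_get_update_size( domain, nsend, nrecv, position=CENTER )
!     do p = 1, nsend
!        cnt = mpp_get_num_overlap( domain, EVENT_SEND, p, position=CENTER )
!        allocate( is(cnt), ie(cnt), js(cnt), je(cnt), dir(cnt), rot(cnt) )
!        call mpp_get_overlap( domain, EVENT_SEND, p, is, ie, js, je, dir, rot, position=CENTER )
!        deallocate( is, ie, js, je, dir, rot )
!     enddo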
!##################################################################
function mpp_get_domain_name(domain)
type(domain2d), intent(in) :: domain
character(len=NAME_LENGTH) :: mpp_get_domain_name
mpp_get_domain_name = domain%name
end function mpp_get_domain_name
!#################################################################
function mpp_get_domain_root_pe(domain)
type(domain2d), intent(in) :: domain
integer :: mpp_get_domain_root_pe
mpp_get_domain_root_pe = domain%list(0)%pe
end function mpp_get_domain_root_pe
!#################################################################
function mpp_get_domain_npes(domain)
type(domain2d), intent(in) :: domain
integer :: mpp_get_domain_npes
mpp_get_domain_npes = size(domain%list(:))
return
end function mpp_get_domain_npes
!################################################################
subroutine mpp_get_domain_pelist(domain, pelist)
type(domain2d), intent(in) :: domain
integer, intent(out) :: pelist(:)
integer :: p
if(size(pelist(:)) .NE. size(domain%list(:)) ) then
call mpp_error(FATAL, "mpp_get_domain_pelist: size(pelist(:)) .NE. size(domain%list(:)) ")
endif
do p = 0, size(domain%list(:))-1
pelist(p+1) = domain%list(p)%pe
enddo
return
end subroutine mpp_get_domain_pelist
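!--- Illustrative sketch (not part of the library): sizing the domain PE list from
!--- mpp_get_domain_npes before retrieving it; "pelist" is a hypothetical caller array.
!
!     allocate( pelist(mpp_get_domain_npes(domain)) )
!     call mpp_get_domain_pelist( domain, pelist )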
!#################################################################
function mpp_get_io_domain_layout(domain)
type(domain2d), intent(in) :: domain
integer, dimension(2) :: mpp_get_io_domain_layout
mpp_get_io_domain_layout = domain%io_layout
end function mpp_get_io_domain_layout
!################################################################
function get_rank_send(domain, overlap_x, overlap_y, rank_x, rank_y, ind_x, ind_y)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: overlap_x, overlap_y
integer, intent(out) :: rank_x, rank_y, ind_x, ind_y
integer :: get_rank_send
integer :: nlist, nsend_x, nsend_y
nlist = size(domain%list(:))
nsend_x = overlap_x%nsend
nsend_y = overlap_y%nsend
rank_x = nlist+1
rank_y = nlist+1
if(nsend_x>0) rank_x = overlap_x%send(1)%pe - domain%pe
if(nsend_y>0) rank_y = overlap_y%send(1)%pe - domain%pe
if(rank_x .LT. 0) rank_x = rank_x + nlist
if(rank_y .LT. 0) rank_y = rank_y + nlist
get_rank_send = min(rank_x, rank_y)
ind_x = nsend_x + 1
ind_y = nsend_y + 1
if(get_rank_send < nlist+1) then
if(nsend_x>0) ind_x = 1
if(nsend_y>0) ind_y = 1
endif
end function get_rank_send
!############################################################################
function get_rank_recv(domain, overlap_x, overlap_y, rank_x, rank_y, ind_x, ind_y)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: overlap_x, overlap_y
integer, intent(out) :: rank_x, rank_y, ind_x, ind_y
integer :: get_rank_recv
integer :: nlist, nrecv_x, nrecv_y
nlist = size(domain%list(:))
nrecv_x = overlap_x%nrecv
nrecv_y = overlap_y%nrecv
rank_x = -1
rank_y = -1
if(nrecv_x>0) then
rank_x = overlap_x%recv(1)%pe - domain%pe
if(rank_x .LE. 0) rank_x = rank_x + nlist
endif
if(nrecv_y>0) then
rank_y = overlap_y%recv(1)%pe - domain%pe
if(rank_y .LE. 0) rank_y = rank_y + nlist
endif
get_rank_recv = max(rank_x, rank_y)
ind_x = nrecv_x + 1
ind_y = nrecv_y + 1
if(get_rank_recv < nlist+1) then
if(nrecv_x>0) ind_x = 1
if(nrecv_y>0) ind_y = 1
endif
end function get_rank_recv
function get_vector_recv(domain, update_x, update_y, ind_x, ind_y, start_pos, pelist)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update_x, update_y
integer, intent(out) :: ind_x(:), ind_y(:)
integer, intent(out) :: start_pos(:)
integer, intent(out) :: pelist(:)
integer :: nlist, nrecv_x, nrecv_y, ntot, n
integer :: ix, iy, rank_x, rank_y, cur_pos
integer :: get_vector_recv
nlist = size(domain%list(:))
nrecv_x = update_x%nrecv
nrecv_y = update_y%nrecv
ntot = nrecv_x + nrecv_y
n = 1
ix = 1
iy = 1
ind_x = -1
ind_y = -1
get_vector_recv = 0
cur_pos = 0
do while (n<=ntot)
if(ix <= nrecv_x ) then
rank_x = update_x%recv(ix)%pe-domain%pe
if(rank_x .LE. 0) rank_x = rank_x + nlist
else
rank_x = -1
endif
if(iy <= nrecv_y ) then
rank_y = update_y%recv(iy)%pe-domain%pe
if(rank_y .LE. 0) rank_y = rank_y + nlist
else
rank_y = -1
endif
get_vector_recv = get_vector_recv + 1
start_pos(get_vector_recv) = cur_pos
if( rank_x == rank_y ) then
n = n+2
ind_x (get_vector_recv) = ix
ind_y (get_vector_recv) = iy
cur_pos = cur_pos + update_x%recv(ix)%totsize + update_y%recv(iy)%totsize
pelist(get_vector_recv) = update_x%recv(ix)%pe
ix = ix + 1
iy = iy + 1
else if ( rank_x > rank_y ) then
n = n+1
ind_x (get_vector_recv) = ix
ind_y (get_vector_recv) = -1
cur_pos = cur_pos + update_x%recv(ix)%totsize
pelist(get_vector_recv) = update_x%recv(ix)%pe
ix = ix + 1
else if ( rank_y > rank_x ) then
n = n+1
ind_x (get_vector_recv) = -1
ind_y (get_vector_recv) = iy
cur_pos = cur_pos + update_y%recv(iy)%totsize
pelist(get_vector_recv) = update_y%recv(iy)%pe
iy = iy+1
endif
end do
end function get_vector_recv
function get_vector_send(domain, update_x, update_y, ind_x, ind_y, start_pos, pelist)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update_x, update_y
integer, intent(out) :: ind_x(:), ind_y(:)
integer, intent(out) :: start_pos(:)
integer, intent(out) :: pelist(:)
integer :: nlist, nsend_x, nsend_y, ntot, n
integer :: ix, iy, rank_x, rank_y, cur_pos
integer :: get_vector_send
nlist = size(domain%list(:))
nsend_x = update_x%nsend
nsend_y = update_y%nsend
ntot = nsend_x + nsend_y
n = 1
ix = 1
iy = 1
ind_x = -1
ind_y = -1
get_vector_send = 0
cur_pos = 0
do while (n<=ntot)
if(ix <= nsend_x ) then
rank_x = update_x%send(ix)%pe-domain%pe
if(rank_x .LT. 0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
if(iy <= nsend_y ) then
rank_y = update_y%send(iy)%pe-domain%pe
if(rank_y .LT. 0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
get_vector_send = get_vector_send + 1
start_pos(get_vector_send) = cur_pos
if( rank_x == rank_y ) then
n = n+2
ind_x (get_vector_send) = ix
ind_y (get_vector_send) = iy
cur_pos = cur_pos + update_x%send(ix)%totsize + update_y%send(iy)%totsize
pelist (get_vector_send) = update_x%send(ix)%pe
ix = ix + 1
iy = iy + 1
else if ( rank_x < rank_y ) then
n = n+1
ind_x (get_vector_send) = ix
ind_y (get_vector_send) = -1
cur_pos = cur_pos + update_x%send(ix)%totsize
pelist (get_vector_send) = update_x%send(ix)%pe
ix = ix + 1
else if ( rank_y < rank_x ) then
n = n+1
ind_x (get_vector_send) = -1
ind_y (get_vector_send) = iy
cur_pos = cur_pos + update_y%send(iy)%totsize
pelist (get_vector_send) = update_y%send(iy)%pe
iy = iy+1
endif
end do
end function get_vector_send
!############################################################################
function get_rank_unpack(domain, overlap_x, overlap_y, rank_x, rank_y, ind_x, ind_y)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: overlap_x, overlap_y
integer, intent(out) :: rank_x, rank_y, ind_x, ind_y
integer :: get_rank_unpack
integer :: nlist, nrecv_x, nrecv_y
nlist = size(domain%list(:))
nrecv_x = overlap_x%nrecv
nrecv_y = overlap_y%nrecv
rank_x = nlist+1
rank_y = nlist+1
if(nrecv_x>0) rank_x = overlap_x%recv(nrecv_x)%pe - domain%pe
if(nrecv_y>0) rank_y = overlap_y%recv(nrecv_y)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
if(rank_y .LE.0) rank_y = rank_y + nlist
get_rank_unpack = min(rank_x, rank_y)
ind_x = 0
ind_y = 0
if(get_rank_unpack < nlist+1) then
if(nrecv_x >0) ind_x = nrecv_x
if(nrecv_y >0) ind_y = nrecv_y
endif
end function get_rank_unpack
function get_mesgsize(overlap, do_dir)
type(overlap_type), intent(in) :: overlap
logical, intent(in) :: do_dir(:)
integer :: get_mesgsize
integer :: n, dir
get_mesgsize = 0
do n = 1, overlap%count
dir = overlap%dir(n)
if(do_dir(dir)) then
get_mesgsize = get_mesgsize + overlap%msgsize(n)
end if
end do
end function get_mesgsize
!#############################################################################
subroutine mpp_set_domain_symmetry(domain, symmetry)
type(domain2D), intent(inout) :: domain
logical, intent(in ) :: symmetry
domain%symmetry = symmetry
end subroutine mpp_set_domain_symmetry
subroutine mpp_copy_domain1D(domain_in, domain_out)
type(domain1D), intent(in) :: domain_in
type(domain1D), intent(inout) :: domain_out
domain_out%compute = domain_in%compute
domain_out%data = domain_in%data
domain_out%global = domain_in%global
domain_out%memory = domain_in%memory
domain_out%cyclic = domain_in%cyclic
domain_out%pe = domain_in%pe
domain_out%pos = domain_in%pos
end subroutine mpp_copy_domain1D
!#################################################################
!z1l: This is not fully implemented. The current purpose is to make
! it work in read_data.
subroutine mpp_copy_domain2D(domain_in, domain_out)
type(domain2D), intent(in) :: domain_in
type(domain2D), intent(inout) :: domain_out
integer :: n, ntiles
domain_out%id = domain_in%id
domain_out%pe = domain_in%pe
domain_out%fold = domain_in%fold
domain_out%pos = domain_in%pos
domain_out%symmetry = domain_in%symmetry
domain_out%whalo = domain_in%whalo
domain_out%ehalo = domain_in%ehalo
domain_out%shalo = domain_in%shalo
domain_out%nhalo = domain_in%nhalo
domain_out%ntiles = domain_in%ntiles
domain_out%max_ntile_pe = domain_in%max_ntile_pe
domain_out%ncontacts = domain_in%ncontacts
domain_out%rotated_ninety = domain_in%rotated_ninety
domain_out%initialized = domain_in%initialized
domain_out%tile_root_pe = domain_in%tile_root_pe
domain_out%io_layout = domain_in%io_layout
domain_out%name = domain_in%name
ntiles = size(domain_in%x(:))
allocate(domain_out%x(ntiles), domain_out%y(ntiles), domain_out%tile_id(ntiles) )
do n = 1, ntiles
call mpp_copy_domain1D(domain_in%x(n), domain_out%x(n))
call mpp_copy_domain1D(domain_in%y(n), domain_out%y(n))
enddo
domain_out%tile_id = domain_in%tile_id
return
end subroutine mpp_copy_domain2D
!######################################################################
subroutine set_group_update(group, domain)
type(mpp_group_update_type), intent(inout) :: group
type(domain2D), intent(inout) :: domain
integer :: nscalar, nvector, nlist
integer :: nsend, nrecv, nsend_old, nrecv_old
integer :: nsend_s, nsend_x, nsend_y
integer :: nrecv_s, nrecv_x, nrecv_y
integer :: update_buffer_pos, tot_recv_size, tot_send_size
integer :: msgsize_s, msgsize_x, msgsize_y, msgsize
logical :: recv_s(8), send_s(8)
logical :: recv_x(8), send_x(8), recv_y(8), send_y(8)
integer :: ntot, n, l, m, ksize
integer :: i_s, i_x, i_y, rank_s, rank_x, rank_y, rank
integer :: ind_s(3*MAXOVERLAP)
integer :: ind_x(3*MAXOVERLAP)
integer :: ind_y(3*MAXOVERLAP)
integer :: pelist(3*MAXOVERLAP), to_pe_list(3*MAXOVERLAP)
integer :: buffer_pos_recv(3*MAXOVERLAP), buffer_pos_send(3*MAXOVERLAP)
integer :: recv_size(3*MAXOVERLAP), send_size(3*MAXOVERLAP)
integer :: position_x, position_y, npack, nunpack, dir
integer :: pack_buffer_pos, unpack_buffer_pos
integer :: omp_get_num_threads, nthreads
character(len=8) :: text
type(overlap_type), pointer :: overPtr => NULL()
type(overlapSpec), pointer :: update_s => NULL()
type(overlapSpec), pointer :: update_x => NULL()
type(overlapSpec), pointer :: update_y => NULL()
nscalar = group%nscalar
nvector = group%nvector
!--- get the overlap data type
select case(group%gridtype)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case (DGRID_NE, DGRID_SW)
position_x = NORTH
position_y = EAST
case default
call mpp_error(FATAL, "set_group_update: invalid value of gridtype")
end select
if(nscalar>0) then
update_s => search_update_overlap(domain, group%whalo_s, group%ehalo_s, &
group%shalo_s, group%nhalo_s, group%position)
endif
if(nvector>0) then
update_x => search_update_overlap(domain, group%whalo_v, group%ehalo_v, &
group%shalo_v, group%nhalo_v, position_x)
update_y => search_update_overlap(domain, group%whalo_v, group%ehalo_v, &
group%shalo_v, group%nhalo_v, position_y)
endif
if(nscalar > 0) then
recv_s = group%recv_s
send_s = recv_s
endif
if(nvector > 0) then
recv_x = group%recv_x
send_x = recv_x
recv_y = group%recv_y
send_y = recv_y
end if
nlist = size(domain%list(:))
group%initialized = .true.
nsend_s = 0; nsend_x = 0; nsend_y = 0
nrecv_s = 0; nrecv_x = 0; nrecv_y = 0
if(nscalar > 0) then
!--- This check could not be done because of memory domain
! if( group%isize_s .NE. (group%ie_s-group%is_s+1) .OR. group%jsize_s .NE. (group%je_s-group%js_s+1)) &
! call mpp_error(FATAL, "set_group_update: mismatch of size of the field and domain memory domain")
nsend_s = update_s%nsend
nrecv_s = update_s%nrecv
endif
!--- ksize_s must equal ksize_v
if(nvector > 0 .AND. nscalar > 0) then
if(group%ksize_s .NE. group%ksize_v) then
call mpp_error(FATAL, "set_group_update: ksize_s and ksize_v are not equal")
endif
ksize = group%ksize_s
else if (nscalar > 0) then
ksize = group%ksize_s
else if (nvector > 0) then
ksize = group%ksize_v
else
call mpp_error(FATAL, "set_group_update: nscalar and nvector are all 0")
endif
nthreads = 1
!$OMP PARALLEL
!$ nthreads = omp_get_num_threads()
!$OMP END PARALLEL
if( nthreads > nthread_control_loop ) then
group%k_loop_inside = .FALSE.
else
group%k_loop_inside = .TRUE.
endif
if(nvector > 0) then
!--- This check could not be done because of memory domain
! if( group%isize_x .NE. (group%ie_x-group%is_x+1) .OR. group%jsize_x .NE. (group%je_x-group%js_x+1)) &
! call mpp_error(FATAL, "set_group_update: mismatch of size of the fieldx and domain memory domain")
! if( group%isize_y .NE. (group%ie_y-group%is_y+1) .OR. group%jsize_y .NE. (group%je_y-group%js_y+1)) &
! call mpp_error(FATAL, "set_group_update: mismatch of size of the fieldy and domain memory domain")
nsend_x = update_x%nsend
nrecv_x = update_x%nrecv
nsend_y = update_y%nsend
nrecv_y = update_y%nrecv
endif
!figure out message size for each processor.
ntot = nrecv_s + nrecv_x + nrecv_y
if(ntot > 3*MAXOVERLAP) call mpp_error(FATAL, "set_group_update: ntot is greater than 3*MAXOVERLAP")
n = 1
i_s = 1
i_x = 1
i_y = 1
ind_s = -1
ind_x = -1
ind_y = -1
nrecv = 0
do while(n<=ntot)
if( i_s <= nrecv_s ) then
rank_s = update_s%recv(i_s)%pe-domain%pe
if(rank_s .LE. 0) rank_s = rank_s + nlist
else
rank_s = -1
endif
if( i_x <= nrecv_x ) then
rank_x = update_x%recv(i_x)%pe-domain%pe
if(rank_x .LE. 0) rank_x = rank_x + nlist
else
rank_x = -1
endif
if( i_y <= nrecv_y ) then
rank_y = update_y%recv(i_y)%pe-domain%pe
if(rank_y .LE. 0) rank_y = rank_y + nlist
else
rank_y = -1
endif
nrecv = nrecv + 1
rank = maxval((/rank_s, rank_x, rank_y/))
if(rank == rank_s) then
n = n + 1
ind_s(nrecv) = i_s
pelist(nrecv) = update_s%recv(i_s)%pe
i_s = i_s + 1
endif
if(rank == rank_x) then
n = n + 1
ind_x(nrecv) = i_x
pelist(nrecv) = update_x%recv(i_x)%pe
i_x = i_x + 1
endif
if(rank == rank_y) then
n = n + 1
ind_y(nrecv) = i_y
pelist(nrecv) = update_y%recv(i_y)%pe
i_y = i_y + 1
endif
enddo
nrecv_old = nrecv
nrecv = 0
update_buffer_pos = 0
tot_recv_size = 0
!--- setup for recv
do l = 1, nrecv_old
msgsize_s = 0
msgsize_x = 0
msgsize_y = 0
m = ind_s(l)
if(m>0) msgsize_s = get_mesgsize(update_s%recv(m), recv_s)*ksize*nscalar
m = ind_x(l)
if(m>0) msgsize_x = get_mesgsize(update_x%recv(m), recv_x)*ksize*nvector
m = ind_y(l)
if(m>0) msgsize_y = get_mesgsize(update_y%recv(m), recv_y)*ksize*nvector
msgsize = msgsize_s + msgsize_x + msgsize_y
if( msgsize.GT.0 )then
tot_recv_size = tot_recv_size + msgsize
nrecv = nrecv + 1
if(nrecv > MAXOVERLAP) then
call mpp_error(FATAL, "set_group_update: nrecv is greater than MAXOVERLAP, increase MAXOVERLAP")
endif
group%from_pe(nrecv) = pelist(l)
group%recv_size(nrecv) = msgsize
group%buffer_pos_recv(nrecv) = update_buffer_pos
update_buffer_pos = update_buffer_pos + msgsize
end if
end do
group%nrecv = nrecv
!--- setup for unpack
nunpack = 0
unpack_buffer_pos = 0
do l = 1, nrecv_old
m = ind_s(l)
if(m>0) then
overptr => update_s%recv(m)
do n = 1, overptr%count
dir = overptr%dir(n)
if(recv_s(dir)) then
nunpack = nunpack + 1
if(nunpack > MAXOVERLAP) call mpp_error(FATAL, &
"set_group_update: nunpack is greater than MAXOVERLAP, increase MAXOVERLAP 1")
group%unpack_type(nunpack) = FIELD_S
group%unpack_buffer_pos(nunpack) = unpack_buffer_pos
group%unpack_rotation(nunpack) = overptr%rotation(n)
group%unpack_is(nunpack) = overptr%is(n)
group%unpack_ie(nunpack) = overptr%ie(n)
group%unpack_js(nunpack) = overptr%js(n)
group%unpack_je(nunpack) = overptr%je(n)
group%unpack_size(nunpack) = overptr%msgsize(n)*nscalar
unpack_buffer_pos = unpack_buffer_pos + group%unpack_size(nunpack)*ksize
end if
end do
end if
m = ind_x(l)
if(m>0) then
overptr => update_x%recv(m)
do n = 1, overptr%count
dir = overptr%dir(n)
if(recv_x(dir)) then
nunpack = nunpack + 1
if(nunpack > MAXOVERLAP) call mpp_error(FATAL, &
"set_group_update: nunpack is greater than MAXOVERLAP, increase MAXOVERLAP 2")
group%unpack_type(nunpack) = FIELD_X
group%unpack_buffer_pos(nunpack) = unpack_buffer_pos
group%unpack_rotation(nunpack) = overptr%rotation(n)
group%unpack_is(nunpack) = overptr%is(n)
group%unpack_ie(nunpack) = overptr%ie(n)
group%unpack_js(nunpack) = overptr%js(n)
group%unpack_je(nunpack) = overptr%je(n)
group%unpack_size(nunpack) = overptr%msgsize(n)*nvector
unpack_buffer_pos = unpack_buffer_pos + group%unpack_size(nunpack)*ksize
end if
end do
end if
m = ind_y(l)
if(m>0) then
overptr => update_y%recv(m)
do n = 1, overptr%count
dir = overptr%dir(n)
if(recv_y(dir)) then
nunpack = nunpack + 1
if(nunpack > MAXOVERLAP) call mpp_error(FATAL, &
"set_group_update: nunpack is greater than MAXOVERLAP, increase MAXOVERLAP 3")
group%unpack_type(nunpack) = FIELD_Y
group%unpack_buffer_pos(nunpack) = unpack_buffer_pos
group%unpack_rotation(nunpack) = overptr%rotation(n)
group%unpack_is(nunpack) = overptr%is(n)
group%unpack_ie(nunpack) = overptr%ie(n)
group%unpack_js(nunpack) = overptr%js(n)
group%unpack_je(nunpack) = overptr%je(n)
group%unpack_size(nunpack) = overptr%msgsize(n)*nvector
unpack_buffer_pos = unpack_buffer_pos + group%unpack_size(nunpack)*ksize
end if
end do
end if
end do
group%nunpack = nunpack
if(update_buffer_pos .NE. unpack_buffer_pos ) call mpp_error(FATAL, &
"set_group_update: update_buffer_pos .NE. unpack_buffer_pos")
!figure out message size for each processor.
ntot = nsend_s + nsend_x + nsend_y
n = 1
i_s = 1
i_x = 1
i_y = 1
ind_s = -1
ind_x = -1
ind_y = -1
nsend = 0
do while(n<=ntot)
if( i_s <= nsend_s ) then
rank_s = update_s%send(i_s)%pe-domain%pe
if(rank_s .LT. 0) rank_s = rank_s + nlist
else
rank_s = nlist+1
endif
if( i_x <= nsend_x ) then
rank_x = update_x%send(i_x)%pe-domain%pe
if(rank_x .LT. 0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
if( i_y <= nsend_y ) then
rank_y = update_y%send(i_y)%pe-domain%pe
if(rank_y .LT. 0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
nsend = nsend + 1
rank = minval((/rank_s, rank_x, rank_y/))
if(rank == rank_s) then
n = n + 1
ind_s(nsend) = i_s
pelist(nsend) = update_s%send(i_s)%pe
i_s = i_s + 1
endif
if(rank == rank_x) then
n = n + 1
ind_x(nsend) = i_x
pelist(nsend) = update_x%send(i_x)%pe
i_x = i_x + 1
endif
if(rank == rank_y) then
n = n + 1
ind_y(nsend) = i_y
pelist(nsend) = update_y%send(i_y)%pe
i_y = i_y + 1
endif
enddo
nsend_old = nsend
nsend = 0
tot_send_size = 0
do l = 1, nsend_old
msgsize_s = 0
msgsize_x = 0
msgsize_y = 0
m = ind_s(l)
if(m>0) msgsize_s = get_mesgsize(update_s%send(m), send_s)*ksize*nscalar
m = ind_x(l)
if(m>0) msgsize_x = get_mesgsize(update_x%send(m), send_x)*ksize*nvector
m = ind_y(l)
if(m>0) msgsize_y = get_mesgsize(update_y%send(m), send_y)*ksize*nvector
msgsize = msgsize_s + msgsize_x + msgsize_y
if( msgsize.GT.0 )then
tot_send_size = tot_send_size + msgsize
nsend = nsend + 1
if(nsend > MAXOVERLAP) then
call mpp_error(FATAL, "set_group_update: nsend is greater than MAXOVERLAP, increase MAXOVERLAP")
endif
send_size(nsend) = msgsize
group%to_pe(nsend) = pelist(l)
group%buffer_pos_send(nsend) = update_buffer_pos
group%send_size(nsend) = msgsize
update_buffer_pos = update_buffer_pos + msgsize
end if
end do
group%nsend = nsend
!--- setup for pack
npack = 0
pack_buffer_pos = unpack_buffer_pos
do l = 1, nsend_old
m = ind_s(l)
if(m>0) then
overptr => update_s%send(m)
do n = 1, overptr%count
dir = overptr%dir(n)
if(send_s(dir)) then
npack = npack + 1
if(npack > MAXOVERLAP) call mpp_error(FATAL, &
"set_group_update: npack is greater than MAXOVERLAP, increase MAXOVERLAP 1")
group%pack_type(npack) = FIELD_S
group%pack_buffer_pos(npack) = pack_buffer_pos
group%pack_rotation(npack) = overptr%rotation(n)
group%pack_is(npack) = overptr%is(n)
group%pack_ie(npack) = overptr%ie(n)
group%pack_js(npack) = overptr%js(n)
group%pack_je(npack) = overptr%je(n)
group%pack_size(npack) = overptr%msgsize(n)*nscalar
pack_buffer_pos = pack_buffer_pos + group%pack_size(npack)*ksize
end if
end do
end if
m = ind_x(l)
if(m>0) then
overptr => update_x%send(m)
do n = 1, overptr%count
dir = overptr%dir(n)
!--- nonsym_edge update is not for rotation of 90 or -90 degree ( cubic sphere grid )
if( group%nonsym_edge .and. (overptr%rotation(n)==NINETY .or. &
overptr%rotation(n)==MINUS_NINETY) ) then
call mpp_error(FATAL, 'set_group_update: flags=NONSYMEDGEUPDATE is not compatible '// &
'with 90 or -90 degree rotation (normally cubic sphere grid)' )
endif
if(send_x(dir)) then
npack = npack + 1
if(npack > MAXOVERLAP) call mpp_error(FATAL, &
"set_group_update: npack is greater than MAXOVERLAP, increase MAXOVERLAP 2")
group%pack_type(npack) = FIELD_X
group%pack_buffer_pos(npack) = pack_buffer_pos
group%pack_rotation(npack) = overptr%rotation(n)
group%pack_is(npack) = overptr%is(n)
group%pack_ie(npack) = overptr%ie(n)
group%pack_js(npack) = overptr%js(n)
group%pack_je(npack) = overptr%je(n)
group%pack_size(npack) = overptr%msgsize(n)*nvector
pack_buffer_pos = pack_buffer_pos + group%pack_size(npack)*ksize
end if
end do
end if
m = ind_y(l)
if(m>0) then
overptr => update_y%send(m)
do n = 1, overptr%count
dir = overptr%dir(n)
if( group%nonsym_edge .and. (overptr%rotation(n)==NINETY .or. &
overptr%rotation(n)==MINUS_NINETY) ) then
call mpp_error(FATAL, 'set_group_update: flags=NONSYMEDGEUPDATE is not compatible '// &
'with 90 or -90 degree rotation (normally cubic sphere grid)' )
endif
if(send_y(dir)) then
npack = npack + 1
if(npack > MAXOVERLAP) call mpp_error(FATAL, &
"set_group_update: npack is greater than MAXOVERLAP, increase MAXOVERLAP 3")
group%pack_type(npack) = FIELD_Y
group%pack_buffer_pos(npack) = pack_buffer_pos
group%pack_rotation(npack) = overptr%rotation(n)
group%pack_is(npack) = overptr%is(n)
group%pack_ie(npack) = overptr%ie(n)
group%pack_js(npack) = overptr%js(n)
group%pack_je(npack) = overptr%je(n)
group%pack_size(npack) = overptr%msgsize(n)*nvector
pack_buffer_pos = pack_buffer_pos + group%pack_size(npack)*ksize
end if
end do
end if
end do
group%npack = npack
if(update_buffer_pos .NE. pack_buffer_pos ) call mpp_error(FATAL, &
"set_group_update: update_buffer_pos .NE. pack_buffer_pos")
!--- make sure the buffer is large enough
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, tot_recv_size+tot_send_size )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'set_group_update: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
group%tot_msgsize = tot_recv_size+tot_send_size
end subroutine set_group_update
!######################################################################
subroutine mpp_clear_group_update(group)
type(mpp_group_update_type), intent(inout) :: group
group%nscalar = 0
group%nvector = 0
group%nsend = 0
group%nrecv = 0
group%npack = 0
group%nunpack = 0
group%initialized = .false.
end subroutine mpp_clear_group_update
!#####################################################################
function mpp_group_update_initialized(group)
type(mpp_group_update_type), intent(in) :: group
logical :: mpp_group_update_initialized
mpp_group_update_initialized = group%initialized
end function mpp_group_update_initialized
!#####################################################################
function mpp_group_update_is_set(group)
type(mpp_group_update_type), intent(in) :: group
logical :: mpp_group_update_is_set
mpp_group_update_is_set = (group%nscalar > 0 .OR. group%nvector > 0)
end function mpp_group_update_is_set
# 2785 "../mpp/mpp_domains.F90" 2
# 1 "../mpp/include/mpp_domains_comm.inc" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_redistribute_init_comm(domain_in,l_addrs_in, domain_out,l_addrs_out, &
isize_in,jsize_in,ksize_in,isize_out,jsize_out,ksize_out) RESULT(d_comm)
type(DomainCommunicator2D), pointer :: d_comm
type(domain2D),target, intent(in) :: domain_in
integer(8), intent(in) :: l_addrs_in(:)
type(domain2D),target, intent(in) :: domain_out
integer(8), intent(in) :: l_addrs_out(:)
integer, intent(in) :: isize_in
integer, intent(in) :: jsize_in
integer, intent(in) :: ksize_in
integer, intent(in) :: isize_out
integer, intent(in) :: jsize_out
integer, intent(in) :: ksize_out
integer(8) :: domain_id
integer :: m, list
integer :: is, ie, js, je, ke, ioff, joff, list_size
integer :: isc, iec, jsc, jec, mytile
integer :: lsize,rsize,msgsize,to_pe,from_pe
integer, allocatable,dimension(:) :: isL, jsL
integer(8),allocatable,dimension(:,:) :: slist_addr
character(len=8) :: text
! This test determines whether input fields are from allocated memory (LOC gets global
! address) or "static" memory (need shmem_ptr). This probably needs to be generalized
! to determine appropriate mechanism for each incoming address.
! "Concurrent" run mode may leave field_in or field_out unassociated if pe does not
! contain in/out data. Use of STATIC option for ocean complicates this as ocean component
! always defined. Field_out is always a boundary structure and so is always allocated or
! not depending on whether it's used. If field out is defined (>0), then it is used otherwise
! field in must be defined.
!fix ke
ke = 0
if( domain_in%pe /= NULL_PE )ke = ksize_in
if( domain_out%pe /= NULL_PE )then
if( ke /= 0 .AND. ke /= ksize_out ) &
call mpp_error( FATAL, 'MPP_REDISTRIBUTE_INIT_COMM: mismatch between field_in and field_out.' )
ke = ksize_out
end if
if( ke == 0 )call mpp_error( FATAL, 'MPP_REDISTRIBUTE_INIT_COMM: either domain_in or domain_out must be native.' )
!check sizes
if( domain_in%pe /= NULL_PE )then
if( isize_in /= domain_in%x(1)%data%size .OR. jsize_in /= domain_in%y(1)%data%size ) &
call mpp_error( FATAL, 'MPP_REDISTRIBUTE_INIT_COMM: field_in must be on data domain of domain_in.' )
end if
if( domain_out%pe /= NULL_PE )then
if( isize_out /= domain_out%x(1)%data%size .OR. jsize_out /= domain_out%y(1)%data%size ) &
call mpp_error( FATAL, 'MPP_REDISTRIBUTE_INIT_COMM: field_out must be on data domain of domain_out.' )
end if
! Create unique domain identifier
list_size = size(l_addrs_in(:))
if(l_addrs_out(1) > 0)then
domain_id = set_domain_id(domain_out%id,ke+list_size)
else
domain_id = set_domain_id(domain_in%id,ke+list_size)
endif
d_comm =>get_comm(domain_id,l_addrs_in(1),l_addrs_out(1))
if(d_comm%initialized)return ! Found existing field/domain communicator
d_comm%l_addr = l_addrs_in(1)
d_comm%domain_in =>domain_in
d_comm%Slist_size = size(domain_out%list(:))
d_comm%isize_in = isize_in
d_comm%jsize_in = jsize_in
d_comm%ke = ke
!send
lsize = d_comm%Slist_size-1
allocate(d_comm%sendis(1,0:lsize), d_comm%sendie(1,0:lsize), &
d_comm%sendjs(1,0:lsize), d_comm%sendje(1,0:lsize), &
d_comm%S_msize(0:lsize),isL(0:lsize),jsL(0:lsize))
allocate(slist_addr(list_size,0:lsize))
allocate(d_comm%cto_pe(0:lsize), d_comm%S_do_buf(0:lsize))
isL=0;jsL=0
slist_addr = -9999
d_comm%cto_pe=-1
d_comm%sendis=0; d_comm%sendie=0
d_comm%sendjs=0; d_comm%sendje=0;
d_comm%S_msize=0
d_comm%S_do_buf=.false.
ioff = domain_in%x(1)%data%begin
joff = domain_in%y(1)%data%begin
mytile = domain_in%tile_id(1)
call mpp_get_compute_domain( domain_in, isc, iec, jsc, jec )
do list = 0,lsize
m = mod( domain_out%pos+list+lsize+1, lsize+1 )
if( mytile .NE. domain_out%list(m)%tile_id(1) ) cycle
d_comm%cto_pe(list) = domain_out%list(m)%pe
to_pe = d_comm%cto_pe(list)
is = domain_out%list(m)%x(1)%compute%begin
ie = domain_out%list(m)%x(1)%compute%end
js = domain_out%list(m)%y(1)%compute%begin
je = domain_out%list(m)%y(1)%compute%end
is = max(is,isc); ie = min(ie,iec)
js = max(js,jsc); je = min(je,jec)
if( ie >= is .AND. je >= js )then
d_comm%S_do_buf(list) = .true.
d_comm%sendis(1,list)=is; d_comm%sendie(1,list)=ie
d_comm%sendjs(1,list)=js; d_comm%sendje(1,list)=je
d_comm%S_msize(list) = (ie-is+1)*(je-js+1)*ke
isL(list) = is-ioff+1; jsL(list) = js-joff+1
end if
end do
call mpp_sync_self()
!recv
d_comm%domain_out =>domain_out
d_comm%Rlist_size = size(domain_in%list(:))
d_comm%isize_out = isize_out
d_comm%jsize_out = jsize_out
rsize = d_comm%Rlist_size-1
allocate(d_comm%recvis(1,0:rsize), d_comm%recvie(1,0:rsize), &
d_comm%recvjs(1,0:rsize), d_comm%recvje(1,0:rsize), &
d_comm%R_msize(0:rsize))
allocate(d_comm%cfrom_pe(0:rsize), d_comm%R_do_buf(0:rsize))
allocate(d_comm%isizeR(0:rsize), d_comm%jsizeR(0:rsize))
allocate(d_comm%sendisR(1,0:rsize), d_comm%sendjsR(1,0:rsize))
allocate(d_comm%rem_addrl(list_size,0:rsize))
d_comm%rem_addrl=-9999
d_comm%cfrom_pe=-1
d_comm%recvis=0; d_comm%recvie=0
d_comm%recvjs=0; d_comm%recvje=0;
d_comm%R_msize=0
d_comm%R_do_buf=.false.
d_comm%isizeR=0; d_comm%jsizeR=0
d_comm%sendisR=0; d_comm%sendjsR=0
mytile = domain_out%tile_id(1)
call mpp_get_compute_domain( domain_out, isc, iec, jsc, jec )
do list = 0,rsize
m = mod( domain_in%pos+rsize+1-list, rsize+1 )
if( mytile .NE. domain_in%list(m)%tile_id(1) ) cycle
d_comm%cfrom_pe(list) = domain_in%list(m)%pe
from_pe = d_comm%cfrom_pe(list)
is = domain_in%list(m)%x(1)%compute%begin
ie = domain_in%list(m)%x(1)%compute%end
js = domain_in%list(m)%y(1)%compute%begin
je = domain_in%list(m)%y(1)%compute%end
is = max(is,isc); ie = min(ie,iec)
js = max(js,jsc); je = min(je,jec)
if( ie >= is .AND. je >= js )then
d_comm%R_do_buf(list) = .true.
d_comm%recvis(1,list)=is; d_comm%recvie(1,list)=ie
d_comm%recvjs(1,list)=js; d_comm%recvje(1,list)=je
d_comm%R_msize(list) = (ie-is+1)*(je-js+1)*ke
end if
end do
d_comm%isize_max = isize_in; call mpp_max(d_comm%isize_max)
d_comm%jsize_max = jsize_in; call mpp_max(d_comm%jsize_max)
! Handles the case where S_msize and/or R_msize are zero-size arrays
msgsize = ( MAXVAL( (/0,sum(d_comm%S_msize(:))/) ) + MAXVAL( (/0,sum(d_comm%R_msize(:))/) ) ) * list_size
if(msgsize>0)then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_REDISTRIBUTE_INIT_COMM: mpp_domains_stack overflow, call mpp_domains_set_stack_size(' &
//trim(text)//') from all PEs.' )
end if
end if
DEALLOCATE(slist_addr,isL,jsL)
d_comm%initialized = .true.
end function mpp_redistribute_init_comm
function mpp_global_field_init_comm(domain,l_addr,isize_g,jsize_g,isize_l, &
jsize_l, ksize,l_addr2,flags, position) RESULT(d_comm)
type(DomainCommunicator2D), pointer :: d_comm
type(domain2D),target, intent(in) :: domain
integer(8), intent(in) :: l_addr
integer, intent(in) :: isize_g
integer, intent(in) :: jsize_g
integer, intent(in) :: isize_l
integer, intent(in) :: jsize_l
integer, intent(in) :: ksize
integer(8),optional,intent(in) :: l_addr2
integer, optional, intent(in) :: flags
integer, optional, intent(in) :: position
integer(8) :: domain_id
integer :: n, lpos, rpos, list, nlist, tile_id
integer :: update_flags
logical :: xonly, yonly
integer :: is, ie, js, je, ioff, joff, ishift, jshift
integer :: lsize,msgsize,from_pe
integer, allocatable,dimension(:) :: isL, jsL
integer(8),allocatable,dimension(:,:) :: slist_addr
integer(8),save ,dimension(2) :: rem_addr
character(len=8) :: text
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
update_flags=XUPDATE+YUPDATE; xonly = .FALSE.; yonly = .FALSE.
if( PRESENT(flags) )then
update_flags = flags
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
end if
call mpp_get_domain_shift(domain, ishift, jshift, position=position)
if( isize_g /= (domain%x(1)%global%size+ishift) .OR. jsize_g /= (domain%y(1)%global%size+jshift) ) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_INIT_COMM: incoming arrays do not match domain.' )
if( isize_l == (domain%x(1)%compute%size+ishift) .AND. jsize_l == (domain%y(1)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(1)%compute%begin + 1
joff = -domain%y(1)%compute%begin + 1
elseif( isize_l == (domain%x(1)%memory%size+ishift) .AND. jsize_l == (domain%y(1)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(1)%data%begin + 1
joff = -domain%y(1)%data%begin + 1
else
call mpp_error(FATAL,'MPP_GLOBAL_FIELD_INIT_COMM: incoming field array must match either compute domain or data domain.')
endif
! Create unique domain identifier
domain_id=set_domain_id(domain%id,ksize,update_flags, position=position)
d_comm =>get_comm(domain_id,l_addr,l_addr2)
if(d_comm%initialized)return ! Found existing field/domain communicator
d_comm%domain => domain
d_comm%isize_in = isize_l; d_comm%isize_out = isize_g
d_comm%jsize_in = jsize_l; d_comm%jsize_out = jsize_g
d_comm%ke = ksize
d_comm%gf_ioff=ioff; d_comm%gf_joff=joff
!fill off-domains (note loops begin at an offset of 1)
if( xonly )then
lsize = size(domain%x(1)%list(:))
!send
allocate(d_comm%cto_pe(0:lsize-1))
d_comm%cto_pe=-1
do list = 0,lsize-1
lpos = mod(domain%x(1)%pos+lsize-list,lsize)
d_comm%cto_pe(list) = domain%x(1)%list(lpos)%pe
end do
!recv
allocate(d_comm%cfrom_pe(0:lsize-1))
allocate(d_comm%recvis(1,0:lsize-1), d_comm%recvie(1,0:lsize-1), &
d_comm%recvjs(1,0:lsize-1), d_comm%recvje(1,0:lsize-1), &
d_comm%R_msize(0:lsize-1))
d_comm%cfrom_pe=-1
d_comm%recvis=0; d_comm%recvie=0
d_comm%recvjs=0; d_comm%recvje=0;
d_comm%R_msize=0
do list = 0,lsize-1
rpos = mod(domain%x(1)%pos+list,lsize)
from_pe = domain%x(1)%list(rpos)%pe
d_comm%cfrom_pe(list) = from_pe
is = domain%list(from_pe)%x(1)%compute%begin; ie = domain%list(from_pe)%x(1)%compute%end+ishift
js = domain%y(1)%compute%begin; je = domain%y(1)%compute%end+jshift
d_comm%recvis(1,list)=is; d_comm%recvie(1,list)=ie
d_comm%recvjs(1,list)=js; d_comm%recvje(1,list)=je
d_comm%R_msize(list) = (ie-is+1) * (je-js+1) * ksize
end do
elseif( yonly )then
lsize = size(domain%y(1)%list(:))
!send
allocate(d_comm%cto_pe(0:lsize-1))
d_comm%cto_pe=-1
do list = 0,lsize-1   ! match the allocation of cto_pe(0:lsize-1), as in the xonly branch
lpos = mod(domain%y(1)%pos+lsize-list,lsize)
d_comm%cto_pe(list) = domain%y(1)%list(lpos)%pe
end do
!recv
allocate(d_comm%cfrom_pe(0:lsize-1))
allocate(d_comm%recvis(1,0:lsize-1), d_comm%recvie(1,0:lsize-1), &
d_comm%recvjs(1,0:lsize-1), d_comm%recvje(1,0:lsize-1), &
d_comm%R_msize(0:lsize-1))
d_comm%cfrom_pe=-1
d_comm%recvis=0; d_comm%recvie=0
d_comm%recvjs=0; d_comm%recvje=0;
d_comm%R_msize=0
do list = 0,lsize-1
rpos = mod(domain%y(1)%pos+list,lsize)
from_pe = domain%y(1)%list(rpos)%pe
d_comm%cfrom_pe(list) = from_pe
is = domain%x(1)%compute%begin; ie = domain%x(1)%compute%end+ishift
js = domain%list(from_pe)%y(1)%compute%begin; je = domain%list(from_pe)%y(1)%compute%end+jshift
d_comm%recvis(1,list)=is; d_comm%recvie(1,list)=ie
d_comm%recvjs(1,list)=js; d_comm%recvje(1,list)=je
d_comm%R_msize(list) = (ie-is+1) * (je-js+1) * ksize
end do
else
nlist = size(domain%list(:))
tile_id = domain%tile_id(1)
lsize = 0
do list = 0,nlist-1
if( domain%list(list)%tile_id(1) .NE. tile_id ) cycle
lsize = lsize+1
end do
!send
allocate(d_comm%cto_pe(0:lsize-1))
d_comm%cto_pe=-1
n = 0
do list = 0,nlist-1
lpos = mod(domain%pos+nlist-list,nlist)
if( domain%list(lpos)%tile_id(1) .NE. tile_id ) cycle
d_comm%cto_pe(n) = domain%list(lpos)%pe
n = n + 1
end do
!recv
allocate(d_comm%cfrom_pe(0:lsize-1))
allocate(d_comm%recvis(1,0:lsize-1), d_comm%recvie(1,0:lsize-1), &
d_comm%recvjs(1,0:lsize-1), d_comm%recvje(1,0:lsize-1), &
d_comm%R_msize(0:lsize-1))
d_comm%cfrom_pe=-1
d_comm%recvis=0; d_comm%recvie=0
d_comm%recvjs=0; d_comm%recvje=0;
d_comm%R_msize=0
n = 0
do list = 0,nlist-1
rpos = mod(domain%pos+list,nlist)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
d_comm%cfrom_pe(n) = domain%list(rpos)%pe
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
d_comm%recvis(1,n)=is; d_comm%recvie(1,n)=ie
d_comm%recvjs(1,n)=js; d_comm%recvje(1,n)=je
d_comm%R_msize(n) = (je-js+1) * (ie-is+1) * ksize
n = n+1
end do
endif
d_comm%Slist_size = lsize
d_comm%Rlist_size = lsize
!send
allocate(d_comm%sendis(1,0:lsize-1), d_comm%sendie(1,0:lsize-1), &
d_comm%sendjs(1,0:lsize-1), d_comm%sendje(1,0:lsize-1), &
d_comm%S_msize(0:lsize-1),isL(0:lsize-1),jsL(0:lsize-1))
allocate(slist_addr(2,0:lsize-1))
isL=0; jsL=0
slist_addr = -9999
d_comm%sendis=0; d_comm%sendie=0
d_comm%sendjs=0; d_comm%sendje=0;
d_comm%S_msize=0
do list = 0,lsize-1
is=domain%x(1)%compute%begin; ie=domain%x(1)%compute%end+ishift
js=domain%y(1)%compute%begin; je=domain%y(1)%compute%end+jshift
d_comm%sendis(1,list)=is; d_comm%sendie(1,list)=ie
d_comm%sendjs(1,list)=js; d_comm%sendje(1,list)=je
d_comm%S_msize(list) = (je-js+1) * (ie-is+1) * ksize
isL(list) = ioff+domain%x(1)%compute%begin; jsL(list) = joff+domain%y(1)%compute%begin
end do
!recv
allocate(d_comm%isizeR(0:lsize-1), d_comm%jsizeR(0:lsize-1))
allocate(d_comm%sendisR(1,0:lsize-1), d_comm%sendjsR(1,0:lsize-1))
if(.not.PRESENT(l_addr2))then
allocate(d_comm%rem_addr(0:lsize-1))
d_comm%rem_addr=-9999
else
allocate(d_comm%rem_addrx(0:lsize-1),d_comm%rem_addry(0:lsize-1))
d_comm%rem_addrx=-9999; d_comm%rem_addry=-9999
endif
d_comm%isizeR=0; d_comm%jsizeR=0
d_comm%sendisR=0; d_comm%sendjsR=0
rem_addr = -9999
! Handles the case where S_msize and/or R_msize are zero-size arrays
msgsize = MAXVAL( (/0,sum(d_comm%S_msize(:))/) ) + MAXVAL( (/0,sum(d_comm%R_msize(:))/) )
if(msgsize>0)then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_INIT_COMM: mpp_domains_stack overflow, call mpp_domains_set_stack_size(' &
//trim(text)//') from all PEs.' )
end if
end if
DEALLOCATE(slist_addr,isL,jsL)
d_comm%initialized = .true.
end function mpp_global_field_init_comm
subroutine mpp_redistribute_free_comm(domain_in,l_addr,domain_out,l_addr2,ksize,lsize)
! Since initialization of the d_comm type is expensive, freeing should be a rare
! event. Thus no attempt is made to salvage freed d_comm's.
type(domain2D), intent(in) :: domain_in
integer(8), intent(in) :: l_addr
type(domain2D), intent(in) :: domain_out
integer(8), intent(in) :: l_addr2
integer, intent(in) :: ksize,lsize
integer(8) :: domain_id
if(l_addr2 > 0)then
domain_id = set_domain_id(domain_out%id,ksize+lsize)
else
domain_id = set_domain_id(domain_in%id,ksize+lsize)
endif
call free_comm(domain_id,l_addr,l_addr2)
end subroutine mpp_redistribute_free_comm
subroutine mpp_global_field_free_comm(domain,l_addr,ksize,l_addr2,flags)
! Since initialization of the d_comm type is expensive, freeing should be a rare
! event. Thus no attempt is made to salvage freed d_comm's.
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: l_addr
integer, intent(in) :: ksize
integer(8),optional,intent(in) :: l_addr2
integer, optional,intent(in) :: flags
integer :: update_flags
integer(8) :: domain_id
update_flags=0; if(PRESENT(flags))update_flags=flags
domain_id=set_domain_id(domain%id,ksize,update_flags)
call free_comm(domain_id,l_addr,l_addr2)
end subroutine mpp_global_field_free_comm
subroutine free_comm(domain_id,l_addr,l_addr2)
! Since initialization of the d_comm type is expensive, freeing should be a rare
! event. Thus no attempt is made to salvage freed d_comm's.
integer(8), intent(in) :: domain_id
integer(8), intent(in) :: l_addr
integer(8),optional,intent(in) :: l_addr2
integer(8) :: dc_key,a_key
integer :: dc_idx,a_idx,i_idx,insert,insert_a,insert_i
integer :: a2_idx,insert_a2
i_idx = find_key(domain_id,ids_sorted(1:n_ids),insert_i)
a_idx = find_key(l_addr,addrs_sorted(1:a_sort_len),insert_a)
a_key = int(addrs_idx(a_idx),KIND(8))
if(PRESENT(l_addr2))then
a2_idx = find_key(l_addr2,addrs2_sorted(1:a2_sort_len),insert_a2)
a_key = a_key + ADDR2_BASE*int(addrs2_idx(a2_idx),KIND(8))
endif
dc_key = DOMAIN_ID_BASE*int(ids_idx(i_idx),KIND(8)) + a_key
dc_idx = find_key(dc_key,dcKey_sorted(1:dc_sort_len),insert)
if(dc_idx < 0)then
call mpp_error(FATAL,'FREE_COMM: attempt to remove nonexistent domains communicator key')
endif
call deallocate_comm(d_comm(dc_idx))
call pop_key(dcKey_sorted,d_comm_idx,dc_sort_len,dc_idx)
call pop_key(addrs_sorted,addrs_idx,a_sort_len,a_idx)
if(PRESENT(l_addr2))call pop_key(addrs2_sorted,addrs2_idx,a2_sort_len,a2_idx)
end subroutine free_comm
function get_comm(domain_id,l_addr,l_addr2)
integer(8),intent(in) :: domain_id
integer(8),intent(in) :: l_addr
integer(8),intent(in),optional :: l_addr2
type(DomainCommunicator2D), pointer :: get_comm
integer(8) :: dc_key,a_key
integer :: i,dc_idx,a_idx,i_idx,insert,insert_a,insert_i
integer :: a2_idx,insert_a2
if(.not.ALLOCATED(d_comm))ALLOCATE(d_comm(MAX_FIELDS))
i_idx = find_key(domain_id,ids_sorted(1:n_ids),insert_i)
a_idx = find_key(l_addr,addrs_sorted(1:a_sort_len),insert_a)
a_key = int(addrs_idx(a_idx),KIND(8))
if(PRESENT(l_addr2))then
a2_idx = find_key(l_addr2,addrs2_sorted(1:a2_sort_len),insert_a2)
a_key = a_key + ADDR2_BASE*int(addrs2_idx(a2_idx),KIND(8))
endif
dc_key = DOMAIN_ID_BASE*int(ids_idx(i_idx),KIND(8)) + a_key
dc_idx = find_key(dc_key,dcKey_sorted(1:dc_sort_len),insert)
if(dc_idx > 0)then
get_comm =>d_comm(d_comm_idx(dc_idx))
else
if(i_idx<0)then
if(n_ids == MAX_DOM_IDS)then
call mpp_error(FATAL,'GET_COMM: Maximum number of domains exceeded')
endif
n_ids = n_ids+1
i_idx = push_key(ids_sorted,ids_idx,i_sort_len,insert_i,domain_id,n_ids)
endif
if(a_idx<0)then
if(n_addrs == MAX_ADDRS)then
call mpp_error(FATAL,'GET_COMM: Maximum number of memory addresses exceeded')
endif
n_addrs = n_addrs + 1
a_idx = push_key(addrs_sorted,addrs_idx,a_sort_len,insert_a,l_addr,n_addrs)
endif
if(PRESENT(l_addr2))then
if(a2_idx<0)then
if(n_addrs2 == MAX_ADDRS2)then
call mpp_error(FATAL,'GET_COMM: Maximum number of 2nd memory addresses exceeded')
endif
n_addrs2 = n_addrs2 + 1
a2_idx = push_key(addrs2_sorted,addrs2_idx,a2_sort_len,insert_a2,l_addr2,n_addrs2)
endif
endif
if(n_comm == MAX_FIELDS)then
call mpp_error(FATAL,'GET_COMM: Maximum number of fields exceeded')
endif
a_key = int(addrs_idx(a_idx),KIND(8))
if(PRESENT(l_addr2))a_key = a_key + ADDR2_BASE*int(addrs2_idx(a2_idx),KIND(8))
dc_key = DOMAIN_ID_BASE*int(ids_idx(i_idx),KIND(8)) + a_key
dc_idx = find_key(dc_key,dcKey_sorted(1:dc_sort_len),insert)
if(dc_idx /= -1)call mpp_error(FATAL,'GET_COMM: attempt to insert existing key')
n_comm = n_comm + 1
i = push_key(dcKey_sorted,d_comm_idx,dc_sort_len,insert,dc_key,n_comm)
d_comm_idx(insert) = n_comm
if(PRESENT(l_addr2))then
d_comm(n_comm)%l_addrx = l_addr
d_comm(n_comm)%l_addry = l_addr2
else
d_comm(n_comm)%l_addr = l_addr
endif
get_comm =>d_comm(n_comm)
endif
end function get_comm
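!--- Note on the lookup (schematic sketch, values illustrative): the communicator cache key is
!--- composed as dc_key = DOMAIN_ID_BASE*ids_idx + ADDR2_BASE*addrs2_idx + addrs_idx, so a
!--- repeated call with the same domain id and field address(es) returns the cached
!--- DomainCommunicator2D instead of rebuilding it.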
function push_key(sorted,idx,n_idx,insert,key,ival)
integer(8),intent(inout),dimension(:) :: sorted
integer, intent(inout),dimension(-1:) :: idx ! Start -1 to simplify first call logic in get_comm
integer, intent(inout) :: n_idx
integer, intent(in) :: insert
integer(8),intent(in) :: key
integer, intent(in) :: ival
integer :: push_key,i
do i=n_idx,insert,-1
sorted(i+1) = sorted(i)
idx(i+1) = idx(i)
end do
sorted(insert) = key
n_idx = n_idx + 1
idx(insert) = ival
push_key = insert
end function push_key
subroutine pop_key(sorted,idx,n_idx,key_idx)
integer(8),intent(inout),dimension(:) :: sorted
integer, intent(inout),dimension(-1:) :: idx ! Start -1 to simplify first call logic in get_comm
integer, intent(inout) :: n_idx
integer, intent(in) :: key_idx
integer :: i
do i=key_idx,n_idx-1
sorted(i) = sorted(i+1)
idx(i) = idx(i+1)
end do
sorted(n_idx) = -9999
idx(n_idx) = -9999
n_idx = n_idx - 1
end subroutine pop_key
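!--- Illustrative sketch (assumed values): push_key/pop_key keep the sorted-key array and its
!--- companion index array in step, e.g. starting from sorted=(/10,30/), idx=(/1,2/), n_idx=2:
!---   i = push_key(sorted, idx, n_idx, insert=2, key=20_8, ival=3)
!---     -> sorted=(/10,20,30/), idx=(/1,3,2/), n_idx=3, i=2
!---   call pop_key(sorted, idx, n_idx, key_idx=2)
!---     -> sorted=(/10,30,.../), idx=(/1,2,.../), n_idx=2 (vacated slots reset to -9999)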
function find_key(key,sorted,insert) RESULT(n)
! The algorithm used here requires monotonic keys w/out repetition.
integer(8),intent(in) :: key ! new address to be found in list
integer(8),dimension(:),intent(in) :: sorted ! list of sorted local addrs
integer, intent(out) :: insert
integer :: n, n_max, n_min, n_key
logical :: not_found
n_key = size(sorted(:))
insert = 1
n = -1 ! value not in list
if(n_key == 0)return ! first call
if(key < sorted(1))then
insert = 1; return
elseif(key > sorted(n_key))then
insert = n_key+1; return
endif
if(key == sorted(1))then
n = 1; return
elseif(key == sorted(n_key))then
n = n_key; return
endif
not_found = .true.
n = n_key/2 + 1
n_min=1; n_max=n_key
do while(not_found)
if(key == sorted(n))then
not_found = .false.
elseif(key > sorted(n))then
if(key < sorted(n+1))then
insert = n+1; exit
endif
n_min = n
n = (n+1+n_max)/2
else
if(key > sorted(n-1))then
insert = n; exit
endif
n_max = n
n = (n+n_min)/2
endif
if(n==1 .or. n==n_key)exit
end do
if(not_found)n = -1 ! value not in list
end function find_key
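!--- Illustrative example (assumed values): with sorted=(/5,10,20,40/),
!---   find_key(20_8, sorted, insert) returns 3 (key present),
!---   find_key(15_8, sorted, insert) returns -1 with insert=3 (15 would slot in before 20),
!---   find_key(50_8, sorted, insert) returns -1 with insert=5 (append past the end).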
subroutine deallocate_comm(d_comm)
type(DomainCommunicator2D), intent(inout) :: d_comm
d_comm%domain =>NULL()
d_comm%domain_in =>NULL()
d_comm%domain_out =>NULL()
d_comm%initialized=.false.
d_comm%id=-9999
d_comm%l_addr =-9999
d_comm%l_addrx =-9999
d_comm%l_addry =-9999
if( allocated(d_comm%sendis) ) DEALLOCATE(d_comm%sendis); !!d_comm%sendis =>NULL()
if( allocated(d_comm%sendie) ) DEALLOCATE(d_comm%sendie); !!d_comm%sendie =>NULL()
if( allocated(d_comm%sendjs) ) DEALLOCATE(d_comm%sendjs); !!d_comm%sendjs =>NULL()
if( allocated(d_comm%sendje) ) DEALLOCATE(d_comm%sendje); !!d_comm%sendje =>NULL()
if( allocated(d_comm%S_msize) ) DEALLOCATE(d_comm%S_msize); !!d_comm%S_msize =>NULL()
if( allocated(d_comm%S_do_buf) ) DEALLOCATE(d_comm%S_do_buf); !!d_comm%S_do_buf =>NULL()
if( allocated(d_comm%cto_pe) ) DEALLOCATE(d_comm%cto_pe); !!d_comm%cto_pe =>NULL()
if( allocated(d_comm%recvis) ) DEALLOCATE(d_comm%recvis); !!d_comm%recvis =>NULL()
if( allocated(d_comm%recvie) ) DEALLOCATE(d_comm%recvie); !!d_comm%recvie =>NULL()
if( allocated(d_comm%recvjs) ) DEALLOCATE(d_comm%recvjs); !!d_comm%recvjs =>NULL()
if( allocated(d_comm%recvje) ) DEALLOCATE(d_comm%recvje); !!d_comm%recvje =>NULL()
if( allocated(d_comm%R_msize) ) DEALLOCATE(d_comm%R_msize); !!d_comm%R_msize =>NULL()
if( allocated(d_comm%R_do_buf) ) DEALLOCATE(d_comm%R_do_buf); !!d_comm%R_do_buf =>NULL()
if( allocated(d_comm%cfrom_pe) ) DEALLOCATE(d_comm%cfrom_pe); !!d_comm%cfrom_pe =>NULL()
d_comm%Slist_size=0; d_comm%Rlist_size=0
d_comm%isize=0; d_comm%jsize=0; d_comm%ke=0
d_comm%isize_in=0; d_comm%jsize_in=0
d_comm%isize_out=0; d_comm%jsize_out=0
d_comm%isize_max=0; d_comm%jsize_max=0
d_comm%gf_ioff=0; d_comm%gf_joff=0
! Remote data
if( allocated(d_comm%isizeR) ) DEALLOCATE(d_comm%isizeR); !!d_comm%isizeR =>NULL()
if( allocated(d_comm%jsizeR) ) DEALLOCATE(d_comm%jsizeR); !!d_comm%jsizeR =>NULL()
if( allocated(d_comm%sendisR) ) DEALLOCATE(d_comm%sendisR); !!d_comm%sendisR =>NULL()
if( allocated(d_comm%sendjsR) ) DEALLOCATE(d_comm%sendjsR); !!d_comm%sendjsR =>NULL()
if( allocated(d_comm%rem_addr) ) DEALLOCATE(d_comm%rem_addr); !!d_comm%rem_addr =>NULL()
if( allocated(d_comm%rem_addrx) )DEALLOCATE(d_comm%rem_addrx); !!d_comm%rem_addrx =>NULL()
if( allocated(d_comm%rem_addry) )DEALLOCATE(d_comm%rem_addry); !!d_comm%rem_addry =>NULL()
if( allocated(d_comm%rem_addrl) )DEALLOCATE(d_comm%rem_addrl); !!d_comm%rem_addrl =>NULL()
end subroutine deallocate_comm
function set_domain_id(d_id,ksize,flags,gtype, position, whalo, ehalo, shalo, nhalo)
integer(8), intent(in) :: d_id
integer , intent(in) :: ksize
integer , optional, intent(in) :: flags
integer , optional, intent(in) :: gtype
integer , optional, intent(in) :: position
integer , optional, intent(in) :: whalo, ehalo, shalo, nhalo
integer(8) :: set_domain_id
set_domain_id=d_id + KE_BASE*int(ksize,KIND(d_id))
if(PRESENT(flags))set_domain_id=set_domain_id+int(flags,KIND(d_id))
if(PRESENT(gtype))set_domain_id=set_domain_id+GT_BASE*int(gtype,KIND(d_id)) ! Must be 8 arithmetic
!--- gtype has never been used to set the id. We need to add position to the id calculation to separate
!--- BGRID and CGRID or scalar variables.
if(present(position)) set_domain_id=set_domain_id+GT_BASE*int(2**position, KIND(d_id))
!z1l ???? the following calculation may need to be revised
if(present(whalo)) then
if(whalo>=0) then
set_domain_id=set_domain_id+GT_BASE*int(2**4*2**whalo, KIND(d_id))
else
set_domain_id=set_domain_id-GT_BASE*int(2**4*2**(-whalo), KIND(d_id))
endif
end if
if(present(ehalo)) then
if(ehalo>=0) then
set_domain_id=set_domain_id+GT_BASE*int(2**4*2**ehalo, KIND(d_id))
else
set_domain_id=set_domain_id-GT_BASE*int(2**4*2**(-ehalo), KIND(d_id))
endif
end if
if(present(shalo)) then
if(shalo>=0) then
set_domain_id=set_domain_id+GT_BASE*int(2**4*2**shalo, KIND(d_id))
else
set_domain_id=set_domain_id-GT_BASE*int(2**4*2**(-shalo), KIND(d_id))
endif
end if
if(present(nhalo)) then
if(nhalo>=0) then
set_domain_id=set_domain_id+GT_BASE*int(2**4*2**nhalo, KIND(d_id))
else
set_domain_id=set_domain_id-GT_BASE*int(2**4*2**(-nhalo), KIND(d_id))
endif
end if
end function set_domain_id
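!--- Note (schematic sketch of the composition above): the returned key is built additively as
!---   id = d_id + KE_BASE*ksize [+ flags + GT_BASE*2**position + halo terms]
!--- so two updates that differ only in ksize, staggering position, or halo widths map to
!--- distinct communicator cache entries.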
!#######################################################################
# 2786 "../mpp/mpp_domains.F90" 2
# 1 "../mpp/include/mpp_domains_define.inc" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
!
!
!
!
!
subroutine mpp_define_layout2D( global_indices, ndivs, layout )
integer, intent(in) :: global_indices(:) !(/ isg, ieg, jsg, jeg /)
integer, intent(in) :: ndivs !number of divisions to divide global domain
integer, intent(out) :: layout(:)
integer :: isg, ieg, jsg, jeg, isz, jsz, idiv, jdiv
if(size(global_indices(:)) .NE. 4) call mpp_error(FATAL,"mpp_define_layout2D: size of global_indices should be 4")
if(size(layout(:)) .NE. 2) call mpp_error(FATAL,"mpp_define_layout2D: size of layout should be 2")
isg = global_indices(1)
ieg = global_indices(2)
jsg = global_indices(3)
jeg = global_indices(4)
isz = ieg - isg + 1
jsz = jeg - jsg + 1
!first try to divide ndivs in the domain aspect ratio: if imperfect aspect, reduce idiv till it divides ndivs
idiv = nint( sqrt(float(ndivs*isz)/jsz) )
idiv = max(idiv,1) !for isz=1 line above can give 0
do while( mod(ndivs,idiv).NE.0 )
idiv = idiv - 1
end do !will terminate at idiv=1 if not before
jdiv = ndivs/idiv
layout = (/ idiv, jdiv /)
return
end subroutine mpp_define_layout2D
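!--- Illustrative example (assumed grid size): dividing a 360x180 global grid over 16 PEs,
!---     call mpp_define_layout2D( (/1,360,1,180/), 16, layout )
!--- the aspect-ratio guess idiv = nint(sqrt(16.*360/180)) = 6 is reduced to 4 (the nearest
!--- divisor of 16), giving layout = (/4,4/).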
!############################################################################
!
!
!
!
! NOTE: The following routine may need to be revised to improve its capability.
! It is very hard to achieve a balanced distribution in all situations.
! Hopefully some smart idea will come up someday.
subroutine mpp_define_mosaic_pelist( sizes, pe_start, pe_end, pelist, costpertile)
integer, dimension(:), intent(in) :: sizes
integer, dimension(:), intent(inout) :: pe_start, pe_end
integer, dimension(:), intent(in), optional :: pelist, costpertile
integer, dimension(size(sizes(:))) :: costs
integer, dimension(:), allocatable :: pes
integer :: ntiles, npes, totcosts, avgcost
integer :: ntiles_left, npes_left, pos, n, tile
integer :: cost_on_tile, cost_on_pe, npes_used, errunit
ntiles = size(sizes(:))
if(size(pe_start(:)) .NE. ntiles .OR. size(pe_end(:)) .NE. ntiles ) then
call mpp_error(FATAL, "mpp_define_mosaic_pelist: size mismatch between pe_start/pe_end and sizes")
end if
if(present(costpertile)) then
if(size(costpertile(:)) .NE. ntiles ) then
call mpp_error(FATAL, "mpp_define_mosaic_pelist: size mismatch between costpertile and sizes")
end if
costs = sizes*costpertile
else
costs = sizes
end if
if( PRESENT(pelist) )then
if( .NOT.any(pelist.EQ.mpp_pe()) )then
errunit = stderr()
write( errunit,* )'pe=', mpp_pe(), ' pelist=', pelist
call mpp_error( FATAL, 'mpp_define_mosaic_pelist: pe must be in pelist.' )
end if
npes = size(pelist(:))
allocate( pes(0:npes-1) )
pes(:) = pelist(:)
else
npes = mpp_npes()
allocate( pes(0:npes-1) )
call mpp_get_current_pelist(pes)
end if
ntiles_left = ntiles
npes_left = npes
pos = pes(0)
do while( ntiles_left > 0 )
if( npes_left == 1 ) then ! all remaining tiles go on the last processor, possibly causing imbalance.
do n = 1, ntiles
if(costs(n) > 0) then
pe_start(n) = pos
pe_end(n) = pos
costs(n) = 0
end if
end do
ntiles_left = 0
npes_left = 0
else
totcosts = sum(costs)
avgcost = CEILING(real(totcosts)/npes_left )
tile = minval(maxloc(costs))
cost_on_tile = costs(tile)
pe_start(tile) = pos
ntiles_left = ntiles_left - 1
costs(tile) = 0
totcosts = totcosts - cost_on_tile
if(cost_on_tile .GE. avgcost ) then
npes_used = min(ceiling(real(cost_on_tile)/avgcost), npes_left)
if( ntiles_left > 0 .AND. npes_used == npes_left ) npes_used = npes_used - 1
pe_end(tile) = pos + npes_used - 1
npes_left = npes_left - npes_used
pos = pos + npes_used
else
!--- find other tiles to share the pe
pe_end(tile) = pos
cost_on_pe = cost_on_tile
do while(ntiles_left>npes_left) ! make sure all the pes are used.
tile = minval(minloc(costs, costs> 0 ))
cost_on_tile = costs(tile)
cost_on_pe = cost_on_pe + cost_on_tile
if(cost_on_pe > avgcost ) exit
pe_start(tile) = pos
pe_end(tile) = pos
ntiles_left = ntiles_left - 1
costs(tile) = 0
totcosts = totcosts - cost_on_tile
end do
npes_left = npes_left - 1
pos = pos + 1
end if
end if
end do
if(npes_left .NE. 0 ) call mpp_error(FATAL, "mpp_define_mosaic_pelist: the left npes should be zero")
deallocate(pes)
end subroutine mpp_define_mosaic_pelist
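!--- Illustrative example (assumed costs, pelist 0..3): 3 tiles with costs (/100,100,200/) on
!--- 4 PEs give avgcost=100; the largest tile takes 2 PEs and the other tiles 1 PE each, so
!---   pe_start = (/2,3,0/), pe_end = (/2,3,1/)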
!-- The following implementation is different from mpp_compute_extent:
!-- the last block might have the most points.
subroutine mpp_compute_block_extent(isg,ieg,ndivs,ibegin,iend)
integer, intent(in) :: isg, ieg, ndivs
integer, dimension(:), intent(out) :: ibegin, iend
integer :: ndiv, imax, ndmax
integer :: is, ie, n
ie = ieg
do ndiv=ndivs,1,-1
!domain is sized by dividing remaining points by remaining domains
is = ie - CEILING( REAL(ie-isg+1)/ndiv ) + 1
ibegin(ndiv) = is
iend(ndiv) = ie
if( ie.LT.is )call mpp_error( FATAL, &
'MPP_DEFINE_DOMAINS(mpp_compute_block_extent): domain extents must be positive definite.' )
if( ndiv.EQ.1 .AND. ibegin(ndiv) .NE. isg ) &
call mpp_error( FATAL, 'mpp_compute_block_extent: domain extents do not span space completely.' )
ie = is - 1
end do
end subroutine mpp_compute_block_extent
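!--- Illustrative example (assumed extents): isg=1, ieg=10, ndivs=3 gives
!---   ibegin = (/1,4,7/), iend = (/3,6,10/)
!--- i.e. the last block absorbs the remainder point, unlike mpp_compute_extent below.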
!#####################################################################
subroutine mpp_compute_extent(isg,ieg,ndivs,ibegin,iend, extent )
integer, intent(in) :: isg, ieg, ndivs
integer, dimension(0:), intent(out) :: ibegin, iend
integer, dimension(0:), intent(in), optional :: extent
integer :: ndiv, imax, ndmax, ndmirror
integer :: is, ie, n
logical :: symmetrize, use_extent
!statement functions
logical :: even, odd
even(n) = (mod(n,2).EQ.0)
odd (n) = (mod(n,2).EQ.1)
use_extent = .false.
if(PRESENT(extent)) then
if( size(extent(:)).NE.ndivs ) &
call mpp_error( FATAL, 'mpp_compute_extent: extent array size must equal number of domain divisions.' )
use_extent = .true.
if(ALL(extent ==0)) use_extent = .false.
endif
is = isg
if(use_extent) then
ibegin(0) = isg
do ndiv = 0, ndivs-2
if(extent(ndiv) .LE. 0) call mpp_error( FATAL, 'mpp_compute_extent: domain extents must be positive definite.' )
iend(ndiv) = ibegin(ndiv) + extent(ndiv) - 1
ibegin(ndiv+1) = iend(ndiv) + 1
enddo
iend(ndivs-1) = ibegin(ndivs-1) + extent(ndivs-1) - 1
if(iend(ndivs-1) .NE. ieg) call mpp_error(FATAL, 'mpp_compute_extent: extent array limits do not match global domain.' )
else
do ndiv=0,ndivs-1
!modified for mirror-symmetry
!original line
! ie = is + CEILING( float(ieg-is+1)/(ndivs-ndiv) ) - 1
!problem of dividing nx points into n domains maintaining symmetry
!i.e nx=18 n=4 4554 and 5445 are solutions but 4455 is not.
!this will always work for nx even n even or odd
!this will always work for nx odd, n odd
!this will never work for nx odd, n even: for this case we supersede the mirror calculation
! symmetrize = .NOT. ( mod(ndivs,2).EQ.0 .AND. mod(ieg-isg+1,2).EQ.1 )
!nx even n odd fails if n>nx/2
symmetrize = ( even(ndivs) .AND. even(ieg-isg+1) ) .OR. &
( odd(ndivs) .AND. odd(ieg-isg+1) ) .OR. &
( odd(ndivs) .AND. even(ieg-isg+1) .AND. ndivs.LT.(ieg-isg+1)/2 )
!mirror domains are stored in the list and retrieved if required.
if( ndiv.EQ.0 )then
!initialize max points and max domains
imax = ieg
ndmax = ndivs
end if
!do bottom half of decomposition, going over the midpoint for odd ndivs
if( ndiv.LT.(ndivs-1)/2+1 )then
!domain is sized by dividing remaining points by remaining domains
ie = is + CEILING( REAL(imax-is+1)/(ndmax-ndiv) ) - 1
ndmirror = (ndivs-1) - ndiv !mirror domain
if( ndmirror.GT.ndiv .AND. symmetrize )then !only for domains over the midpoint
!mirror extents, the max(,) is to eliminate overlaps
ibegin(ndmirror) = max( isg+ieg-ie, ie+1 )
iend(ndmirror) = max( isg+ieg-is, ie+1 )
imax = ibegin(ndmirror) - 1
ndmax = ndmax - 1
end if
else
if( symmetrize )then
!do top half of decomposition by retrieving saved values
is = ibegin(ndiv)
ie = iend(ndiv)
else
ie = is + CEILING( REAL(imax-is+1)/(ndmax-ndiv) ) - 1
end if
end if
ibegin(ndiv) = is
iend(ndiv) = ie
if( ie.LT.is )call mpp_error( FATAL, &
'MPP_DEFINE_DOMAINS(mpp_compute_extent): domain extents must be positive definite.' )
if( ndiv.EQ.ndivs-1 .AND. iend(ndiv).NE.ieg ) &
call mpp_error( FATAL, 'mpp_compute_extent: domain extents do not span space completely.' )
is = ie + 1
end do
endif
end subroutine mpp_compute_extent
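!--- Illustrative example (assumed extents): isg=1, ieg=18, ndivs=4 yields the mirror-symmetric
!--- decomposition 5,4,4,5, i.e. ibegin=(/1,6,10,14/), iend=(/5,9,13,18/), matching the
!--- "4554 or 5445" discussion above.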
!#####################################################################
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_DEFINE_DOMAINS: define layout and decomposition !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! >
!
!
!
!
!
!
!
!
!
!routine to divide global array indices among domains, and assign domains to PEs
!domain is of type domain1D
!ARGUMENTS:
! global_indices(2)=(isg,ieg) gives the extent of global domain
! ndivs is number of divisions of domain: even divisions unless extent is present.
! domain is the returned domain1D
! pelist (optional) list of PEs to which domains are to be assigned (default 0...npes-1)
! size of pelist must correspond to number of mask=.TRUE. divisions
! flags define whether compute and data domains are global (undecomposed) and whether global domain has periodic boundaries
! halo (optional) defines halo width (currently the same on both sides)
! extent (optional) array defines width of each division (used for non-uniform domain decomp, for e.g load-balancing)
! maskmap (optional) a division whose maskmap=.FALSE. is not assigned to any domain
! By default we assume decomposition of compute and data domains, non-periodic boundaries, no halo, as close to uniform extents
! as the input parameters permit
subroutine mpp_define_domains1D( global_indices, ndivs, domain, pelist, flags, halo, extent, maskmap, &
memory_size, begin_halo, end_halo )
integer, intent(in) :: global_indices(:) !(/ isg, ieg /)
integer, intent(in) :: ndivs
type(domain1D), intent(inout) :: domain !declared inout so that existing links, if any, can be nullified
integer, intent(in), optional :: pelist(0:)
integer, intent(in), optional :: flags, halo
integer, intent(in), optional :: extent(0:)
logical, intent(in), optional :: maskmap(0:)
integer, intent(in), optional :: memory_size
integer, intent(in), optional :: begin_halo, end_halo
logical :: compute_domain_is_global, data_domain_is_global
integer :: ndiv, n, isg, ieg, i
integer, allocatable :: pes(:)
integer :: ibegin(0:ndivs-1), iend(0:ndivs-1)
logical :: mask(0:ndivs-1)
integer :: halosz, halobegin, haloend
integer :: errunit
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS1D: You must first call mpp_domains_init.' )
if(size(global_indices(:)) .NE. 2) call mpp_error(FATAL,"mpp_define_domains1D: size of global_indices should be 2")
!get global indices
isg = global_indices(1)
ieg = global_indices(2)
if( ndivs.GT.ieg-isg+1 )call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS1D: more divisions requested than rows available.' )
!get the list of PEs on which to assign domains; if pelist is absent use 0..npes-1
if( PRESENT(pelist) )then
if( .NOT.any(pelist.EQ.mpp_pe()) )then
errunit = stderr()
write( errunit,* )'pe=', mpp_pe(), ' pelist=', pelist
call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS1D: pe must be in pelist.' )
end if
allocate( pes(0:size(pelist(:))-1) )
pes(:) = pelist(:)
else
allocate( pes(0:mpp_npes()-1) )
call mpp_get_current_pelist(pes)
! pes(:) = (/ (i,i=0,mpp_npes()-1) /)
end if
!get number of real domains: 1 mask domain per PE in pes
mask = .TRUE. !default mask
if( PRESENT(maskmap) )then
if( size(maskmap(:)).NE.ndivs ) &
call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS1D: maskmap array size must equal number of domain divisions.' )
mask(:) = maskmap(:)
end if
if( count(mask).NE.size(pes(:)) ) &
call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS1D: number of TRUEs in maskmap array must match PE count.' )
!get halosize
halosz = 0
if( PRESENT(halo) ) then
halosz = halo
!--- if halo is present, begin_halo and end_halo should not be present
if(present(begin_halo) .OR. present(end_halo) ) call mpp_error(FATAL, &
"mpp_domains_define.inc: when halo is present, begin_halo and end_halo should not be present")
end if
halobegin = halosz; haloend = halosz
if(present(begin_halo)) halobegin = begin_halo
if(present(end_halo)) haloend = end_halo
halosz = max(halobegin, haloend)
!get flags
compute_domain_is_global = .FALSE.
data_domain_is_global = .FALSE.
domain%cyclic = .FALSE.
domain%goffset = 1
domain%loffset = 1
if( PRESENT(flags) )then
!NEW: obsolete flag global_compute_domain, since ndivs is non-optional and you cannot have global compute and ndivs.NE.1
compute_domain_is_global = ndivs.EQ.1
!if compute domain is global, data domain must also be
data_domain_is_global = BTEST(flags,GLOBAL) .OR. compute_domain_is_global
domain%cyclic = BTEST(flags,CYCLIC) .AND. halosz.NE.0
if(BTEST(flags,CYCLIC)) domain%goffset = 0
end if
!set up links list
allocate( domain%list(0:ndivs-1) )
!set global domain
domain%list(:)%global%begin = isg
domain%list(:)%global%end = ieg
domain%list(:)%global%size = ieg-isg+1
domain%list(:)%global%max_size = ieg-isg+1
domain%list(:)%global%is_global = .TRUE. !always
!get compute domain
if( compute_domain_is_global )then
domain%list(:)%compute%begin = isg
domain%list(:)%compute%end = ieg
domain%list(:)%compute%is_global = .TRUE.
domain%list(:)%pe = pes(:)
domain%pos = 0
else
domain%list(:)%compute%is_global = .FALSE.
n = 0
call mpp_compute_extent(isg, ieg, ndivs, ibegin, iend, extent)
do ndiv=0,ndivs-1
domain%list(ndiv)%compute%begin = ibegin(ndiv)
domain%list(ndiv)%compute%end = iend(ndiv)
if( mask(ndiv) )then
domain%list(ndiv)%pe = pes(n)
if( mpp_pe().EQ.pes(n) )domain%pos = ndiv
n = n + 1
else
domain%list(ndiv)%pe = NULL_PE
end if
end do
end if
domain%list(:)%compute%size = domain%list(:)%compute%end - domain%list(:)%compute%begin + 1
!get data domain
!data domain is at least equal to compute domain
domain%list(:)%data%begin = domain%list(:)%compute%begin
domain%list(:)%data%end = domain%list(:)%compute%end
domain%list(:)%data%is_global = .FALSE.
!apply global flags
if( data_domain_is_global )then
domain%list(:)%data%begin = isg
domain%list(:)%data%end = ieg
domain%list(:)%data%is_global = .TRUE.
end if
!apply margins
domain%list(:)%data%begin = domain%list(:)%data%begin - halobegin
domain%list(:)%data%end = domain%list(:)%data%end + haloend
domain%list(:)%data%size = domain%list(:)%data%end - domain%list(:)%data%begin + 1
!--- define the memory domain. If memory_size is not present or memory_size is 0, the memory domain size
!--- will be the same as the data domain size. If memory_size is present, it should be greater than
!--- or equal to the data domain size. The begin of the memory domain is always the same as that of the data domain.
domain%list(:)%memory%begin = domain%list(:)%data%begin
domain%list(:)%memory%end = domain%list(:)%data%end
if( present(memory_size) ) then
if(memory_size > 0) then
if( domain%list(domain%pos)%data%size > memory_size ) call mpp_error(FATAL, &
"mpp_domains_define.inc: data domain size is larger than memory domain size on this pe")
domain%list(:)%memory%end = domain%list(:)%memory%begin + memory_size - 1
end if
end if
domain%list(:)%memory%size = domain%list(:)%memory%end - domain%list(:)%memory%begin + 1
domain%list(:)%memory%is_global = domain%list(:)%data%is_global
domain%compute = domain%list(domain%pos)%compute
domain%data = domain%list(domain%pos)%data
domain%global = domain%list(domain%pos)%global
domain%memory = domain%list(domain%pos)%memory
domain%compute%max_size = MAXVAL( domain%list(:)%compute%size )
domain%data%max_size = MAXVAL( domain%list(:)%data%size )
domain%global%max_size = domain%global%size
domain%memory%max_size = domain%memory%size
!PV786667: the deallocate stmts can be removed when fixed (7.3.1.3m)
deallocate( pes )
return
end subroutine mpp_define_domains1D
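!--- Minimal usage sketch (assumed values), normally reached through the generic
!--- mpp_define_domains interface: decompose i=1..100 over 4 divisions with a halo of 2,
!---     call mpp_define_domains( (/1,100/), 4, domain, halo=2 )
!--- each division then has a compute size of 25 and a data domain extended by 2 on each side.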
!################################################################################
!--- define the IO domain.
subroutine mpp_define_io_domain(domain, io_layout)
type(domain2D), intent(inout) :: domain
integer, intent(in ) :: io_layout(2)
integer :: layout(2)
integer :: npes_in_group
type(domain2D), pointer :: io_domain=>NULL()
integer :: i, j, n, m
integer :: ipos, jpos, igroup, jgroup
integer :: ipos_beg, ipos_end, jpos_beg, jpos_end
integer :: whalo, ehalo, shalo, nhalo
integer :: npes_x, npes_y, ndivx, ndivy
integer, allocatable :: posarray(:,:)
if(io_layout(1) * io_layout(2) .LE. 0) then
call mpp_error(NOTE, &
"mpp_domains_define.inc(mpp_define_io_domain): io domain will not be defined for "//trim(domain%name)// &
" when one or both entries of io_layout are not positive")
return
endif
layout(1) = size(domain%x(1)%list(:))
layout(2) = size(domain%y(1)%list(:))
if(ASSOCIATED(domain%io_domain)) call mpp_error(FATAL, &
"mpp_domains_define.inc(mpp_define_io_domain): io_domain is already defined")
if(mod(layout(1), io_layout(1)) .NE. 0) call mpp_error(FATAL, &
"mpp_domains_define.inc(mpp_define_io_domain): "//trim(domain%name)//" domain layout(1) must be divided by io_layout(1)")
if(mod(layout(2), io_layout(2)) .NE. 0) call mpp_error(FATAL, &
"mpp_domains_define.inc(mpp_define_io_domain): "//trim(domain%name)//" domain layout(2) must be divided by io_layout(2)")
if(size(domain%x(:)) > 1) call mpp_error(FATAL, &
"mpp_domains_define.inc(mpp_define_io_domain): "//trim(domain%name)// &
": multiple tile per pe is not supported yet for this routine")
allocate(domain%io_domain)
domain%io_layout = io_layout
io_domain => domain%io_domain
! Find how many processors are in the group, taking into account that some of the region may be masked out.
npes_x = layout(1)/io_layout(1)
npes_y = layout(2)/io_layout(2)
ipos = mod(domain%x(1)%pos, npes_x)
jpos = mod(domain%y(1)%pos, npes_y)
igroup = domain%x(1)%pos/npes_x
jgroup = domain%y(1)%pos/npes_y
ipos_beg = igroup*npes_x; ipos_end = ipos_beg + npes_x - 1
jpos_beg = jgroup*npes_y; jpos_end = jpos_beg + npes_y - 1
npes_in_group = 0
do j = jpos_beg, jpos_end
do i = ipos_beg, ipos_end
if(domain%pearray(i,j) .NE. NULL_PE) npes_in_group = npes_in_group+1
enddo
enddo
io_domain%whalo = domain%whalo
io_domain%ehalo = domain%ehalo
io_domain%shalo = domain%shalo
io_domain%nhalo = domain%nhalo
io_domain%ntiles = 1
io_domain%pe = domain%pe
io_domain%symmetry = domain%symmetry
allocate(io_domain%list(0:npes_in_group-1))
do i = 0, npes_in_group-1
allocate( io_domain%list(i)%x(1), io_domain%list(i)%y(1), io_domain%list(i)%tile_id(1) )
enddo
ndivx = size(domain%pearray,1)
ndivy = size(domain%pearray,2)
allocate(posarray(0:ndivx-1, 0:ndivy-1))
n = domain%tile_root_pe - mpp_root_pe()
posarray = -1
do j = 0,ndivy-1
do i = 0,ndivx-1
if( domain%pearray(i,j) == NULL_PE) cycle
posarray(i,j) = n
n = n + 1
enddo
enddo
n = 0
do j = jpos_beg, jpos_end
do i = ipos_beg, ipos_end
if( domain%pearray(i,j) == NULL_PE) cycle
io_domain%list(n)%pe = domain%pearray(i,j)
m = posarray(i,j)
io_domain%list(n)%x(1)%compute = domain%list(m)%x(1)%compute
io_domain%list(n)%y(1)%compute = domain%list(m)%y(1)%compute
igroup = domain%list(m)%x(1)%pos/npes_x
jgroup = domain%list(m)%y(1)%pos/npes_y
io_domain%list(n)%tile_id(1) = jgroup*io_layout(1) + igroup
n = n + 1
enddo
enddo
deallocate(posarray)
allocate(io_domain%x(1), io_domain%y(1), io_domain%tile_id(1) )
allocate(io_domain%x(1)%list(0:npes_x-1), io_domain%y(1)%list(0:npes_y-1) )
n = -1
do j = jpos_beg, jpos_beg+jpos
do i = ipos_beg, ipos_beg+ipos
if(domain%pearray(i,j) .NE. NULL_PE) n = n + 1
enddo
enddo
io_domain%pos = n
io_domain%x(1)%compute = domain%x(1)%compute
io_domain%x(1)%data = domain%x(1)%data
io_domain%x(1)%memory = domain%x(1)%memory
io_domain%y(1)%compute = domain%y(1)%compute
io_domain%y(1)%data = domain%y(1)%data
io_domain%y(1)%memory = domain%y(1)%memory
io_domain%x(1)%global%begin = domain%x(1)%list(ipos_beg)%compute%begin
io_domain%x(1)%global%end = domain%x(1)%list(ipos_end)%compute%end
io_domain%x(1)%global%size = io_domain%x(1)%global%end - io_domain%x(1)%global%begin + 1
io_domain%x(1)%global%max_size = io_domain%x(1)%global%size
io_domain%y(1)%global%begin = domain%y(1)%list(jpos_beg)%compute%begin
io_domain%y(1)%global%end = domain%y(1)%list(jpos_end)%compute%end
io_domain%y(1)%global%size = io_domain%y(1)%global%end - io_domain%y(1)%global%begin + 1
io_domain%y(1)%global%max_size = io_domain%y(1)%global%size
io_domain%x(1)%pos = ipos
io_domain%y(1)%pos = jpos
io_domain%tile_id(1) = io_domain%list(n)%tile_id(1)
io_domain%tile_root_pe = io_domain%list(0)%pe
!z1l
!!$ do j = 0, npes_y - 1
!!$ n = j*npes_x + ipos
!!$ io_domain%y(1)%list(j) = io_domain%list(n)%y(1)
!!$ enddo
!!$ do i = 0, npes_x - 1
!!$ n = jpos*npes_x + i
!!$ io_domain%x(1)%list(i) = io_domain%list(n)%x(1)
!!$ enddo
whalo = domain%whalo
ehalo = domain%ehalo
shalo = domain%shalo
nhalo = domain%nhalo
io_domain=>NULL()
end subroutine mpp_define_io_domain
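!--- Illustrative example (assumed layout): with an 8x8 PE layout and io_layout=(/2,2/), each
!--- io group spans npes_x*npes_y = 4*4 = 16 PEs and the io-domain tile_id runs 0..3 over the
!--- 2x2 arrangement of groups.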
!
!
!
!
!
!
!
!
!
!
!
subroutine mpp_define_domains2D( global_indices, layout, domain, pelist, xflags, yflags, &
xhalo, yhalo, xextent, yextent, maskmap, name, symmetry, memory_size, &
whalo, ehalo, shalo, nhalo, is_mosaic, tile_count, tile_id, complete, x_cyclic_offset, y_cyclic_offset )
!define 2D data and computational domain on global rectilinear cartesian domain (isg:ieg,jsg:jeg) and assign them to PEs
integer, intent(in) :: global_indices(:) !(/ isg, ieg, jsg, jeg /)
integer, intent(in) :: layout(:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: pelist(0:)
integer, intent(in), optional :: xflags, yflags, xhalo, yhalo
integer, intent(in), optional :: xextent(0:), yextent(0:)
logical, intent(in), optional :: maskmap(0:,0:)
character(len=*), intent(in), optional :: name
logical, intent(in), optional :: symmetry
logical, intent(in), optional :: is_mosaic ! indicate if calling mpp_define_domains from mpp_define_mosaic.
integer, intent(in), optional :: memory_size(:)
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! halo size for West, East, South and North direction.
! if whalo and ehalo is not present,
! will take the value of xhalo
! if shalo and nhalo is not present,
! will take the value of yhalo
integer, intent(in), optional :: tile_count ! tile number on current pe, default value is 1
! this is for the situation that multiple tiles on one processor.
integer, intent(in), optional :: tile_id ! tile id
logical, intent(in), optional :: complete ! true indicate mpp_define_domain is completed for mosaic definition.
integer, intent(in), optional :: x_cyclic_offset ! offset for x-cyclic boundary condition,
! (0,j) = (ni, mod(j+x_cyclic_offset,nj))
! (ni+1,j) = ( 1, mod(j+nj-x_cyclic_offset,nj) )
integer, intent(in), optional :: y_cyclic_offset ! offset for y-cyclic boundary condition
! (i,0) = (mod(i+y_cyclic_offset,ni), nj))
! (i,nj+1) = (mod(mod(i+ni-y_cyclic_offset,ni), 1) )
integer :: i, j, m, n, xhalosz, yhalosz, memory_xsize, memory_ysize
integer :: whalosz, ehalosz, shalosz, nhalosz
integer :: ipos, jpos, pos, tile, nlist, cur_tile_id
integer :: ndivx, ndivy, isg, ieg, jsg, jeg, ishift, jshift, errunit, logunit
integer :: x_offset, y_offset, start_pos, nfold
logical :: from_mosaic, is_complete
logical :: mask(0:layout(1)-1,0:layout(2)-1)
integer, allocatable :: pes(:), pesall(:)
integer :: pearray(0:layout(1)-1,0:layout(2)-1)
integer :: ibegin(0:layout(1)-1), iend(0:layout(1)-1)
integer :: jbegin(0:layout(2)-1), jend(0:layout(2)-1)
character(len=8) :: text
type(overlapSpec), pointer :: update=>NULL()
type(overlapSpec), pointer :: check_T => NULL()
character(len=1) :: position
integer :: msgsize, l, p, is, ie, js, je, from_pe
integer :: outunit
logical :: send(8), recv(8)
outunit = stdout()
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS2D: You must first call mpp_domains_init.' )
if(PRESENT(name)) then
if(len_trim(name) > NAME_LENGTH) call mpp_error(FATAL, &
"mpp_domains_define.inc(mpp_define_domains2D): the len_trim of optional argument name ="//trim(name)// &
" is greater than NAME_LENGTH, change the argument name or increase NAME_LENGTH")
domain%name = name
endif
if(size(global_indices(:)) .NE. 4) call mpp_error(FATAL, &
"mpp_define_domains2D: size of global_indices should be 4 for "//trim(domain%name) )
if(size(layout(:)) .NE. 2) call mpp_error(FATAL,"mpp_define_domains2D: size of layout should be 2 for "//trim(domain%name) )
ndivx = layout(1); ndivy = layout(2)
isg = global_indices(1); ieg = global_indices(2); jsg = global_indices(3); jeg = global_indices(4)
from_mosaic = .false.
if(present(is_mosaic)) from_mosaic = is_mosaic
is_complete = .true.
if(present(complete)) is_complete = complete
tile = 1
if(present(tile_count)) tile = tile_count
cur_tile_id = 1
if(present(tile_id)) cur_tile_id = tile_id
if( PRESENT(pelist) )then
allocate( pes(0:size(pelist(:))-1) )
pes = pelist
if(from_mosaic) then
allocate( pesall(0:mpp_npes()-1) )
call mpp_get_current_pelist(pesall)
else
allocate( pesall(0:size(pes(:))-1) )
pesall = pes
end if
else
allocate( pes(0:mpp_npes()-1) )
allocate( pesall(0:mpp_npes()-1) )
call mpp_get_current_pelist(pes)
pesall = pes
end if
!--- at least one of x_cyclic_offset and y_cyclic_offset must be zero
!--- folded boundary conditions are not supported when either x_cyclic_offset or y_cyclic_offset is nonzero.
!--- Since only the folded-north boundary condition is currently implemented, we only consider y-flags.
x_offset = 0; y_offset = 0
if(PRESENT(x_cyclic_offset)) x_offset = x_cyclic_offset
if(PRESENT(y_cyclic_offset)) y_offset = y_cyclic_offset
if(x_offset*y_offset .NE. 0) call mpp_error(FATAL, &
'MPP_DEFINE_DOMAINS2D: At least one of x_cyclic_offset and y_cyclic_offset must be zero for '//trim(domain%name))
!--- x_cyclic_offset and y_cyclic_offset should no larger than the global grid size.
if(abs(x_offset) > jeg-jsg+1) call mpp_error(FATAL, &
'MPP_DEFINE_DOMAINS2D: absolute value of x_cyclic_offset is greater than jeg-jsg+1 for '//trim(domain%name))
if(abs(y_offset) > ieg-isg+1) call mpp_error(FATAL, &
'MPP_DEFINE_DOMAINS2D: absolute value of y_cyclic_offset is greater than ieg-isg+1 for '//trim(domain%name))
!--- when there is more than one tile on one processor, all the tiles must be confined to this processor
if( tile > 1 .AND. size(pes(:)) > 1) call mpp_error(FATAL, &
'MPP_DEFINE_DOMAINS2D: there are more than one tile on this pe, '// &
'all the tile should be limited on this pe for '//trim(domain%name))
!--- the position of the current pe is changed due to mosaic, because pes
!--- is only part of the mosaic pelist (pesall). We assume the pe
!--- distribution is contiguous within the mosaic.
pos = -1
do n = 0, size(pesall(:))-1
if(pesall(n) == mpp_pe() ) then
pos = n
exit
endif
enddo
if(pos<0) call mpp_error(FATAL, 'MPP_DEFINE_DOMAINS2D: mpp_pe() is not in the pesall list')
domain%symmetry = .FALSE.
if(present(symmetry)) domain%symmetry = symmetry
if(domain%symmetry) then
ishift = 1; jshift = 1
else
ishift = 0; jshift = 0
end if
!--- first compute domain decomposition.
call mpp_compute_extent(isg, ieg, ndivx, ibegin, iend, xextent)
call mpp_compute_extent(jsg, jeg, ndivy, jbegin, jend, yextent)
xhalosz = 0; yhalosz = 0
if(present(xhalo)) xhalosz = xhalo
if(present(yhalo)) yhalosz = yhalo
whalosz = xhalosz; ehalosz = xhalosz
shalosz = yhalosz; nhalosz = yhalosz
if(present(whalo)) whalosz = whalo
if(present(ehalo)) ehalosz = ehalo
if(present(shalo)) shalosz = shalo
if(present(nhalo)) nhalosz = nhalo
!--- configure maskmap
mask = .TRUE.
if( PRESENT(maskmap) )then
if( size(maskmap,1).NE.ndivx .OR. size(maskmap,2).NE.ndivy ) &
call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS2D: maskmap array does not match layout for '//trim(domain%name) )
mask(:,:) = maskmap(:,:)
end if
!number of unmask domains in layout must equal number of PEs assigned
n = count(mask)
if( n.NE.size(pes(:)) )then
write( text,'(i8)' )n
call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS2D: incorrect number of PEs assigned for ' // &
'this layout and maskmap. Use '//text//' PEs for this domain decomposition for '//trim(domain%name) )
end if
memory_xsize = 0; memory_ysize = 0
if(present(memory_size)) then
if(size(memory_size(:)) .NE. 2) call mpp_error(FATAL, &
"mpp_define_domains2D: size of memory_size should be 2 for "//trim(domain%name))
memory_xsize = memory_size(1)
memory_ysize = memory_size(2)
end if
!--- set up domain%list.
!--- set up 2-D domain decomposition for T, E, C, N and computing overlapping
!--- when current tile is the last tile in the mosaic.
nlist = size(pesall(:))
if( .NOT. Associated(domain%x) ) then
allocate(domain%tileList(1))
domain%tileList(1)%xbegin = global_indices(1)
domain%tileList(1)%xend = global_indices(2)
domain%tileList(1)%ybegin = global_indices(3)
domain%tileList(1)%yend = global_indices(4)
allocate(domain%x(1), domain%y(1) )
allocate(domain%tile_id(1))
domain%tile_id = cur_tile_id
domain%ntiles = 1
domain%max_ntile_pe = 1
domain%ncontacts = 0
domain%rotated_ninety = .FALSE.
allocate( domain%list(0:nlist-1) )
do i = 0, nlist-1
allocate( domain%list(i)%x(1), domain%list(i)%y(1), domain%list(i)%tile_id(1) )
end do
end if
domain%initialized = .true.
start_pos = 0
do n = 0, nlist-1
if(pesall(n) == pes(0)) then
start_pos = n
exit
endif
enddo
!place on PE array; need flag to assign them to j first and then i
pearray(:,:) = NULL_PE
ipos = NULL_PE; jpos = NULL_PE
n = 0
m = start_pos
do j = 0,ndivy-1
do i = 0,ndivx-1
if( mask(i,j) )then
pearray(i,j) = pes(n)
domain%list(m)%x(tile)%compute%begin = ibegin(i)
domain%list(m)%x(tile)%compute%end = iend(i)
domain%list(m)%y(tile)%compute%begin = jbegin(j)
domain%list(m)%y(tile)%compute%end = jend(j)
domain%list(m)%x(tile)%compute%size = domain%list(m)%x(tile)%compute%end - domain%list(m)%x(tile)%compute%begin + 1
domain%list(m)%y(tile)%compute%size = domain%list(m)%y(tile)%compute%end - domain%list(m)%y(tile)%compute%begin + 1
domain%list(m)%tile_id(tile) = cur_tile_id
domain%list(m)%x(tile)%pos = i
domain%list(m)%y(tile)%pos = j
domain%list(m)%tile_root_pe = pes(0)
domain%list(m)%pe = pesall(m)
if( pes(n).EQ.mpp_pe() )then
ipos = i
jpos = j
end if
n = n + 1
m = m + 1
end if
end do
end do
!For a mosaic, the following will only be done on the pes in the pelist.
!When there is only one tile, all the current pes will be in the pelist.
if( ANY(pes == mpp_pe()) ) then
domain%io_layout = layout
domain%tile_root_pe = pes(0)
if( ipos.EQ.NULL_PE .OR. jpos.EQ.NULL_PE ) &
call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS2D: pelist must include this PE for '//trim(domain%name) )
if( debug ) then
errunit = stderr()
write( errunit, * )'pe, tile, ipos, jpos=', mpp_pe(), tile, ipos, jpos, ' pearray(:,jpos)=', &
pearray(:,jpos), ' pearray(ipos,:)=', pearray(ipos,:)
endif
!--- when tile is not equal to 1, the layout for that tile is always (1,1), so there is no need for pearray in domain
if( tile == 1 ) then
allocate( domain%pearray(0:ndivx-1,0:ndivy-1) )
domain%pearray = pearray
end if
domain%pe = mpp_pe()
domain%pos = pos
domain_cnt = domain_cnt + INT(1,KIND=8)
domain%id = domain_cnt*DOMAIN_ID_BASE ! Must be 8 arithmetic
!do domain decomposition using 1D versions in X and Y,
call mpp_define_domains( global_indices(1:2), ndivx, domain%x(tile), &
pack(pearray(:,jpos),mask(:,jpos)), xflags, xhalo, xextent, mask(:,jpos), memory_xsize, whalo, ehalo )
call mpp_define_domains( global_indices(3:4), ndivy, domain%y(tile), &
pack(pearray(ipos,:),mask(ipos,:)), yflags, yhalo, yextent, mask(ipos,:), memory_ysize, shalo, nhalo )
if( domain%x(tile)%list(ipos)%pe.NE.domain%y(tile)%list(jpos)%pe ) &
call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS2D: domain%x%list(ipos)%pe.NE.domain%y%list(jpos)%pe.' )
!--- when x_cyclic_offset or y_cyclic_offset is set, no cross domain is allowed
if(x_offset .NE. 0 .OR. y_offset .NE. 0) then
if(whalosz .GT. domain%x(tile)%compute%size .OR. ehalosz .GT. domain%x(tile)%compute%size ) &
call mpp_error(FATAL, "mpp_define_domains_2d: when x_cyclic_offset/y_cyclic_offset is set, "// &
"whalo and ehalo must be no larger than the x-direction computation domain size")
if(shalosz .GT. domain%y(tile)%compute%size .OR. nhalosz .GT. domain%y(tile)%compute%size ) &
call mpp_error(FATAL, "mpp_define_domains_2d: when x_cyclic_offset/y_cyclic_offset is set, "// &
"shalo and nhalo must be no larger than the y-direction computation domain size")
endif
!--- restrict the halo size to be no larger than the global domain size.
if(whalosz .GT. domain%x(tile)%global%size) &
call mpp_error(FATAL, "MPP_DEFINE_DOMAINS2D: whalo is greater than global domain size")
if(ehalosz .GT. domain%x(tile)%global%size) &
call mpp_error(FATAL, "MPP_DEFINE_DOMAINS2D: ehalo is greater than global domain size")
if(shalosz .GT. domain%x(tile)%global%size) &
call mpp_error(FATAL, "MPP_DEFINE_DOMAINS2D: shalo is greater than global domain size")
if(nhalosz .GT. domain%x(tile)%global%size) &
call mpp_error(FATAL, "MPP_DEFINE_DOMAINS2D: nhalo is greater than global domain size")
!set up fold, when the boundary is folded, there is only one tile.
domain%fold = 0
nfold = 0
if( PRESENT(xflags) )then
if( BTEST(xflags,WEST) ) then
!--- make sure no cross-domain in x-direction
if(domain%x(tile)%data%begin .LE. domain%x(tile)%global%begin .AND. &
domain%x(tile)%compute%begin > domain%x(tile)%global%begin ) then
call mpp_error(FATAL, &
'MPP_DEFINE_DOMAINS: the domain could not be crossed when west is folded')
endif
if( domain%x(tile)%cyclic )call mpp_error( FATAL, &
'MPP_DEFINE_DOMAINS: an axis cannot be both folded west and cyclic for '//trim(domain%name) )
domain%fold = domain%fold + FOLD_WEST_EDGE
nfold = nfold+1
endif
if( BTEST(xflags,EAST) ) then
!--- make sure no cross-domain in x-direction
if(domain%x(tile)%data%end .GE. domain%x(tile)%global%end .AND. &
domain%x(tile)%compute%end < domain%x(tile)%global%end ) then
call mpp_error(FATAL, &
'MPP_DEFINE_DOMAINS: the domain could not be crossed when east is folded')
endif
if( domain%x(tile)%cyclic )call mpp_error( FATAL, &
'MPP_DEFINE_DOMAINS: an axis cannot be both folded east and cyclic for '//trim(domain%name) )
domain%fold = domain%fold + FOLD_EAST_EDGE
nfold = nfold+1
endif
endif
if( PRESENT(yflags) )then
if( BTEST(yflags,SOUTH) ) then
!--- make sure no cross-domain in y-direction
if(domain%y(tile)%data%begin .LE. domain%y(tile)%global%begin .AND. &
domain%y(tile)%compute%begin > domain%y(tile)%global%begin ) then
call mpp_error(FATAL, &
'MPP_DEFINE_DOMAINS: the domain could not be crossed when south is folded')
endif
if( domain%y(tile)%cyclic )call mpp_error( FATAL, &
'MPP_DEFINE_DOMAINS: an axis cannot be both folded south and cyclic for '//trim(domain%name))
domain%fold = domain%fold + FOLD_SOUTH_EDGE
nfold = nfold+1
endif
if( BTEST(yflags,NORTH) ) then
!--- when the halo size is big and the halo region crosses the neighbor domain, we
!--- restrict the halo size to be less than half of the global size.
if(whalosz .GT. domain%x(tile)%compute%size .AND. whalosz .GE. domain%x(tile)%global%size/2 ) &
call mpp_error(FATAL, "MPP_DEFINE_DOMAINS2D: north is folded, whalo .GT. compute domain size "// &
"and whalo .GE. half of global domain size")
if(ehalosz .GT. domain%x(tile)%compute%size .AND. ehalosz .GE. domain%x(tile)%global%size/2 ) &
call mpp_error(FATAL, "MPP_DEFINE_DOMAINS2D: north is folded, ehalo is .GT. compute domain size "// &
"and ehalo .GE. half of global domain size")
if(shalosz .GT. domain%y(tile)%compute%size .AND. shalosz .GE. domain%x(tile)%global%size/2 ) &
call mpp_error(FATAL, "MPP_DEFINE_DOMAINS2D: north is folded, shalo .GT. compute domain size "// &
"and shalo .GE. half of global domain size")
if(nhalosz .GT. domain%y(tile)%compute%size .AND. nhalosz .GE. domain%x(tile)%global%size/2 ) &
call mpp_error(FATAL, "MPP_DEFINE_DOMAINS2D: north is folded, nhalo .GT. compute domain size "// &
"and nhalo .GE. half of global domain size")
if( domain%y(tile)%cyclic )call mpp_error( FATAL, &
'MPP_DEFINE_DOMAINS: an axis cannot be both folded north and cyclic for '//trim(domain%name) )
domain%fold = domain%fold + FOLD_NORTH_EDGE
nfold = nfold+1
endif
endif
if(nfold > 1) call mpp_error(FATAL, &
'MPP_DEFINE_DOMAINS2D: number of folded edge is greater than 1 for '//trim(domain%name) )
if(nfold == 1) then
if( x_offset .NE. 0 .OR. y_offset .NE. 0) call mpp_error(FATAL, &
'MPP_DEFINE_DOMAINS2D: For the folded_north/folded_south/folded_east/folded_west boundary condition, '// &
'x_cyclic_offset and y_cyclic_offset must be zero for '//trim(domain%name))
endif
if( BTEST(domain%fold,SOUTH) .OR. BTEST(domain%fold,NORTH) )then
if( domain%y(tile)%cyclic )call mpp_error( FATAL, &
'MPP_DEFINE_DOMAINS: an axis cannot be both folded and cyclic for '//trim(domain%name) )
if( modulo(domain%x(tile)%global%size,2).NE.0 ) &
call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS: number of points in X must be even ' // &
'when there is a fold in Y for '//trim(domain%name) )
!check if folded domain boundaries line up in X: compute domains lining up is a sufficient condition for symmetry
n = ndivx - 1
do i = 0,n/2
if( domain%x(tile)%list(i)%compute%size.NE.domain%x(tile)%list(n-i)%compute%size ) &
call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS: Folded domain boundaries ' // &
'must line up (mirror-symmetric extents) for '//trim(domain%name) )
end do
end if
if( BTEST(domain%fold,WEST) .OR. BTEST(domain%fold,EAST) )then
if( domain%x(tile)%cyclic )call mpp_error( FATAL, &
'MPP_DEFINE_DOMAINS: an axis cannot be both folded and cyclic for '//trim(domain%name) )
if( modulo(domain%y(tile)%global%size,2).NE.0 ) &
call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS: number of points in Y must be even '//&
'when there is a fold in X for '//trim(domain%name) )
!check if folded domain boundaries line up in Y: compute domains lining up is a sufficient condition for symmetry
n = ndivy - 1
do i = 0,n/2
if( domain%y(tile)%list(i)%compute%size.NE.domain%y(tile)%list(n-i)%compute%size ) &
call mpp_error( FATAL, 'MPP_DEFINE_DOMAINS: Folded domain boundaries must '//&
'line up (mirror-symmetric extents) for '//trim(domain%name) )
end do
end if
!set up domain%list
if( mpp_pe().EQ.pes(0) .AND. PRESENT(name) )then
logunit = stdlog()
write( logunit, '(/a,i5,a,i5)' )trim(name)//' domain decomposition: ', ndivx, ' X', ndivy
write( logunit, '(3x,a)' )'pe, is, ie, js, je, isd, ied, jsd, jed'
end if
end if ! if( ANY(pes == mpp_pe()) )
if(is_complete) then
domain%whalo = whalosz; domain%ehalo = ehalosz
domain%shalo = shalosz; domain%nhalo = nhalosz
allocate(domain%update_T, domain%update_E, domain%update_C, domain%update_N)
domain%update_T%next => NULL()
domain%update_E%next => NULL()
domain%update_C%next => NULL()
domain%update_N%next => NULL()
allocate(domain%check_E, domain%check_C, domain%check_N )
domain%update_T%nsend = 0
domain%update_T%nrecv = 0
domain%update_C%nsend = 0
domain%update_C%nrecv = 0
domain%update_E%nsend = 0
domain%update_E%nrecv = 0
domain%update_N%nsend = 0
domain%update_N%nrecv = 0
if( BTEST(domain%fold,SOUTH) ) then
call compute_overlaps_fold_south(domain, CENTER, 0, 0)
call compute_overlaps_fold_south(domain, CORNER, ishift, jshift)
call compute_overlaps_fold_south(domain, EAST, ishift, 0)
call compute_overlaps_fold_south(domain, NORTH, 0, jshift)
else if( BTEST(domain%fold,WEST) ) then
call compute_overlaps_fold_west(domain, CENTER, 0, 0)
call compute_overlaps_fold_west(domain, CORNER, ishift, jshift)
call compute_overlaps_fold_west(domain, EAST, ishift, 0)
call compute_overlaps_fold_west(domain, NORTH, 0, jshift)
else if( BTEST(domain%fold,EAST) ) then
call compute_overlaps_fold_east(domain, CENTER, 0, 0)
call compute_overlaps_fold_east(domain, CORNER, ishift, jshift)
call compute_overlaps_fold_east(domain, EAST, ishift, 0)
call compute_overlaps_fold_east(domain, NORTH, 0, jshift)
else
call compute_overlaps(domain, CENTER, domain%update_T, check_T, 0, 0, x_offset, y_offset, &
domain%whalo, domain%ehalo, domain%shalo, domain%nhalo)
call compute_overlaps(domain, CORNER, domain%update_C, domain%check_C, ishift, jshift, x_offset, y_offset, &
domain%whalo, domain%ehalo, domain%shalo, domain%nhalo)
call compute_overlaps(domain, EAST, domain%update_E, domain%check_E, ishift, 0, x_offset, y_offset, &
domain%whalo, domain%ehalo, domain%shalo, domain%nhalo)
call compute_overlaps(domain, NORTH, domain%update_N, domain%check_N, 0, jshift, x_offset, y_offset, &
domain%whalo, domain%ehalo, domain%shalo, domain%nhalo)
endif
call check_overlap_pe_order(domain, domain%update_T, trim(domain%name)//" update_T in mpp_define_domains")
call check_overlap_pe_order(domain, domain%update_C, trim(domain%name)//" update_C in mpp_define_domains")
call check_overlap_pe_order(domain, domain%update_E, trim(domain%name)//" update_E in mpp_define_domains")
call check_overlap_pe_order(domain, domain%update_N, trim(domain%name)//" update_N in mpp_define_domains")
!--- when ncontacts is nonzero, set_check_overlap will be called in mpp_define
if(domain%symmetry .AND. (domain%ncontacts == 0 .OR. domain%ntiles == 1) ) then
call set_check_overlap( domain, CORNER )
call set_check_overlap( domain, EAST )
call set_check_overlap( domain, NORTH )
allocate(domain%bound_E, domain%bound_C, domain%bound_N )
call set_bound_overlap( domain, CORNER )
call set_bound_overlap( domain, EAST )
call set_bound_overlap( domain, NORTH )
end if
call set_domain_comm_inf(domain%update_T)
call set_domain_comm_inf(domain%update_E)
call set_domain_comm_inf(domain%update_C)
call set_domain_comm_inf(domain%update_N)
end if
!--- check that the send and recv sizes match.
!--- For a mosaic with ntiles>1 and contacts,
!--- the check will be done in mpp_define_mosaic
if(debug_message_passing .and. (domain%ncontacts == 0 .OR. domain%ntiles == 1) ) then
send = .true.
recv = .true.
call check_message_size(domain, domain%update_T, send, recv, 'T')
call check_message_size(domain, domain%update_E, send, recv, 'E')
call check_message_size(domain, domain%update_C, send, recv, 'C')
call check_message_size(domain, domain%update_N, send, recv, 'N')
endif
!print out the decomposition; this does not take maskmap into account.
if( mpp_pe() .EQ. pes(0) .AND. PRESENT(name) )then
write(*,*) trim(name)//' domain decomposition'
write(*,'(a,i4,a,i4,a,i4,a,i4)')'whalo = ', whalosz, ", ehalo = ", ehalosz, ", shalo = ", shalosz, ", nhalo = ", nhalosz
write (*,110) (domain%x(1)%list(i)%compute%size, i= 0, layout(1)-1)
write (*,120) (domain%y(1)%list(i)%compute%size, i= 0, layout(2)-1)
110 format (' X-AXIS = ',24i4,/,(11x,24i4))
120 format (' Y-AXIS = ',24i4,/,(11x,24i4))
endif
deallocate( pes, pesall)
return
end subroutine mpp_define_domains2D
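!--- Minimal 2D usage sketch (assumed values; flag names as defined in mpp_domains_mod):
!---     call mpp_define_domains( (/1,360,1,180/), (/4,4/), domain, &
!---                              xflags=CYCLIC_GLOBAL_DOMAIN, whalo=2, ehalo=2, shalo=2, nhalo=2, &
!---                              name='test domain' )
!--- decomposes a 360x180 grid over 16 PEs with cyclic east-west boundaries and 2-point halos.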
!#####################################################################
subroutine check_message_size(domain, update, send, recv, position)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update
logical, intent(in) :: send(:)
logical, intent(in) :: recv(:)
character, intent(in) :: position
integer, dimension(0:size(domain%list(:))-1) :: msg1, msg2, msg3
integer :: m, n, l, dir, is, ie, js, je, from_pe, msgsize
integer :: nlist
nlist = size(domain%list(:))
msg1 = 0
msg2 = 0
do m = 1, update%nrecv
msgsize = 0
do n = 1, update%recv(m)%count
dir = update%recv(m)%dir(n)
if( recv(dir) ) then
is = update%recv(m)%is(n); ie = update%recv(m)%ie(n)
js = update%recv(m)%js(n); je = update%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
endif
end do
from_pe = update%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(l) = msgsize
enddo
do m = 1, update%nsend
msgsize = 0
do n = 1, update%send(m)%count
dir = update%send(m)%dir(n)
if(send(dir))then
is = update%send(m)%is(n); ie = update%send(m)%ie(n)
js = update%send(m)%js(n); je = update%send(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
endif
end do
l = update%send(m)%pe-mpp_root_pe()
msg3(l) = msgsize
call mpp_send( msg3(l), plen=1, to_pe=update%send(m)%pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",at position=",position,",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_define_domains2D: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
end subroutine check_message_size
!#####################################################################
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_define_mosaic: define mosaic domain !
! NOTE: xflags and yflags are not in mpp_define_mosaic, because such relations    !
!       are already defined by the mosaic contact definitions.                    !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!??? do we need optional argument xextent and yextent
!??? how to specify pelist, we may use two dimensional variable pelist to represent.
!z1l: We assume the tile list is always limited to 1, 2, ..., num_tile. If we want
! to remove this limitation, we need to add one more argument, tilelist.
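!--- Illustrative sketch (not part of the library code): defining a single-tile mosaic whose
!--- east and west edges touch through one cyclic contact. All index values are hypothetical,
!--- "domain" is a type(domain2D) variable declared by the caller, and an 8-pe run with
!--- mpp_root_pe()=0 and a 4x2 layout is assumed.
!      integer :: gindices(4,1), lay(2,1)
!      gindices(:,1) = (/ 1, 360, 1, 180 /)    ! isg, ieg, jsg, jeg of the single tile
!      lay(:,1)      = (/ 4, 2 /)              ! 4x2 processor layout for this tile
!      call mpp_define_mosaic( gindices, lay, domain, num_tile=1, num_contact=1,             &
!                              tile1=(/1/), tile2=(/1/),                                     &
!                              istart1=(/360/), iend1=(/360/), jstart1=(/1/), jend1=(/180/), &
!                              istart2=(/1/),   iend2=(/1/),   jstart2=(/1/), jend2=(/180/), &
!                              pe_start=(/0/), pe_end=(/7/),                                 &
!                              whalo=2, ehalo=2, shalo=2, nhalo=2, name='cyclic_tile' )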
subroutine mpp_define_mosaic( global_indices, layout, domain, num_tile, num_contact, tile1, tile2, &
istart1, iend1, jstart1, jend1, istart2, iend2, jstart2, jend2, pe_start, &
pe_end, pelist, whalo, ehalo, shalo, nhalo, xextent, yextent, &
maskmap, name, memory_size, symmetry, xflags, yflags, tile_id )
integer, intent(in) :: global_indices(:,:) ! The size of the first dimension is 4: (/ isg, ieg, jsg, jeg /).
! The size of the second dimension is the number of tiles in the mosaic.
integer, intent(in) :: layout(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in) :: num_tile ! number of tiles in the mosaic
integer, intent(in) :: num_contact ! number of contact region between tiles.
integer, intent(in) :: tile1(:), tile2(:) ! tile number
integer, intent(in) :: istart1(:), iend1(:) ! i-index in tile_1 of contact region
integer, intent(in) :: jstart1(:), jend1(:) ! j-index in tile_1 of contact region
integer, intent(in) :: istart2(:), iend2(:) ! i-index in tile_2 of contact region
integer, intent(in) :: jstart2(:), jend2(:) ! j-index in tile_2 of contact region
integer, intent(in) :: pe_start(:) ! start pe of the pelist used in each tile
integer, intent(in) :: pe_end(:) ! end pe of the pelist used in each tile
integer, intent(in), optional :: pelist(:) ! list of processors used in mosaic
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
integer, intent(in), optional :: xextent(:,:), yextent(:,:)
logical, intent(in), optional :: maskmap(:,:,:)
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: memory_size(2)
logical, intent(in), optional :: symmetry
integer, intent(in), optional :: xflags, yflags
integer, intent(in), optional :: tile_id(:) ! tile_id of each tile in the mosaic
integer :: n, m, ndivx, ndivy, nc, nlist, nt, pos, n1, n2
integer :: whalosz, ehalosz, shalosz, nhalosz, xhalosz, yhalosz, t1, t2, tile
integer :: flags_x, flags_y
logical, allocatable :: mask(:,:)
integer, allocatable :: pes(:), xext(:), yext(:), pelist_tile(:), ntile_per_pe(:), tile_count(:)
integer, allocatable :: tile_id_local(:)
logical :: is_symmetry
integer, allocatable :: align1(:), align2(:), is1(:), ie1(:), js1(:), je1(:), is2(:), ie2(:), js2(:), je2(:)
integer, allocatable :: isgList(:), iegList(:), jsgList(:), jegList(:)
real, allocatable :: refine1(:), refine2(:)
type(overlapSpec), pointer :: update=>NULL()
character(len=1) :: position
integer :: msgsize, l, p, is, ie, js, je, from_pe
integer, allocatable :: msg1(:), msg2(:), msg3(:)
integer :: outunit
logical :: send(8), recv(8)
outunit = stdout()
mosaic_defined = .true.
!--- the size of the first dimension of global_indices must be 4.
if(size(global_indices, 1) .NE. 4) call mpp_error(FATAL, &
'mpp_domains_define.inc: The size of first dimension of global_indices is not 4')
!--- the size of the second dimension of global_indices must be num_tile
if(size(global_indices, 2) .NE. num_tile) call mpp_error(FATAL, &
'mpp_domains_define.inc: The size of second dimension of global_indices is not equal num_tile')
!--- the size of the first dimension of layout must be 2; the size of the second dimension must equal num_tile.
if(size(layout, 1) .NE. 2) call mpp_error(FATAL, &
'mpp_domains_define.inc: The size of first dimension of layout is not 2')
if(size(layout,2) .NE. num_tile) call mpp_error(FATAL, &
'mpp_domains_define.inc: The size of second dimension of layout is not equal num_tile')
!--- setup pelist for the mosaic ---------------------
nlist = mpp_npes()
allocate(pes(0:nlist-1))
if(present(pelist)) then
if( nlist .NE. size(pelist(:))) call mpp_error(FATAL, &
'mpp_domains_define.inc: size of pelist is not equal mpp_npes')
pes = pelist
else
call mpp_get_current_pelist(pes)
end if
!--- pelist should be monotonically increasing by 1.
do n = 1, nlist-1
if(pes(n) - pes(n-1) .NE. 1) call mpp_error(FATAL, &
'mpp_domains_define.inc: pelist is not monotonic increasing by 1')
end do
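!--- e.g. a pelist of (/ 4, 5, 6, 7 /) passes this check, while a list with gaps such as
!--- (/ 4, 6, 8 /) triggers the FATAL error above.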
is_symmetry = .FALSE.
if(present(symmetry)) is_symmetry = symmetry
if(size(pe_start(:)) .NE. num_tile .OR. size(pe_end(:)) .NE. num_tile ) call mpp_error(FATAL, &
'mpp_domains_define.inc: size of pe_start and/or pe_end is not equal num_tile')
!--- make sure pe_start and pe_end is in the pelist.
if( ANY( pe_start < pes(0) ) ) call mpp_error(FATAL, 'mpp_domains_define.inc: not all the pe_start are in the pelist')
if( ANY( pe_end > pes(nlist-1)) ) call mpp_error(FATAL, 'mpp_domains_define.inc: not all the pe_end are in the pelist')
!--- calculate number of tiles on each pe.
allocate( ntile_per_pe(0:nlist-1) )
ntile_per_pe = 0
do n = 1, num_tile
do m = pe_start(n) - mpp_root_pe(), pe_end(n) - mpp_root_pe()
ntile_per_pe(m) = ntile_per_pe(m) + 1
end do
end do
if(ANY(ntile_per_pe == 0)) call mpp_error(FATAL, &
'mpp_domains_define.inc: At least one pe in pelist is not used by any tile in the mosaic')
!--- check that the sizes of xextent and yextent conform to layout and num_tile
if( PRESENT(xextent) ) then
if(size(xextent,1) .GT. maxval(layout(1,:)) ) call mpp_error(FATAL, &
'mpp_domains_define.inc: size mismatch between xextent and layout')
if(size(xextent,2) .NE. num_tile) call mpp_error(FATAL, &
'mpp_domains_define.inc: size of xextent is not equal num_tile')
end if
if( PRESENT(yextent) ) then
if(size(yextent,1) .GT. maxval(layout(2,:)) ) call mpp_error(FATAL, &
'mpp_domains_define.inc: size mismatch between yextent and layout')
if(size(yextent,2) .NE. num_tile) call mpp_error(FATAL, &
'mpp_domains_define.inc: size of yextent is not equal num_tile')
end if
!--- check that the size of maskmap conforms to layout and num_tile.
!--- Since the layout differs between tiles, the portion of maskmap actually used for each tile
!--- also differs. When defining maskmap for multiple tiles, the user should size the first and
!--- second dimensions of maskmap to the maximum layout values over all tiles.
if(present(maskmap)) then
if(size(maskmap,1) .GT. maxval(layout(1,:)) .or. size(maskmap,2) .GT. maxval(layout(2,:))) &
call mpp_error(FATAL, 'mpp_domains_define.inc: size mismatch between maskmap and layout')
if(size(maskmap,3) .NE. num_tile) call mpp_error(FATAL, &
'mpp_domains_define.inc: the third dimension of maskmap is not equal num_tile')
end if
allocate(domain%tileList(num_tile))
do n = 1, num_tile
domain%tileList(n)%xbegin = global_indices(1,n)
domain%tileList(n)%xend = global_indices(2,n)
domain%tileList(n)%ybegin = global_indices(3,n)
domain%tileList(n)%yend = global_indices(4,n)
enddo
!--- define some mosaic information in domain type
nt = ntile_per_pe(mpp_pe()-mpp_root_pe())
allocate(domain%tile_id(nt), domain%x(nt), domain%y(nt) )
allocate(domain%list(0:nlist-1))
do n = 0, nlist-1
nt = ntile_per_pe(n)
allocate(domain%list(n)%x(nt), domain%list(n)%y(nt), domain%list(n)%tile_id(nt) )
end do
pe = mpp_pe()
pos = 0
if( PRESENT(tile_id) ) then
if(size(tile_id(:)) .NE. num_tile) then
call mpp_error(FATAL, "mpp_domains_define.inc: size(tile_id) .NE. num_tile")
endif
endif
allocate(tile_id_local(num_tile))
!These directives are a work-around for a bug in the CCE compiler, which
!causes a segmentation fault when the compiler attempts to vectorize a
!loop containing an optional argument (when -g is included).
!DIR$ NOVECTOR
do n = 1, num_tile
if(PRESENT(tile_id)) then
tile_id_local(n) = tile_id(n)
else
tile_id_local(n) = n
endif
enddo
!DIR$ VECTOR
do n = 1, num_tile
if( pe .GE. pe_start(n) .AND. pe .LE. pe_end(n)) then
pos = pos + 1
domain%tile_id(pos) = tile_id_local(n)
end if
end do
domain%initialized = .true.
domain%rotated_ninety = .FALSE.
domain%ntiles = num_tile
domain%max_ntile_pe = maxval(ntile_per_pe)
domain%ncontacts = num_contact
deallocate(ntile_per_pe)
!--- call mpp_define_domains to define the domain decomposition for each tile.
allocate(tile_count(pes(0):pes(0)+nlist-1))
tile_count = 0 ! tile number on current pe
do n = 1, num_tile
allocate(mask(layout(1,n), layout(2,n)))
allocate(pelist_tile(pe_start(n):pe_end(n)) )
tile_count(pe_start(n)) = tile_count(pe_start(n)) + 1
do m = pe_start(n), pe_end(n)
pelist_tile(m) = m
end do
mask = .TRUE.
if(present(maskmap)) mask = maskmap(1:layout(1,n), 1:layout(2,n), n)
ndivx = layout(1,n); ndivy = layout(2,n)
allocate(xext(ndivx), yext(ndivy))
xext = 0; yext = 0
if(present(xextent)) xext = xextent(1:ndivx,n)
if(present(yextent)) yext = yextent(1:ndivy,n)
! when num_tile is one, we assume the only possible boundary conditions are folded_north, cyclic_x and cyclic_y.
! z1l: when we decide to support a multiple-tile tripolar grid, we will redesign the following part.
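!--- For reference (hypothetical indices): a single-tile east-west cyclic contact has
!--- istart1=iend1=ieg and istart2=iend2=isg, which adds CYCLIC_GLOBAL_DOMAIN to flags_x below;
!--- a folded-north contact has jstart1=jend1=jstart2=jend2=jeg, which adds FOLD_NORTH_EDGE to flags_y.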
if(num_tile == 1) then
flags_x = 0
flags_y = 0
if(PRESENT(xflags)) flags_x = xflags
if(PRESENT(yflags)) flags_y = yflags
do m = 1, num_contact
if(istart1(m) == iend1(m) ) then ! x-direction contact, possible cyclic, folded-west or folded-east
if(istart2(m) .NE. iend2(m) ) call mpp_error(FATAL, &
"mpp_domains_define: for one tile mosaic, when istart1=iend1, istart2 must equal iend2")
if(istart1(m) == istart2(m) ) then ! folded west or folded east
if(istart1(m) == global_indices(1,n) ) then
if(.NOT. BTEST(flags_x,WEST) ) flags_x = flags_x + FOLD_WEST_EDGE
else if(istart1(m) == global_indices(2,n) ) then
if(.NOT. BTEST(flags_x,EAST) ) flags_x = flags_x + FOLD_EAST_EDGE
else
call mpp_error(FATAL, "mpp_domains_define: when istart1=iend1,jstart1=jend1, "//&
"istart1 should equal global_indices(1) or global_indices(2)")
endif
else
if(.NOT. BTEST(flags_x,CYCLIC)) flags_x = flags_x + CYCLIC_GLOBAL_DOMAIN
endif
else if( jstart1(m) == jend1(m) ) then ! y-direction contact, cyclic, folded-south or folded-north
if(jstart2(m) .NE. jend2(m) ) call mpp_error(FATAL, &
"mpp_domains_define: for one tile mosaic, when jstart1=jend1, jstart2 must equal jend2")
if(jstart1(m) == jstart2(m) ) then ! folded south or folded north
if(jstart1(m) == global_indices(3,n) ) then
if(.NOT. BTEST(flags_y,SOUTH) ) flags_y = flags_y + FOLD_SOUTH_EDGE
else if(jstart1(m) == global_indices(4,n) ) then
if(.NOT. BTEST(flags_y,NORTH) ) flags_y = flags_y + FOLD_NORTH_EDGE
else
call mpp_error(FATAL, "mpp_domains_define: when istart1=iend1,jstart1=jend1, "//&
"istart1 should equal global_indices(1) or global_indices(2)")
endif
else
if(.NOT. BTEST(flags_y,CYCLIC)) flags_y = flags_y + CYCLIC_GLOBAL_DOMAIN
end if
else
call mpp_error(FATAL, &
"mpp_domains_define: for one tile mosaic, invalid boundary contact")
end if
end do
call mpp_define_domains(global_indices(:,n), layout(:,n), domain, pelist=pelist_tile, xflags = flags_x, &
yflags = flags_y, whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo, &
xextent=xext, yextent=yext, maskmap=mask, name=name, symmetry=is_symmetry, &
memory_size = memory_size, is_mosaic = .true., tile_id=tile_id_local(n))
else
call mpp_define_domains(global_indices(:,n), layout(:,n), domain, pelist=pelist_tile, &
whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo, xextent=xext, yextent=yext, &
maskmap=mask, name=name, symmetry=is_symmetry, memory_size = memory_size, &
is_mosaic = .true., tile_count = tile_count(pe_start(n)), tile_id=tile_id_local(n), &
complete = n==num_tile)
end if
deallocate(mask, xext, yext, pelist_tile)
end do
deallocate(pes, tile_count)
if(num_contact == 0 .OR. num_tile == 1) return
!--- loop through each contact region and find the contact for each tile ( including alignment )
!--- we assume the tile list is contiguous and starts from 1.
allocate(is1(num_contact), ie1(num_contact), js1(num_contact), je1(num_contact) )
allocate(is2(num_contact), ie2(num_contact), js2(num_contact), je2(num_contact) )
allocate(isgList(num_tile), iegList(num_tile), jsgList(num_tile), jegList(num_tile) )
allocate(align1(num_contact), align2(num_contact), refine1(num_contact), refine2(num_contact))
!--- get the global domain for each tile
do n = 1, num_tile
isgList(n) = domain%tileList(n)%xbegin; iegList(n) = domain%tileList(n)%xend
jsgList(n) = domain%tileList(n)%ybegin; jegList(n) = domain%tileList(n)%yend
end do
!--- transfer the contact index to domain index.
nc = 0
do n = 1, num_contact
t1 = tile1(n)
t2 = tile2(n)
is1(n) = istart1(n) + isgList(t1) - 1; ie1(n) = iend1(n) + isgList(t1) - 1
js1(n) = jstart1(n) + jsgList(t1) - 1; je1(n) = jend1(n) + jsgList(t1) - 1
is2(n) = istart2(n) + isgList(t2) - 1; ie2(n) = iend2(n) + isgList(t2) - 1
js2(n) = jstart2(n) + jsgList(t2) - 1; je2(n) = jend2(n) + jsgList(t2) - 1
call check_alignment( is1(n), ie1(n), js1(n), je1(n), isgList(t1), iegList(t1), jsgList(t1), jegList(t1), align1(n))
call check_alignment( is2(n), ie2(n), js2(n), je2(n), isgList(t2), iegList(t2), jsgList(t2), jegList(t2), align2(n))
if( (align1(n) == WEST .or. align1(n) == EAST ) .NEQV. (align2(n) == WEST .or. align2(n) == EAST ) )&
domain%rotated_ninety=.true.
end do
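!--- e.g. (hypothetical values) if tile 2 occupies global i-indices 361:720 (isgList(2)=361),
!--- a contact index istart2=1 becomes is2 = 1 + 361 - 1 = 361 after the shift by isgList(t2)-1 above.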
!--- calculate the refinement ratio between tiles
do n = 1, num_contact
n1 = max(abs(iend1(n) - istart1(n)), abs(jend1(n) - jstart1(n)) ) + 1
n2 = max(abs(iend2(n) - istart2(n)), abs(jend2(n) - jstart2(n)) ) + 1
refine1(n) = real(n2)/n1
refine2(n) = real(n1)/n2
end do
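!--- e.g. (hypothetical values) if a contact spans 180 points on tile 1 (n1=180) and 360 points
!--- on tile 2 (n2=360), then refine1 = 360/180 = 2.0 and refine2 = 180/360 = 0.5.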
whalosz = 0; ehalosz = 0; shalosz = 0; nhalosz = 0
if(present(whalo)) whalosz = whalo
if(present(ehalo)) ehalosz = ehalo
if(present(shalo)) shalosz = shalo
if(present(nhalo)) nhalosz = nhalo
xhalosz = max(whalosz, ehalosz)
yhalosz = max(shalosz, nhalosz)
!--- computing the overlap for the contact region with halo size xhalosz and yhalosz
call define_contact_point( domain, CENTER, num_contact, tile1, tile2, align1, align2, refine1, refine2, &
is1, ie1, js1, je1, is2, ie2, js2, je2, isgList, iegList, jsgList, jegList )
call set_contact_point( domain, CORNER )
call set_contact_point( domain, EAST )
call set_contact_point( domain, NORTH )
call set_domain_comm_inf(domain%update_T)
call set_domain_comm_inf(domain%update_E)
call set_domain_comm_inf(domain%update_C)
call set_domain_comm_inf(domain%update_N)
!--- goffset setting is needed for exact global sum
do m = 1, size(domain%tile_id(:))
tile = domain%tile_id(m)
do n = 1, num_contact
if( tile1(n) == tile ) then
if(align1(n) == EAST ) domain%x(m)%goffset = 0
if(align1(n) == NORTH) domain%y(m)%goffset = 0
end if
if( tile2(n) == tile ) then
if(align2(n) == EAST ) domain%x(m)%goffset = 0
if(align2(n) == NORTH) domain%y(m)%goffset = 0
end if
end do
end do
call check_overlap_pe_order(domain, domain%update_T, trim(domain%name)//" update_T in mpp_define_mosaic")
call check_overlap_pe_order(domain, domain%update_C, trim(domain%name)//" update_C in mpp_define_mosaic")
call check_overlap_pe_order(domain, domain%update_E, trim(domain%name)//" update_E in mpp_define_mosaic")
call check_overlap_pe_order(domain, domain%update_N, trim(domain%name)//" update_N in mpp_define_mosaic")
!--- set the overlaps for the boundary check if the domain is symmetric
if(debug_update_level .NE. NO_CHECK) then
call set_check_overlap( domain, CORNER )
call set_check_overlap( domain, EAST )
call set_check_overlap( domain, NORTH )
endif
if(domain%symmetry) then
allocate(domain%bound_E, domain%bound_C, domain%bound_N )
call set_bound_overlap( domain, CORNER )
call set_bound_overlap( domain, EAST )
call set_bound_overlap( domain, NORTH )
call check_overlap_pe_order(domain, domain%bound_C, trim(domain%name)//" bound_C")
call check_overlap_pe_order(domain, domain%bound_E, trim(domain%name)//" bound_E")
call check_overlap_pe_order(domain, domain%bound_N, trim(domain%name)//" bound_N")
end if
!--- check that the send and recv sizes match for the T, C, E and N-cell positions.
if(debug_message_passing) then
send = .true.
recv = .true.
call check_message_size(domain, domain%update_T, send, recv, 'T')
call check_message_size(domain, domain%update_C, send, recv, 'C')
call check_message_size(domain, domain%update_E, send, recv, 'E')
call check_message_size(domain, domain%update_N, send, recv, 'N')
endif
!--- release memory
deallocate(align1, align2, is1, ie1, js1, je1, is2, ie2, js2, je2 )
deallocate(isgList, iegList, jsgList, jegList, refine1, refine2 )
end subroutine mpp_define_mosaic
!#####################################################################
logical function mpp_mosaic_defined()
! Accessor function for value of mosaic_defined
mpp_mosaic_defined = mosaic_defined
end function mpp_mosaic_defined
!#####################################################################
subroutine compute_overlaps( domain, position, update, check, ishift, jshift, x_cyclic_offset, y_cyclic_offset, &
whalo, ehalo, shalo, nhalo )
!computes remote domain overlaps
!assumes only one in each direction
!will calculate the overlap for T,E,C,N-cell positions separately.
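!--- the eight halo directions used below are numbered
!--- dir = 1 (E), 2 (SE), 3 (S), 4 (SW), 5 (W), 6 (NW), 7 (N), 8 (NE).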
type(domain2D), intent(inout) :: domain
type(overlapSpec), intent(inout), pointer :: update
type(overlapSpec), intent(inout), pointer :: check
integer, intent(in) :: position, ishift, jshift
integer, intent(in) :: x_cyclic_offset, y_cyclic_offset
integer, intent(in) :: whalo, ehalo, shalo, nhalo
integer :: i, m, n, nlist, tMe, tNbr, dir
integer :: is, ie, js, je, isc, iec, jsc, jec, isd, ied, jsd, jed
integer :: isg, ieg, jsg, jeg, ioff, joff
integer :: list, middle, ni, nj, isgd, iegd, jsgd, jegd
integer :: ism, iem, jsm, jem
integer :: is2, ie2, js2, je2
integer :: is3, ie3, js3, je3
integer :: isd3, ied3, jsd3, jed3
integer :: isd2, ied2, jsd2, jed2
logical :: folded, need_adjust_1, need_adjust_2, need_adjust_3, folded_north
type(overlap_type) :: overlap
type(overlap_type), pointer :: overlapList(:)=>NULL()
type(overlap_type), pointer :: checkList(:)=>NULL()
integer :: nsend, nrecv
integer :: nsend_check, nrecv_check
integer :: unit
logical :: set_check
!--- We restrict that when multiple tiles are assigned to one pe, each of those tiles resides
!--- entirely on that pe. In that case there is no within-tile overlap between processors, so the
!--- overlap computed here is needed only when tMe=1 and tNbr=1 (one tile per pe).
if(size(domain%x(:)) > 1) return
!--- if there is no halo, no need to compute overlaps.
if(whalo==0 .AND. ehalo==0 .AND. shalo==0 .AND. nhalo==0) return
!--- when there is only one tile, nlist will equal the number of pes
nlist = size(domain%list(:))
set_check = .false.
if(ASSOCIATED(check)) set_check = .true.
allocate(overlapList(MAXLIST) )
if(set_check) allocate(checkList(MAXLIST) )
!--- overlap is used to store the overlapping temporarily.
call allocate_update_overlap( overlap, MAXOVERLAP)
!send
call mpp_get_compute_domain( domain, isc, iec, jsc, jec, position=position )
call mpp_get_global_domain ( domain, isg, ieg, jsg, jeg, xsize=ni, ysize=nj, position=position ) !cyclic offsets
call mpp_get_memory_domain ( domain, ism, iem, jsm, jem, position=position )
update%xbegin = ism; update%xend = iem
update%ybegin = jsm; update%yend = jem
if(set_check) then
check%xbegin = ism; check%xend = iem
check%ybegin = jsm; check%yend = jem
endif
update%whalo = whalo; update%ehalo = ehalo
update%shalo = shalo; update%nhalo = nhalo
ioff = ni - ishift
joff = nj - jshift
middle = (isg+ieg)/2+1
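!--- ioff/joff are the global x/y extents used to wrap indices across a cyclic boundary
!--- (an index beyond ieg is shifted back by ioff, an index below isg is shifted forward by ioff);
!--- middle marks the start of the eastern half of the global domain and is used when mirroring
!--- indices across the folded north edge.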
tMe = 1; tNbr = 1
folded_north = BTEST(domain%fold,NORTH)
if( BTEST(domain%fold,SOUTH) .OR. BTEST(domain%fold,EAST) .OR. BTEST(domain%fold,WEST) ) then
call mpp_error(FATAL, "mpp_domains_define.inc(compute_overlaps): folded south, east or west boundary condition " // &
"is not supported, please use other version of compute_overlaps for "//trim(domain%name))
endif
nsend = 0
nsend_check = 0
do list = 0,nlist-1
m = mod( domain%pos+list, nlist )
if(domain%list(m)%tile_id(tNbr) == domain%tile_id(tMe) ) then ! only compute the overlapping within tile.
!to_pe's eastern halo
dir = 1
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%begin; je = domain%list(m)%y(tNbr)%compute%end+jshift
!--- to ensure consistency between pes
if( domain%symmetry .AND. (position == NORTH .OR. position == CORNER ) &
.AND. ( jsc == je .or. jec == js ) ) then
!--- do nothing, this point will come from other pe
else
!--- when the north face is folded, the east halo points on the right-side domain will be folded.
!--- this applies when the position is CORNER or NORTH.
if( je == jeg .AND. folded_north .AND. (position == CORNER .OR. position == NORTH) ) then
call fill_overlap_send_fold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ishift, position, ioff, middle)
else
if(x_cyclic_offset ==0 .AND. y_cyclic_offset == 0) then
call fill_overlap_send_nofold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, symmetry=domain%symmetry)
else
if( ie.GT.ieg ) then
if( domain%x(tMe)%cyclic .AND. iec.LT.is )then !try cyclic offset
is = is-ioff; ie = ie-ioff
call apply_cyclic_offset(js, je, -x_cyclic_offset, jsg, jeg, nj)
end if
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, jsg, jeg, dir, symmetry=domain%symmetry)
endif
endif
end if
!to_pe's SE halo
dir = 2
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
need_adjust_1 = .true.; need_adjust_2 = .true.; need_adjust_3 = .true.
!--- divide into two cases: one where x_cyclic_offset/y_cyclic_offset is nonzero,
!--- the other where both are zero.
is2 = 0; ie2 = -1; js2 = 0; je2 = -1
if(x_cyclic_offset == 0 .AND. y_cyclic_offset == 0) then
if(je .LT. jsg) then ! js .LT. jsg
if( domain%y(tMe)%cyclic ) then
js = js + joff; je = je + joff
endif
else if(js .Lt. jsg) then ! split into two parts
if( domain%y(tMe)%cyclic ) then
js2 = js + joff; je2 = jsg-1+joff
js = jsg;
endif
endif
call fill_overlap_send_nofold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic)
if(je2 .GE. js2) call fill_overlap_send_nofold(overlap, domain, m, is, ie, js2, je2, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic)
else
if( ie.GT.ieg )then
if( domain%x(tMe)%cyclic .AND. iec.LT.is )then !try cyclic offset
is = is-ioff; ie = ie-ioff
need_adjust_1 = .false.
if(jsg .GT. js) then
if( domain%y(tMe)%cyclic .AND. je.LT.jsc )then !try cyclic offset
js = js+joff; je = je+joff
need_adjust_2 = .false.
if(x_cyclic_offset .NE. 0) then
call apply_cyclic_offset(js, je, -x_cyclic_offset, jsg, jeg, nj)
else if(y_cyclic_offset .NE. 0) then
call apply_cyclic_offset(is, ie, y_cyclic_offset, isg, ieg, ni)
end if
end if
else
call apply_cyclic_offset(js, je, -x_cyclic_offset, jsg, jeg, nj)
need_adjust_3 = .false.
end if
end if
end if
if( need_adjust_3 .AND. jsg.GT.js )then
if( need_adjust_2 .AND. domain%y(tMe)%cyclic .AND. je.LT.jsc )then !try cyclic offset
js = js+joff; je = je+joff
if(need_adjust_1 .AND. ie.LE.ieg) then
call apply_cyclic_offset(is, ie, y_cyclic_offset, isg, ieg, ni)
end if
end if
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, isg, ieg, jsg, jeg, dir)
endif
!to_pe's southern halo
dir = 3
is = domain%list(m)%x(tNbr)%compute%begin; ie = domain%list(m)%x(tNbr)%compute%end+ishift
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
js2 = 0; je2 = -1
if( jsg.GT.je )then ! jsg .GT. js
if( domain%y(tMe)%cyclic .AND. je.LT.jsc )then !try cyclic offset
js = js+joff; je = je+joff
call apply_cyclic_offset(is, ie, y_cyclic_offset, isg, ieg, ni)
end if
else if (jsg .GT. js) then ! split into two parts
if( domain%y(tMe)%cyclic) then
js2 = js + joff; je2 = jsg-1+joff
js = jsg
endif
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, jsg, jeg, dir, symmetry=domain%symmetry)
if(je2 .GE. js2) call fill_overlap(overlap, domain, m, is, ie, js2, je2, isc, iec, jsc, jec, &
isg, ieg, jsg, jeg, dir, symmetry=domain%symmetry)
!to_pe's SW halo
dir = 4
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
need_adjust_1 = .true.; need_adjust_2 = .true.; need_adjust_3 = .true.
is2 = 0; ie2 = -1; js2 = 0; je2 = -1
if(x_cyclic_offset == 0 .AND. y_cyclic_offset == 0) then
if(je .LT. jsg) then ! js .LT. jsg
if( domain%y(tMe)%cyclic ) then
js = js + joff; je = je + joff
endif
else if(js .Lt. jsg) then ! split into two parts
if( domain%y(tMe)%cyclic ) then
js2 = js + joff; je2 = jsg-1+joff
js = jsg;
endif
endif
call fill_overlap_send_nofold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic)
if(je2 .GE. js2) call fill_overlap_send_nofold(overlap, domain, m, is, ie, js2, je2, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic)
else
if( isg.GT.is )then
if( domain%x(tMe)%cyclic .AND. ie.LT.isc )then !try cyclic offset
is = is+ioff; ie = ie+ioff
need_adjust_1 = .false.
if(jsg .GT. js) then
if( domain%y(tMe)%cyclic .AND. je.LT.jsc )then !try cyclic offset
js = js+joff; je = je+joff
need_adjust_2 = .false.
if(x_cyclic_offset .NE. 0) then
call apply_cyclic_offset(js, je, x_cyclic_offset, jsg, jeg, nj)
else if(y_cyclic_offset .NE. 0) then
call apply_cyclic_offset(is, ie, y_cyclic_offset, isg, ieg, ni)
end if
end if
else
call apply_cyclic_offset(js, je, x_cyclic_offset, jsg, jeg, nj)
need_adjust_3 = .false.
end if
end if
end if
if( need_adjust_3 .AND. jsg.GT.js )then
if( need_adjust_2 .AND. domain%y(tMe)%cyclic .AND. je.LT.jsc )then !try cyclic offset
js = js+joff; je = je+joff
if(need_adjust_1 .AND. isg.LE.is )then
call apply_cyclic_offset(is, ie, y_cyclic_offset, isg, ieg, ni)
end if
end if
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, isg, ieg, jsg, jeg, dir)
endif
!to_pe's western halo
dir = 5
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%begin; je = domain%list(m)%y(tNbr)%compute%end+jshift
!--- when the north face is folded, some points at j=nj will be folded.
!--- this applies when the position is CORNER or NORTH.
if( je == jeg .AND. folded_north .AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_send_fold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ishift, position, ioff, middle)
else
if(x_cyclic_offset ==0 .AND. y_cyclic_offset == 0) then
call fill_overlap_send_nofold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, symmetry=domain%symmetry)
else
if( isg.GT.is )then
if( domain%x(tMe)%cyclic .AND. ie.LT.isc )then !try cyclic offset
is = is+ioff; ie = ie+ioff
call apply_cyclic_offset(js, je, x_cyclic_offset, jsg, jeg, nj)
endif
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, jsg, jeg, dir, symmetry=domain%symmetry)
end if
end if
!to_pe's NW halo
dir = 6
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
is2 = 0; ie2 = -1; js2 = 0; je2 = -1
is3 = 0; ie3 = -1; js3 = 0; je3 = -1
folded = .FALSE.
if(x_cyclic_offset == 0 .AND. y_cyclic_offset == 0) then
if(js .GT. jeg) then ! je > jeg
if( domain%y(tMe)%cyclic ) then
js = js-joff; je = je-joff
else if(folded_north )then
folded = .TRUE.
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
endif
else if(je .GT. jeg) then ! split into two parts
if( domain%y(tMe)%cyclic ) then
is2 = is; ie2 = ie; js2 = js; je2 = jeg
js = jeg+1-joff; je = je -joff
else if(folded_north) then
folded = .TRUE.
is2 = is; ie2 = ie; js2 = js; je2 = jeg
js = jeg+1
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
if( is .GT. ieg) then
is = is - ioff; ie = ie - ioff
else if( ie .GT. ieg ) then
is3 = is; ie3 = ieg; js3 = js; je3 = je
is = ieg+1-ioff; ie = ie - ioff
endif
endif
endif
if( je == jeg .AND. jec == jeg .AND. folded_north .AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_send_fold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ishift, position, ioff, middle)
else
call fill_overlap_send_nofold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, folded)
endif
if(ie3 .GE. is3) call fill_overlap_send_nofold(overlap, domain, m, is3, ie3, js3, je3, &
isc, iec, jsc, jec, isg, ieg, dir, ioff, domain%x(tMe)%cyclic, folded)
if(ie2 .GE. is2) then
if(je2 == jeg .AND. jec == jeg .AND. folded_north .AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_send_fold(overlap, domain, m, is2, ie2, js2, je2, isc, iec, jsc, jec, &
isg, ieg, dir, ishift, position, ioff, middle)
else
call fill_overlap_send_nofold(overlap, domain, m, is2, ie2, js2, je2, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic)
endif
endif
else
need_adjust_1 = .true.; need_adjust_2 = .true.; need_adjust_3 = .true.
if( isg.GT.is )then
if( domain%x(tMe)%cyclic .AND. ie.LT.isc )then !try cyclic offset
is = is+ioff; ie = ie+ioff
need_adjust_1 = .false.
if(je .GT. jeg) then
if( domain%y(tMe)%cyclic .AND. jec.LT.js )then !try cyclic offset
js = js-joff; je = je-joff
need_adjust_2 = .false.
if(x_cyclic_offset .NE. 0) then
call apply_cyclic_offset(js, je, x_cyclic_offset, jsg, jeg, nj)
else if(y_cyclic_offset .NE. 0) then
call apply_cyclic_offset(is, ie, -y_cyclic_offset, isg, ieg, ni)
end if
end if
else
call apply_cyclic_offset(js, je, x_cyclic_offset, jsg, jeg, nj)
need_adjust_3 = .false.
end if
end if
end if
folded = .FALSE.
if( need_adjust_3 .AND. je.GT.jeg )then
if( need_adjust_2 .AND. domain%y(tMe)%cyclic .AND. jec.LT.js )then !try cyclic offset
js = js-joff; je = je-joff
if( need_adjust_1 .AND. isg.LE.is)then
call apply_cyclic_offset(is, ie, -y_cyclic_offset, isg, ieg, ni)
end if
else if( folded_north )then
folded = .TRUE.
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
end if
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, jsg, jeg, dir)
endif
!to_pe's northern halo
dir = 7
folded = .FALSE.
is = domain%list(m)%x(tNbr)%compute%begin; ie = domain%list(m)%x(tNbr)%compute%end+ishift
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
!--- when the domain is symmetric and the position is EAST or CORNER, the point where isc == ie
!--- (or iec == is) does not need to be sent, because the data at that point will come from
!--- another pe.
if( domain%symmetry .AND. (position == EAST .OR. position == CORNER ) &
.AND. ( isc == ie .or. iec == is ) .AND. (.not. folded_north) ) then
!--- do nothing, this point will come from other pe
else
js2 = -1; je2 = 0
if( js .GT. jeg) then ! je .GT. jeg
if( domain%y(tMe)%cyclic .AND. jec.LT.js )then !try cyclic offset
js = js-joff; je = je-joff
call apply_cyclic_offset(is, ie, -y_cyclic_offset, isg, ieg, ni)
else if( folded_north )then
folded = .TRUE.
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
end if
else if( je.GT.jeg )then ! split into two parts
if( domain%y(tMe)%cyclic)then !try cyclic offset
is2 = is; ie2 = ie; js2 = js; je2 = jeg
js = jeg+1-joff; je = je - joff
else if( folded_north )then
folded = .TRUE.
is2 = is; ie2 = ie; js2 = js; je2 = jeg
js = jeg+1;
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
end if
end if
if(x_cyclic_offset == 0 .AND. y_cyclic_offset == 0) then
if( je == jeg .AND. jec == jeg .AND. folded_north .AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_send_fold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ishift, position, ioff, middle, domain%symmetry)
else
call fill_overlap_send_nofold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, folded, domain%symmetry)
endif
else
call fill_overlap(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, jsg, jeg, dir, symmetry=domain%symmetry)
endif
if(ie2 .GE. is2) then
if(je2 == jeg .AND. jec == jeg .AND. folded_north .AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_send_fold(overlap, domain, m, is2, ie2, js2, je2, isc, iec, jsc, jec, &
isg, ieg, dir, ishift, position, ioff, middle, domain%symmetry)
else
call fill_overlap_send_nofold(overlap, domain, m, is2, ie2, js2, je2, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, symmetry=domain%symmetry)
endif
endif
end if
!--- when the north edge is folded, is will be less than isg when the position is EAST or CORNER
if(is .LT. isg .AND. domain%x(tMe)%cyclic) then
! is = is + ioff
! call insert_update_overlap( overlap, domain%list(m)%pe, &
! is, is, js, je, isc, iec, jsc, jec, dir, folded)
!??? if(je2 .GE. js2)call insert_update_overlap( overlap, domain%list(m)%pe, &
! is, is, js2, je2, isc, iec, jsc, jec, dir, folded)
endif
!--- Now calculate the overlap for the fold edge. Currently we only consider the folded-north case.
!--- For the folded-north edge, only to_pe's north (7) direction needs to be considered,
!--- and only the NORTH and CORNER positions need to be considered.
if( folded_north .AND. (position == NORTH .OR. position == CORNER) &
.AND. domain%x(tMe)%pos .LT. (size(domain%x(tMe)%list(:))+1)/2 ) then
if( domain%list(m)%y(tNbr)%compute%end+nhalo+jshift .GE. jeg .AND. isc .LE. middle)then
js = jeg; je = jeg
is = domain%list(m)%x(tNbr)%compute%begin; ie = domain%list(m)%x(tNbr)%compute%end+ishift
is = max(is, middle)
select case (position)
case(NORTH)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
case(CORNER)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
end select
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, .true.)
endif
if(debug_update_level .NE. NO_CHECK .AND. set_check) then
je = domain%list(m)%y(tNbr)%compute%end+jshift;
if(je == jeg) then
is = max(is, isc); ie = min(ie, iec)
js = max(js, jsc); je = min(je, jec)
if(ie.GE.is .AND. je.GE.js )then
nsend_check = nsend_check+1
if(nsend_check > size(checkList(:)) ) then
call expand_check_overlap_list(checkList, nlist)
endif
call allocate_check_overlap(checkList(nsend_check), 1)
call insert_check_overlap(checkList(nsend_check), domain%list(m)%pe, &
tMe, 4, ONE_HUNDRED_EIGHTY, is, ie, js, je)
end if
end if
endif
endif
!to_pe's NE halo
dir = 8
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
is2 = 0; ie2=-1; js2=0; je2=-1
is3 = 0; ie3 = -1; js3 = 0; je3 = -1
if(x_cyclic_offset == 0 .AND. y_cyclic_offset == 0) then
folded = .FALSE.
if(js .GT. jeg) then ! je > jeg
if( domain%y(tMe)%cyclic ) then
js = js-joff; je = je-joff
else if(folded_north )then
folded = .TRUE.
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
endif
else if(je .GT. jeg) then ! split into two parts
if( domain%y(tMe)%cyclic ) then
is2 = is; ie2 = ie; js2 = js; je2 = jeg
js = jeg+1-joff; je = je -joff
else if(folded_north) then
folded = .TRUE.
is2 = is; ie2 = ie; js2 = js; je2 = jeg
js = jeg+1
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
if( ie .LT. isg )then
is = is+ioff; ie = ie+ioff
else if( is .LT. isg) then
is3 = isg; ie3 = ie; js3 = js; je3 = je
is = is+ioff; ie = isg-1+ioff;
endif
endif
endif
if( je == jeg .AND. jec == jeg .AND. folded_north .AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_send_fold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ishift, position, ioff, middle)
else
call fill_overlap_send_nofold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, folded)
endif
if(ie3 .GE. is3) call fill_overlap_send_nofold(overlap, domain, m, is3, ie3, js3, je3, &
isc, iec, jsc, jec, isg, ieg, dir, ioff, domain%x(tMe)%cyclic, folded)
if(ie2 .GE. is2) then
if(je2 == jeg .AND. jec == jeg .AND. folded_north .AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_send_fold(overlap, domain, m, is2, ie2, js2, je2, isc, iec, jsc, jec, &
isg, ieg, dir, ishift, position, ioff, middle)
else
call fill_overlap_send_nofold(overlap, domain, m, is2, ie2, js2, je2, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic)
endif
endif
else
need_adjust_1 = .true.; need_adjust_2 = .true.; need_adjust_3 = .true.
if( ie.GT.ieg )then
if( domain%x(tMe)%cyclic .AND. iec.LT.is )then !try cyclic offset
is = is-ioff; ie = ie-ioff
need_adjust_1 = .false.
if(je .GT. jeg) then
if( domain%y(tMe)%cyclic .AND. jec.LT.js )then !try cyclic offset
js = js-joff; je = je-joff
need_adjust_2 = .false.
if(x_cyclic_offset .NE. 0) then
call apply_cyclic_offset(js, je, -x_cyclic_offset, jsg, jeg, nj)
else if(y_cyclic_offset .NE. 0) then
call apply_cyclic_offset(is, ie, -y_cyclic_offset, isg, ieg, ni)
end if
end if
else
call apply_cyclic_offset(js, je, -x_cyclic_offset, jsg, jeg, nj)
need_adjust_3 = .false.
end if
end if
end if
folded = .false.
if( need_adjust_3 .AND. je.GT.jeg )then
if( need_adjust_2 .AND. domain%y(tMe)%cyclic .AND. jec.LT.js )then !try cyclic offset
js = js-joff; je = je-joff
if( need_adjust_1 .AND. ie.LE.ieg)then
call apply_cyclic_offset(is, ie, -y_cyclic_offset, isg, ieg, ni)
end if
else if( folded_north )then
folded = .TRUE.
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
end if
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, jsg, jeg, dir)
endif
endif
!--- copy the overlapping information
if( overlap%count > 0) then
nsend = nsend + 1
if(nsend > size(overlapList(:)) ) then
call mpp_error(NOTE, 'mpp_domains_define.inc(compute_overlaps): overlapList for send is expanded')
call expand_update_overlap_list(overlapList, nlist)
endif
call add_update_overlap( overlapList(nsend), overlap)
call init_overlap_type(overlap)
endif
end do ! end of send set up.
if(debug_message_passing) then
!--- write out send information
unit = mpp_pe() + 1000
do m =1,nsend
write(unit, *) "********to_pe = " ,overlapList(m)%pe, " count = ",overlapList(m)%count
do n = 1, overlapList(m)%count
write(unit, *) overlapList(m)%is(n), overlapList(m)%ie(n), overlapList(m)%js(n), overlapList(m)%je(n), &
overlapList(m)%dir(n), overlapList(m)%rotation(n)
enddo
enddo
if(nsend >0) call flush(unit)
endif
! copy the overlapping information into domain data structure
if(nsend>0) then
allocate(update%send(nsend))
update%nsend = nsend
do m = 1, nsend
call add_update_overlap( update%send(m), overlapList(m) )
enddo
endif
if(nsend_check>0) then
check%nsend = nsend_check
allocate(check%send(nsend_check))
do m = 1, nsend_check
call add_check_overlap( check%send(m), checkList(m) )
enddo
endif
do m = 1,size(overlapList(:))
call deallocate_overlap_type(overlapList(m))
enddo
if(debug_update_level .NE. NO_CHECK .AND. set_check) then
do m = 1,size(checkList(:))
call deallocate_overlap_type(checkList(m))
enddo
endif
isgd = isg - domain%whalo
iegd = ieg + domain%ehalo
jsgd = jsg - domain%shalo
jegd = jeg + domain%nhalo
! begin setting up recv
nrecv = 0
nrecv_check = 0
do list = 0,nlist-1
m = mod( domain%pos+nlist-list, nlist )
if(domain%list(m)%tile_id(tNbr) == domain%tile_id(tMe) ) then ! only compute the overlapping within tile.
isc = domain%list(m)%x(1)%compute%begin; iec = domain%list(m)%x(1)%compute%end+ishift
jsc = domain%list(m)%y(1)%compute%begin; jec = domain%list(m)%y(1)%compute%end+jshift
!recv_e
dir = 1
isd = domain%x(tMe)%compute%end+1+ishift; ied = domain%x(tMe)%compute%end+ehalo+ishift
jsd = domain%y(tMe)%compute%begin; jed = domain%y(tMe)%compute%end+jshift
is=isc; ie=iec; js=jsc; je=jec
if( domain%symmetry .AND. (position == NORTH .OR. position == CORNER ) &
.AND. ( jsd == je .or. jed == js ) ) then
! --- do nothing, this point will come from other pe
else
!--- when the north face is folded, the east halo points on the right-side domain will be folded.
!--- this applies when the position is CORNER or NORTH.
if( jed == jeg .AND. folded_north .AND. (position == CORNER .OR. position == NORTH) ) then
call fill_overlap_recv_fold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ishift, position, ioff, middle)
else
if(x_cyclic_offset == 0 .AND. y_cyclic_offset == 0) then
call fill_overlap_recv_nofold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic)
else
if( ied.GT.ieg )then
if( domain%x(tMe)%cyclic .AND. ie.LT.isd )then !try cyclic offset
is = is+ioff; ie = ie+ioff
call apply_cyclic_offset(js, je, x_cyclic_offset, jsg, jeg, nj)
end if
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir, symmetry=domain%symmetry)
endif
endif
endif
!recv_se
dir = 2
isd = domain%x(tMe)%compute%end+1+ishift; ied = domain%x(tMe)%compute%end+ehalo+ishift
jsd = domain%y(tMe)%compute%begin-shalo; jed = domain%y(tMe)%compute%begin-1
is=isc; ie=iec; js=jsc; je=jec
!--- divide into two cases: one where x_cyclic_offset/y_cyclic_offset is nonzero,
!--- the other where both are zero.
is2 = 0; ie2 = -1; js2 = 0; je2 = -1
if(x_cyclic_offset == 0 .AND. y_cyclic_offset == 0) then
if(jed .LT. jsg) then ! then jsd < jsg
if( domain%y(tMe)%cyclic ) then
js = js-joff; je = je-joff
endif
else if(jsd .LT. jsg) then !split into two parts
if( domain%y(tMe)%cyclic ) then
js2 = js-joff; je2 = je-joff
endif
endif
call fill_overlap_recv_nofold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic)
if(je2 .GE. js2) call fill_overlap_recv_nofold(overlap, domain, m, is, ie, js2, je2, isd, ied, jsd, jed, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic)
else
need_adjust_1 = .true.; need_adjust_2 = .true.; need_adjust_3 = .true.
if( jsd.LT.jsg )then
if( domain%y(tMe)%cyclic .AND. js.GT.jed )then !try cyclic offset
js = js-joff; je = je-joff
need_adjust_1 = .false.
if( ied.GT.ieg )then
if( domain%x(tMe)%cyclic .AND. ie.LT.isd )then !try cyclic offset
is = is+ioff; ie = ie+ioff
need_adjust_2 = .false.
if(x_cyclic_offset .NE. 0) then
call apply_cyclic_offset(js, je, x_cyclic_offset, jsgd, jeg, nj)
else if(y_cyclic_offset .NE. 0) then
call apply_cyclic_offset(is, ie, -y_cyclic_offset, isg, iegd, ni)
end if
end if
else
call apply_cyclic_offset(is, ie, -y_cyclic_offset, isg, ieg, ni)
need_adjust_3 = .false.
end if
end if
end if
if( need_adjust_3 .AND. ied.GT.ieg )then
if( need_adjust_2 .AND. domain%x(tMe)%cyclic .AND. ie.LT.isd )then !try cyclic offset
is = is+ioff; ie = ie+ioff
if( need_adjust_1 .AND. jsd.GE.jsg )then
call apply_cyclic_offset(js, je, x_cyclic_offset, jsg, jeg, nj)
end if
end if
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir)
endif
!recv_s
dir = 3
isd = domain%x(tMe)%compute%begin; ied = domain%x(tMe)%compute%end+ishift
jsd = domain%y(tMe)%compute%begin-shalo; jed = domain%y(tMe)%compute%begin-1
is=isc; ie=iec; js=jsc; je=jec
js2 = 0; je2 = -1
if( jed .LT. jsg) then ! jsd < jsg
if( domain%y(tMe)%cyclic ) then
js = js-joff; je = je-joff
call apply_cyclic_offset(is, ie, -y_cyclic_offset, isg, ieg, ni)
endif
else if( jsd.LT.jsg )then ! split into two parts
if( domain%y(tMe)%cyclic)then !try cyclic offset
js2 = js-joff; je2 = je-joff
end if
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir, symmetry=domain%symmetry)
if(je2 .GE. js2) call fill_overlap(overlap, domain, m, is, ie, js2, je2, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir, symmetry=domain%symmetry)
!recv_sw
dir = 4
isd = domain%x(tMe)%compute%begin-whalo; ied = domain%x(tMe)%compute%begin-1
jsd = domain%y(tMe)%compute%begin-shalo; jed = domain%y(tMe)%compute%begin-1
is=isc; ie=iec; js=jsc; je=jec
is2 = 0; ie2 = -1; js2 = 0; je2 = -1
if(x_cyclic_offset == 0 .AND. y_cyclic_offset == 0) then
if( ied.LT.isg )then ! isd < isg
if( domain%x(tMe)%cyclic ) then
is = is-ioff; ie = ie-ioff
endif
else if (isd.LT.isg )then ! split into two parts
if( domain%x(tMe)%cyclic ) then
is2 = is-ioff; ie2 = ie-ioff
endif
endif
if( jed.LT.jsg )then ! jsd < jsg
if( domain%y(tMe)%cyclic ) then
js = js-joff; je = je-joff
endif
else if( jsd.LT.jsg )then ! split into two parts
if( domain%y(tMe)%cyclic ) then
js2 = js-joff; je2 = je-joff
endif
endif
else
need_adjust_1 = .true.; need_adjust_2 = .true.; need_adjust_3 = .true.
if( jsd.LT.jsg )then
if( domain%y(tMe)%cyclic .AND. js.GT.jed )then !try cyclic offset
js = js-joff; je = je-joff
need_adjust_1 = .false.
if( isd.LT.isg )then
if( domain%x(tMe)%cyclic .AND. is.GT.ied )then !try cyclic offset
is = is-ioff; ie = ie-ioff
need_adjust_2 = .false.
if(x_cyclic_offset .NE. 0) then
call apply_cyclic_offset(js, je, -x_cyclic_offset, jsgd, jeg, nj)
else if(y_cyclic_offset .NE. 0) then
call apply_cyclic_offset(is, ie, -y_cyclic_offset, isgd, ieg, ni)
end if
end if
else
call apply_cyclic_offset(is, ie, -y_cyclic_offset, isg, ieg, ni)
need_adjust_3 = .false.
end if
end if
end if
if( need_adjust_3 .AND. isd.LT.isg )then
if( need_adjust_2 .AND. domain%x(tMe)%cyclic .AND. is.GT.ied )then !try cyclic offset
is = is-ioff; ie = ie-ioff
if(need_adjust_1 .AND. jsd.GE.jsg) then
call apply_cyclic_offset(js, je, -x_cyclic_offset, jsg, jeg, nj)
end if
end if
end if
endif
call fill_overlap(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir)
if(ie2 .GE. is2)call fill_overlap(overlap, domain, m, is2, ie2, js, je, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir)
if(je2 .GE. js2)call fill_overlap(overlap, domain, m, is, ie, js2, je2, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir)
if(ie2 .GE. is2 .AND. je2 .GE. js2)call fill_overlap(overlap, domain, m, is2, ie2, js2, je2, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir)
!recv_w
dir = 5
isd = domain%x(tMe)%compute%begin-whalo; ied = domain%x(tMe)%compute%begin-1
jsd = domain%y(tMe)%compute%begin; jed = domain%y(tMe)%compute%end+jshift
is=isc; ie=iec; js=jsc; je=jec
!--- when the north face is folded, some points at j=nj will be folded.
!--- this applies when the position is CORNER or NORTH.
if( jed == jeg .AND. folded_north .AND. (position == CORNER .OR. position == NORTH) ) then
call fill_overlap_recv_fold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ishift, position, ioff, middle)
else
if(x_cyclic_offset == 0 .AND. y_cyclic_offset == 0) then
call fill_overlap_recv_nofold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, symmetry=domain%symmetry)
else
if( isd.LT.isg )then
if( domain%x(tMe)%cyclic .AND. is.GT.ied )then !try cyclic offset
is = is-ioff; ie = ie-ioff
call apply_cyclic_offset(js, je, -x_cyclic_offset, jsg, jeg, nj)
end if
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir, symmetry=domain%symmetry)
endif
endif
!recv_nw
dir = 6
folded = .false.
isd = domain%x(tMe)%compute%begin-whalo; ied = domain%x(tMe)%compute%begin-1
jsd = domain%y(tMe)%compute%end+1+jshift; jed = domain%y(tMe)%compute%end+nhalo+jshift
is=isc; ie=iec; js=jsc; je=jec
is2 = 0; ie2 = -1; js2 = 0; je2 = -1
is3 = 0; ie3 = -1; js3 = 0; je3 = -1
if(x_cyclic_offset == 0 .AND. y_cyclic_offset == 0) then
js2 = -1; je2 = 0
if( jsd .GT. jeg ) then ! jed > jeg
if( domain%y(tMe)%cyclic .AND. je.LT.jsd )then !try cyclic offset
js = js+joff; je = je+joff
call apply_cyclic_offset(is, ie, y_cyclic_offset, isg, ieg, ni)
else if( folded_north )then
folded = .TRUE.
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
end if
else if( jed.GT.jeg )then ! split into two parts
if( domain%y(tMe)%cyclic)then !try cyclic offset
is2 = is; ie2 = ie; js2 = js; je2 = je
isd2 = isd; ied2 = ied; jsd2 = jsd; jed2 = jeg
js = js + joff; je = je + joff
jsd = jeg+1
else if( folded_north )then
folded = .TRUE.
is2 = is; ie2 = ie; js2 = js; je2 = je
isd2 = isd; ied2 = ied; jsd2 = jsd; jed2 = jeg
jsd = jeg+1
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
if(isd < isg .and. ied .GE. isg .and. domain%symmetry) then
isd3 = isd; ied3 = isg-1
jsd3 = jsd; jed3 = jed
is3 = is-ioff; ie3=ie-ioff
js3 = js; je3 = je
isd = isg;
endif
end if
endif
if( jeg .GE. js .AND. jeg .LE. je .AND. jed == jeg .AND. folded_north &
.AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_recv_fold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ishift, position, ioff, middle)
else
call fill_overlap_recv_nofold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, folded)
endif
if(ie3 .GE. is3) call fill_overlap_recv_nofold(overlap, domain, m, is3, ie3, js3, je3, isd3, ied3, jsd3, jed3, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, folded)
if(ie2 .GE. is2) then
if( jeg .GE. js2 .AND. jeg .LE. je2 .AND. jed2 == jeg .AND. folded_north &
.AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_recv_fold(overlap, domain, m, is2, ie2, js2, je2, isd2, ied2, jsd2, jed2, &
isg, ieg, dir, ishift, position, ioff, middle)
else
call fill_overlap_recv_nofold(overlap, domain, m, is2, ie2, js2, je2, isd2, ied2, jsd2, jed2, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic)
endif
endif
else
need_adjust_1 = .true.; need_adjust_2 = .true.; need_adjust_3 = .true.
if( jed.GT.jeg )then
if( domain%y(tMe)%cyclic .AND. je.LT.jsd )then !try cyclic offset
js = js+joff; je = je+joff
need_adjust_1 = .false.
if( isd.LT.isg )then
if( domain%x(tMe)%cyclic .AND. is.GE.ied )then !try cyclic offset
is = is-ioff; ie = ie-ioff
need_adjust_2 = .false.
if(x_cyclic_offset .NE. 0) then
call apply_cyclic_offset(js, je, -x_cyclic_offset, jsg, jegd, nj)
else if(y_cyclic_offset .NE. 0) then
call apply_cyclic_offset(is, ie, y_cyclic_offset, isgd, ieg, ni)
end if
end if
else
call apply_cyclic_offset(is, ie, y_cyclic_offset, isg, ieg, ni)
need_adjust_3 = .false.
end if
else if( folded_north )then
folded = .TRUE.
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
end if
end if
if( need_adjust_3 .AND. isd.LT.isg )then
if( need_adjust_2 .AND. domain%x(tMe)%cyclic .AND. is.GE.ied )then !try cyclic offset
is = is-ioff; ie = ie-ioff
if( need_adjust_1 .AND. jed.LE.jeg )then
call apply_cyclic_offset(js, je, -x_cyclic_offset, jsg, jeg, nj)
end if
end if
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir)
endif
!--- when the north edge is folded, is will be less than isg when the position is EAST or CORNER
if(is .LT. isg .AND. domain%x(tMe)%cyclic) then
is = is + ioff
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, is, js, je, isd, ied, jsd, jed, dir, folded )
endif
!recv_n
dir = 7
folded = .false.
isd = domain%x(tMe)%compute%begin; ied = domain%x(tMe)%compute%end+ishift
jsd = domain%y(tMe)%compute%end+1+jshift; jed = domain%y(tMe)%compute%end+nhalo+jshift
is=isc; ie=iec; js=jsc; je=jec
!--- when the domain is symmetric and the position is EAST or CORNER, the point at i=isd will
!--- come from two pes ( there will be only one point on one pe ).
if( domain%symmetry .AND. (position == EAST .OR. position == CORNER ) &
.AND. (isd == ie .or. ied == is ) .AND. (.not. folded_north) ) then
!--- do nothing, this point will come from other pe
else
js2 = -1; je2 = 0
if( jsd .GT. jeg ) then ! jed > jeg
if( domain%y(tMe)%cyclic .AND. je.LT.jsd )then !try cyclic offset
js = js+joff; je = je+joff
call apply_cyclic_offset(is, ie, y_cyclic_offset, isg, ieg, ni)
else if( folded_north )then
folded = .TRUE.
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
end if
else if( jed.GT.jeg )then ! split into two parts
if( domain%y(tMe)%cyclic)then !try cyclic offset
is2 = is; ie2 = ie; js2 = js; je2 = je
isd2 = isd; ied2 = ied; jsd2 = jsd; jed2 = jeg
js = js + joff; je = je + joff
jsd = jeg+1
else if( folded_north )then
folded = .TRUE.
is2 = is; ie2 = ie; js2 = js; je2 = je
isd2 = isd; ied2 = ied; jsd2 = jsd; jed2 = jeg
jsd = jeg+1
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
end if
end if
if(x_cyclic_offset == 0 .and. y_cyclic_offset == 0) then
if( jeg .GE. js .AND. jeg .LE. je .AND. jed == jeg .AND. folded_north &
.AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_recv_fold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ishift, position, ioff, middle, symmetry=domain%symmetry)
else
call fill_overlap_recv_nofold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, folded, symmetry=domain%symmetry)
endif
else
call fill_overlap(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir, symmetry=domain%symmetry)
endif
if(ie2 .GE. is2) then
if(jeg .GE. js2 .AND. jeg .LE. je2 .AND. jed2 == jeg .AND. folded_north &
.AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_recv_fold(overlap, domain, m, is2, ie2, js2, je2, isd2, ied2, jsd2, jed2, &
isg, ieg, dir, ishift, position, ioff, middle, symmetry=domain%symmetry)
else
call fill_overlap_recv_nofold(overlap, domain, m, is2, ie2, js2, je2, isd2, ied2, jsd2, jed2, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, folded, symmetry=domain%symmetry)
endif
endif
endif
!--- when the north edge is folded, is will be less than isg when the position is EAST or CORNER
if(is .LT. isg .AND. domain%x(tMe)%cyclic) then
! is = is + ioff
! call insert_update_overlap( overlap, domain%list(m)%pe, &
! is, is, js, je, isd, ied, jsd, jed, dir, folded)
endif
!--- Now calculate the overlap for the fold edge. Currently we only consider the folded-north case.
!--- For the folded-north edge, only to_pe's north (7) direction needs to be considered,
!--- and only the NORTH and CORNER positions need to be considered.
if( folded_north .AND. (position == NORTH .OR. position == CORNER) &
.AND. domain%x(tMe)%pos .GE. size(domain%x(tMe)%list(:))/2) then
if( jed .GE. jeg .AND. ied .GE. middle)then
jsd = jeg; jed = jeg
is=isc; ie=iec; js = jsc; je = jec
isd = max(isd, middle)
select case (position)
case(NORTH)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
case(CORNER)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
end select
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, .true.)
endif
if(debug_update_level .NE. NO_CHECK .AND. set_check) then
jsd = domain%y(tMe)%compute%end+jshift; jed = jsd
if(jed == jeg) then
is = max(is, isd); ie = min(ie, ied)
js = max(js, jsd); je = min(je, jed)
if(ie.GE.is .AND. je.GE.js )then
nrecv_check = nrecv_check+1
if(nrecv_check > size(checkList(:)) ) then
call expand_check_overlap_list(checkList, nlist)
endif
call allocate_check_overlap(checkList(nrecv_check), 1)
call insert_check_overlap(checkList(nrecv_check), domain%list(m)%pe, &
tMe, 4, ONE_HUNDRED_EIGHTY, is, ie, js, je)
end if
end if
endif
endif
!recv_ne
dir = 8
folded = .false.
isd = domain%x(tMe)%compute%end+1+ishift; ied = domain%x(tMe)%compute%end+ehalo+ishift
jsd = domain%y(tMe)%compute%end+1+jshift; jed = domain%y(tMe)%compute%end+nhalo+jshift
is=isc; ie=iec; js=jsc; je=jec
is2 = 0; ie2=-1; js2=0; je2=-1
is3 = 0; ie3 = -1; js3 = 0; je3 = -1
if(x_cyclic_offset == 0 .AND. y_cyclic_offset == 0) then
js2 = -1; je2 = 0
if( jsd .GT. jeg ) then ! jed > jeg
if( domain%y(tMe)%cyclic .AND. je.LT.jsd )then !try cyclic offset
js = js+joff; je = je+joff
call apply_cyclic_offset(is, ie, y_cyclic_offset, isg, ieg, ni)
else if( folded_north )then
folded = .TRUE.
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
end if
else if( jed.GT.jeg )then ! split into two parts
if( domain%y(tMe)%cyclic)then !try cyclic offset
is2 = is; ie2 = ie; js2 = js; je2 = je
isd2 = isd; ied2 = ied; jsd2 = jsd; jed2 = jeg
js = js + joff; je = je + joff
jsd = jeg+1
else if( folded_north )then
folded = .TRUE.
is2 = is; ie2 = ie; js2 = js; je2 = je
isd2 = isd; ied2 = ied; jsd2 = jsd; jed2 = jeg
jsd = jeg+1
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
if(ied > ieg .and. isd .LE. ieg .and. domain%symmetry) then
isd3 = ieg+1; ied3 = ied
jsd3 = jsd; jed3 = jed
is3 = is+ioff; ie3=ie+ioff
js3 = js; je3 = je
ied = ieg;
endif
end if
endif
if( jeg .GE. js .AND. jeg .LE. je .AND. jed == jeg .AND. folded_north &
.AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_recv_fold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ishift, position, ioff, middle)
else
call fill_overlap_recv_nofold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, folded)
endif
if(ie3 .GE. is3) call fill_overlap_recv_nofold(overlap, domain, m, is3, ie3, js3, je3, isd3, ied3, jsd3, jed3, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic, folded)
if(ie2 .GE. is2) then
if(jeg .GE. js2 .AND. jeg .LE. je2 .AND. jed2 == jeg .AND. folded_north &
.AND. (position == CORNER .OR. position == NORTH)) then
call fill_overlap_recv_fold(overlap, domain, m, is2, ie2, js2, je2, isd2, ied2, jsd2, jed2, &
isg, ieg, dir, ishift, position, ioff, middle)
else
call fill_overlap_recv_nofold(overlap, domain, m, is2, ie2, js2, je2, isd2, ied2, jsd2, jed2, &
isg, ieg, dir, ioff, domain%x(tMe)%cyclic)
endif
endif
else
need_adjust_1 = .true.; need_adjust_2 = .true.; need_adjust_3 = .true.
if( jed.GT.jeg )then
if( domain%y(tMe)%cyclic .AND. je.LT.jsd )then !try cyclic offset
js = js+joff; je = je+joff
need_adjust_1 = .false.
if( ied.GT.ieg )then
if( domain%x(tMe)%cyclic .AND. ie.LT.isd )then !try cyclic offset
is = is+ioff; ie = ie+ioff
need_adjust_2 = .false.
if(x_cyclic_offset .NE. 0) then
call apply_cyclic_offset(js, je, x_cyclic_offset, jsg, jegd, nj)
else if(y_cyclic_offset .NE. 0) then
call apply_cyclic_offset(is, ie, y_cyclic_offset, isg, iegd, ni)
end if
end if
else
call apply_cyclic_offset(is, ie, y_cyclic_offset, isg, ieg, ni)
need_adjust_3 = .false.
end if
else if( folded_north )then
folded = .TRUE.
call get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
end if
end if
if( need_adjust_3 .AND. ied.GT.ieg )then
if( need_adjust_2 .AND. domain%x(tMe)%cyclic .AND. ie.LT.isd )then !try cyclic offset
is = is+ioff; ie = ie+ioff
if( need_adjust_1 .AND. jed.LE.jeg)then
call apply_cyclic_offset(js, je, x_cyclic_offset, jsg, jeg, nj)
end if
end if
end if
call fill_overlap(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, jsg, jeg, dir)
endif
endif
!--- copy the overlapping information
if( overlap%count > 0) then
nrecv = nrecv + 1
if(nrecv > size(overlapList(:)) )then
call mpp_error(NOTE, 'mpp_domains_define.inc(compute_overlaps): overlapList for recv is expanded')
call expand_update_overlap_list(overlapList, nlist)
endif
call add_update_overlap( overlapList(nrecv), overlap)
call init_overlap_type(overlap)
endif
enddo ! end of recv do loop
if(debug_message_passing) then
!--- write out recv information
unit = mpp_pe() + 1000
do m =1,nrecv
write(unit, *) "********from_pe = " ,overlapList(m)%pe, " count = ",overlapList(m)%count
do n = 1, overlapList(m)%count
write(unit, *) overlapList(m)%is(n), overlapList(m)%ie(n), overlapList(m)%js(n), overlapList(m)%je(n), &
overlapList(m)%dir(n), overlapList(m)%rotation(n)
enddo
enddo
if(nrecv >0) call flush(unit)
endif
! copy the overlapping information into domain
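!--- note: a recv overlap in direction 1 (east) or 7 (north) that comes from the same tile
!--- clears the corresponding x/y loffset below, presumably because that edge point is then
!--- supplied by the neighboring domain.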
if(nrecv>0) then
allocate(update%recv(nrecv))
update%nrecv = nrecv
do m = 1, nrecv
call add_update_overlap( update%recv(m), overlapList(m) )
do n = 1, update%recv(m)%count
if(update%recv(m)%tileNbr(n) == domain%tile_id(tMe)) then
if(update%recv(m)%dir(n) == 1) domain%x(tMe)%loffset = 0
if(update%recv(m)%dir(n) == 7) domain%y(tMe)%loffset = 0
endif
enddo
enddo
endif
if(nrecv_check>0) then
check%nrecv = nrecv_check
allocate(check%recv(nrecv_check))
do m = 1, nrecv_check
call add_check_overlap( check%recv(m), checkList(m) )
enddo
endif
call deallocate_overlap_type(overlap)
do m = 1,size(overlapList(:))
call deallocate_overlap_type(overlapList(m))
enddo
if(debug_update_level .NE. NO_CHECK .AND. set_check) then
do m = 1,size(checkList(:))
call deallocate_overlap_type(checkList(m))
enddo
endif
deallocate(overlapList)
if(set_check) deallocate(checkList)
domain%initialized = .true.
end subroutine compute_overlaps
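!##################################################################################
!--- fill_overlap_send_nofold: insert a send overlap that does not touch the fold.
!--- When the x-direction is cyclic and the region extends past the global east edge
!--- (ie > ieg) or west edge (is < isg), a copy shifted by ioff is inserted as well.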
subroutine fill_overlap_send_nofold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ioff, is_cyclic, folded, symmetry)
type(overlap_type), intent(inout) :: overlap
type(domain2d), intent(inout) :: domain
integer, intent(in ) :: m, is, ie, js, je
integer, intent(in ) :: isc, iec, jsc, jec
integer, intent(in ) :: isg, ieg, dir, ioff
logical, intent(in ) :: is_cyclic
logical, optional, intent(in ) :: folded, symmetry
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, reverse=folded, symmetry=symmetry)
if(is_cyclic) then
if(ie .GT. ieg) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is-ioff, ie-ioff, js, je, isc, iec, jsc, jec, dir, reverse=folded, symmetry=symmetry)
else if( is .LT. isg ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is+ioff, ie+ioff, js, je, isc, iec, jsc, jec, dir, reverse=folded, symmetry=symmetry)
endif
endif
end subroutine fill_overlap_send_nofold
!##################################################################################
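!--- fill_overlap_send_fold: insert send overlaps for a region that touches the folded
!--- north edge. The i-range is split about the global east edge and the half-way point
!--- "middle"; the part at or east of "middle" sends rows js..je-1 unreflected and the
!--- fold row j = je with reflected i-indices (i -> isg+ieg-i, adjusted by ishift for
!--- CORNER), while the remaining part is sent without reflection.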
subroutine fill_overlap_send_fold(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, dir, ishift, position, ioff, middle, symmetry)
type(overlap_type), intent(inout) :: overlap
type(domain2d), intent(inout) :: domain
integer, intent(in ) :: m, is, ie, js, je
integer, intent(in ) :: isc, iec, jsc, jec
integer, intent(in ) :: isg, ieg, dir, ishift, position, ioff, middle
logical, optional, intent(in ) :: symmetry
integer :: is1, ie1, is2, ie2, i
!--- consider the row j = jeg near the west edge.
!--- when the data is at CORNER and the domain is not symmetric, the point i = isg-1 is
!--- obtained from the cyclic condition
if(position == CORNER .AND. .NOT. domain%symmetry .AND. is .LE. isg-1 .AND. ie .GE. isg-1) then
call insert_update_overlap(overlap, domain%list(m)%pe, &
isg-1+ioff, isg-1+ioff, je, je, isc, iec, jsc, jec, dir, .true.)
end if
is1 = 0; ie1 = -1; is2 = 0; ie2 = -1
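!--- the split ranges start out empty (ie < is) so that branches not set below are skipped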
!--- east edge
if( is > ieg ) then
is2 = is-ioff; ie2 = ie-ioff
else if( ie > ieg ) then ! split into two parts
is1 = is; ie1 = ieg
is2 = ieg+1-ioff; ie2 = ie-ioff
else if( is .GE. middle ) then
is1 = is; ie1 = ie
else if( ie .GE. middle ) then ! split into two parts
is1 = middle; ie1 = ie
is2 = is; ie2 = middle-1
else if( ie < isg ) then ! west boundary
is1 = is+ieg-isg+1-ishift; ie1 = ie+ieg-isg+1-ishift
else if( is < isg ) then ! split into two parts
is1 = is+ieg-isg+1-ishift; ie1 = isg-1+ieg-isg+1-ishift
is2 = isg; ie2 = ie
else
is2 = is; ie2 = ie
endif
if( ie1 .GE. is1) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is1, ie1, js, je-1, isc, iec, jsc, jec, dir, symmetry=symmetry)
select case (position)
case(NORTH)
i=is1; is1 = isg+ieg-ie1; ie1 = isg+ieg-i
case(CORNER)
i=is1; is1 = isg+ieg-ie1-1+ishift; ie1 = isg+ieg-i-1+ishift
end select
call insert_update_overlap( overlap, domain%list(m)%pe, &
is1, ie1, je, je, isc, iec, jsc, jec, dir, .true., symmetry=symmetry)
endif
if(ie2 .GE. is2) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is2, ie2, js, je, isc, iec, jsc, jec, dir)
endif
end subroutine fill_overlap_send_fold
!#############################################################################
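!--- fill_overlap_recv_nofold: receive-side counterpart of fill_overlap_send_nofold.
!--- The overlap is inserted once; when the x-direction is cyclic and either the halo
!--- (data) region or the source range crosses the global east or west edge, a copy of
!--- the source range shifted by ioff is inserted as well.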
subroutine fill_overlap_recv_nofold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ioff, is_cyclic, folded, symmetry)
type(overlap_type), intent(inout) :: overlap
type(domain2d), intent(inout) :: domain
integer, intent(in ) :: m, is, ie, js, je
integer, intent(in ) :: isd, ied, jsd, jed
integer, intent(in ) :: isg, ieg, dir, ioff
logical, intent(in ) :: is_cyclic
logical, optional, intent(in ) :: folded, symmetry
integer :: is1, ie1, is2, ie2
integer :: isd1, ied1, isd2, ied2
is1 = 0; ie1 = -1; is2 = 0; ie2 = -1
isd1=isd; ied1=ied
isd2=isd; ied2=ied
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, reverse=folded, symmetry=symmetry)
if(is_cyclic) then
if(ied .GT. ieg) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is+ioff, ie+ioff, js, je, isd, ied, jsd, jed, dir, reverse=folded, symmetry=symmetry)
else if( isd .LT. isg ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is-ioff, ie-ioff, js, je, isd, ied, jsd, jed, dir, reverse=folded, symmetry=symmetry)
else if ( is .LT. isg ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is+ioff, ie+ioff, js, je, isd, ied, jsd, jed, dir, reverse=folded, symmetry=symmetry)
else if ( ie .GT. ieg ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is-ioff, ie-ioff, js, je, isd, ied, jsd, jed, dir, reverse=folded, symmetry=symmetry)
endif
endif
end subroutine fill_overlap_recv_nofold
!#################################################################################
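!--- fill_overlap_recv_fold: receive-side counterpart of fill_overlap_send_fold for the
!--- folded north edge. The halo range is split about the global east edge and "middle";
!--- the part at or east of "middle" receives rows jsd..jed-1 normally and the fold row
!--- jed with reflected source indices (is3/ie3), while the remaining part is received
!--- without reflection.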
subroutine fill_overlap_recv_fold(overlap, domain, m, is, ie, js, je, isd, ied, jsd, jed, &
isg, ieg, dir, ishift, position, ioff, middle, symmetry)
type(overlap_type), intent(inout) :: overlap
type(domain2d), intent(inout) :: domain
integer, intent(in ) :: m, is, ie, js, je
integer, intent(in ) :: isd, ied, jsd, jed
integer, intent(in ) :: isg, ieg, dir, ishift, position, ioff, middle
logical, optional, intent(in ) :: symmetry
integer :: is1, ie1, is2, ie2, is3, ie3
integer :: isd1, ied1, isd2, ied2
!--- consider the row j = jeg near the west edge.
!--- when the data is at CORNER and the domain is not symmetric, the point i = isg-1 is
!--- obtained from the cyclic condition
if( position == CORNER .AND. .NOT. domain%symmetry .AND. isd .LE. isg-1 .AND. ied .GE. isg-1 ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is-ioff, ie-ioff, js, je, isg-1, isg-1, jed, jed, dir, .true.)
end if
is1 = 0; ie1 = -1; is2 = 0; ie2 = -1
isd1=isd; ied1=ied
isd2=isd; ied2=ied
select case (position)
case(NORTH)
is3 = isg+ieg-ie; ie3 = isg+ieg-is
case(CORNER)
is3 = isg+ieg-ie-1+ishift; ie3 = isg+ieg-is-1+ishift
end select
if(isd .GT. ieg) then ! east
is2 = is + ioff; ie2 = ie + ioff;
else if(ied .GT. ieg) then ! split into two parts
is1 = is; ie1 = ie;
isd1 = isd; ied1 = ieg;
is2 = is + ioff; ie2 = ie + ioff
isd2 = ieg + 1; ied2 = ied
else if(isd .GE. middle) then
is1 = is; ie1 = ie
else if(ied .GE. middle) then ! split into two parts
is1 = is; ie1 = ie
isd1 = middle; ied1 = ied
is2 = is; ie2 = ie
isd2 = isd; ied2 = middle-1
else if(ied .LT. isg) then
is1 = is - ioff; ie1 = ie - ioff;
is3 = is3 - ioff; ie3 = ie3 - ioff;
else if(isd .LT. isg) then ! split into two parts
is1 = is - ioff; ie1 = ie - ioff;
is3 = is3 - ioff; ie3 = ie3 - ioff;
isd1 = isd; ied1 = isg-1
is2 = is; ie2 = ie
isd2 = isg; ied2 = ied
else
is2 = is ; ie2 =ie
isd2 = isd; ied2 = ied
endif
if( ie1 .GE. is1) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is1, ie1, js, je, isd1, ied1, jsd, jed-1, dir, symmetry=symmetry)
call insert_update_overlap( overlap, domain%list(m)%pe, &
is3, ie3, js, je, isd1, ied1, jed, jed, dir, .true., symmetry=symmetry)
endif
if(ie2 .GE. is2) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is2, ie2, js, je, isd2, ied2, jsd, jed, dir)
endif
end subroutine fill_overlap_recv_fold
!#####################################################################################
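!--- fill_overlap: when a nonzero cyclic offset makes an index range wrap around
!--- (js > je or is > ie), the range is split at the global boundary and each piece is
!--- inserted separately; otherwise a single overlap is inserted.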
subroutine fill_overlap(overlap, domain, m, is, ie, js, je, isc, iec, jsc, jec, &
isg, ieg, jsg, jeg, dir, reverse, symmetry)
type(overlap_type), intent(inout) :: overlap
type(domain2d), intent(inout) :: domain
integer, intent(in ) :: m, is, ie, js, je
integer, intent(in ) :: isc, iec, jsc, jec
integer, intent(in ) :: isg, ieg, jsg, jeg
integer, intent(in ) :: dir
logical, optional, intent(in ) :: reverse, symmetry
if(js > je) then ! separate into two regions because x_cyclic_offset is nonzero; the two regions are
! (js, jeg) and (jsg, je).
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, jsg, je, isc, iec, jsc, jec, dir, reverse, symmetry)
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, jeg, isc, iec, jsc, jec, dir, reverse, symmetry)
else if(is > ie) then ! separate into two regions because y_cyclic_offset is nonzero; the two regions are
! (is, ieg) and (isg, ie).
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ieg, js, je, isc, iec, jsc, jec, dir, reverse, symmetry)
call insert_update_overlap( overlap, domain%list(m)%pe, &
isg, ie, js, je, isc, iec, jsc, jec, dir, reverse, symmetry)
else
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, reverse, symmetry)
end if
end subroutine fill_overlap
!####################################################################################
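!--- compute_overlaps_fold_south: overlap setup for a domain whose south edge is folded
!--- and whose x-direction is cyclic (the domain must also be symmetric, as checked below).
!--- The send loop covers the eight halo directions (1=E, 2=SE, 3=S, 4=SW, 5=W, 6=NW,
!--- 7=N, 8=NE) plus the fold-edge row, and the recv loop mirrors the same structure.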
subroutine compute_overlaps_fold_south( domain, position, ishift, jshift)
!computes remote domain overlaps
!assumes only one in each direction
!will calculate the overlapping for T,E,C,N-cell separately.
type(domain2D), intent(inout) :: domain
integer, intent(in) :: position, ishift, jshift
integer :: i, m, n, nlist, tMe, tNbr, dir
integer :: is, ie, js, je, isc, iec, jsc, jec, isd, ied, jsd, jed
integer :: isg, ieg, jsg, jeg, ioff, joff
integer :: list, middle, ni, nj, isgd, iegd, jsgd, jegd
integer :: ism, iem, jsm, jem, whalo, ehalo, shalo, nhalo
logical :: folded
type(overlap_type) :: overlap
type(overlapSpec), pointer :: update=>NULL()
type(overlap_type), pointer :: overlapList(:)=>NULL()
type(overlap_type), pointer :: checkList(:)=>NULL()
type(overlapSpec), pointer :: check =>NULL()
integer :: nsend, nrecv
integer :: nsend_check, nrecv_check
integer :: unit
!--- we require that when multiple tiles are placed on one pe, all of those tiles are confined to that pe.
!--- In that case (ntiles on this pe greater than 1) there is no overlap between processors within a tile,
!--- so the overlap only needs to be computed for tMe=1 and tNbr=1
if(size(domain%x(:)) > 1) return
!--- if there is no halo, no need to compute overlaps.
if(domain%whalo==0 .AND. domain%ehalo==0 .AND. domain%shalo==0 .AND. domain%nhalo==0) return
!--- when there is only one tile, n will equal np
nlist = size(domain%list(:))
select case(position)
case (CENTER)
update => domain%update_T
check => NULL()
case (CORNER)
update => domain%update_C
check => domain%check_C
case (EAST)
update => domain%update_E
check => domain%check_E
case (NORTH)
update => domain%update_N
check => domain%check_N
case default
call mpp_error(FATAL, &
"mpp_domains_define.inc(compute_overlaps_fold_south): the value of position should be CENTER, EAST, CORNER or NORTH")
end select
allocate(overlapList(MAXLIST) )
allocate(checkList(MAXLIST) )
!--- overlap is used to store the overlapping temporarily.
call allocate_update_overlap( overlap, MAXOVERLAP)
!send
call mpp_get_compute_domain( domain, isc, iec, jsc, jec, position=position )
call mpp_get_global_domain ( domain, isg, ieg, jsg, jeg, xsize=ni, ysize=nj, position=position ) !cyclic offsets
call mpp_get_memory_domain ( domain, ism, iem, jsm, jem, position=position )
update%xbegin = ism; update%xend = iem
update%ybegin = jsm; update%yend = jem
if(ASSOCIATED(check)) then
check%xbegin = ism; check%xend = iem
check%ybegin = jsm; check%yend = jem
endif
update%whalo = domain%whalo; update%ehalo = domain%ehalo
update%shalo = domain%shalo; update%nhalo = domain%nhalo
whalo = domain%whalo; ehalo = domain%ehalo
shalo = domain%shalo; nhalo = domain%nhalo
ioff = ni - ishift
joff = nj - jshift
middle = (isg+ieg)/2+1
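!--- "middle" is the first i-point in the eastern half of the global domain; points at or
!--- east of it on the fold row are mirrored onto the western half (i -> isg+ieg-i).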
tMe = 1; tNbr = 1
if(.NOT. BTEST(domain%fold,SOUTH)) then
call mpp_error(FATAL, "mpp_domains_define.inc(compute_overlaps_fold_south): "//&
"boundary condition in y-direction should be folded-south for "//trim(domain%name))
endif
if(.NOT. domain%x(tMe)%cyclic) then
call mpp_error(FATAL, "mpp_domains_define.inc(compute_overlaps_fold_south): "//&
"boundary condition in x-direction should be cyclic for "//trim(domain%name))
endif
if(.not. domain%symmetry) then
call mpp_error(FATAL, "mpp_domains_define.inc(compute_overlaps_fold_south): "//&
"when south boundary is folded, the domain must be symmetry for "//trim(domain%name))
endif
nsend = 0
nsend_check = 0
do list = 0,nlist-1
m = mod( domain%pos+list, nlist )
if(domain%list(m)%tile_id(tNbr) == domain%tile_id(tMe) ) then ! only compute the overlapping within tile.
!to_pe's eastern halo
dir = 1
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%begin; je = domain%list(m)%y(tNbr)%compute%end+jshift
!--- to make sure the consistency between pes
if( (position == NORTH .OR. position == CORNER ) .AND. ( jsc == je .or. jec == js ) ) then
!--- do nothing, this point will come from other pe
else
if( ie.GT.ieg .AND. iec.LT.is )then ! cyclic is assumed
is = is-ioff; ie = ie-ioff
end if
!--- when the south face is folded, the east-halo points at j = jsg on the right half of the
!--- domain will be folded. the position should be CORNER or NORTH
if( js == jsg .AND. (position == CORNER .OR. position == NORTH) &
.AND. is .GE. middle .AND. domain%list(m)%x(tNbr)%compute%end+ehalo+jshift .LE. ieg ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js+1, je, isc, iec, jsc, jec, dir)
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
je = js
select case (position)
case(NORTH)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
case(CORNER)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
end select
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, .true.)
else
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, symmetry=domain%symmetry)
end if
end if
!to_pe's SE halo
dir = 2
folded = .false.
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
if( ie.GT.ieg .AND. iec.LT.is )then ! cyclic is assumed
is = is-ioff; ie = ie-ioff
end if
if( js.LT.jsg )then
folded = .TRUE.
call get_fold_index_south(isg, ieg, jsg, ishift, position, is, ie, js, je)
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, folded)
!to_pe's southern halo
dir = 3
folded = .FALSE.
is = domain%list(m)%x(tNbr)%compute%begin; ie = domain%list(m)%x(tNbr)%compute%end+ishift
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
folded = .FALSE.
if( js.LT.jsg )then
folded = .TRUE.
call get_fold_index_south(isg, ieg, jsg, ishift, position, is, ie, js, je)
end if
!--- when the domain is symmetric and position is EAST or CORNER, the point where isc == ie
!--- does not need to be sent, because the data at that point will come from another pe
!--- ( the point would otherwise come from two pes; there will be only one point on one pe ).
if( (position == EAST .OR. position == CORNER ) .AND. ( isc == ie .or. iec == is ) ) then
!--- do nothing, this point will come from other pe
else
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, folded, symmetry=domain%symmetry)
endif
!--- when south edge is folded, is will be less than isg when position is EAST or CORNER
if(is .LT. isg) then
is = is + ioff
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, is, js, je, isc, iec, jsc, jec, dir, folded)
endif
!to_pe's SW halo
dir = 4
folded = .false.
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
if( isg.GT.is .AND. ie.LT.isc )then !cyclic offset
is = is+ioff; ie = ie+ioff
end if
if( js.LT.jsg )then
folded = .TRUE.
call get_fold_index_south(isg, ieg, jsg, ishift, position, is, ie, js, je)
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, folded)
!--- when south edge is folded, is will be less than isg when position is EAST and CORNER
if(is .LT. isg) then
is = is + ioff
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, is, js, je, isc, iec, jsc, jec, dir, folded)
endif
!to_pe's western halo
dir = 5
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%begin; je = domain%list(m)%y(tNbr)%compute%end+jshift
!--- to make sure the consistency between pes
if( (position == NORTH .OR. position == CORNER ) .AND. ( jsc == je .or. jec == js ) ) then
!--- do nothing, this point will come from other pe
else
if( isg.GT.is .AND. ie.LT.isc )then ! cyclic offset
is = is+ioff; ie = ie+ioff
end if
!--- when the south face is folded, some points at j = jsg will be folded.
!--- the position should be CORNER or NORTH
if( js == jsg .AND. (position == CORNER .OR. position == NORTH) &
.AND. ( domain%list(m)%x(tNbr)%compute%begin == isg .OR. domain%list(m)%x(tNbr)%compute%begin-1 .GE. middle)) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js+1, je, isc, iec, jsc, jec, dir)
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%begin; je = js
if ( domain%list(m)%x(tNbr)%compute%begin == isg ) then
select case (position)
case(NORTH)
i=is; is = 2*isg-ie-1; ie = 2*isg-i-1
case(CORNER)
i=is; is = 2*isg-ie-2+2*ishift; ie = 2*isg-i-2+2*ishift
end select
if(ie .GT. domain%x(tMe)%compute%end+ishift) call mpp_error( FATAL, &
'mpp_domains_define.inc(compute_overlaps_fold_south): west edge ubound error send.' )
else
select case (position)
case(NORTH)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
case(CORNER)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
end select
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, .true.)
else
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, symmetry=domain%symmetry)
end if
endif
!to_pe's NW halo
dir = 6
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
if( isg.GT.is .AND. ie.LT.isc )then ! cyclic offset
is = is+ioff; ie = ie+ioff
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir)
!to_pe's northern halo
dir = 7
is = domain%list(m)%x(tNbr)%compute%begin; ie = domain%list(m)%x(tNbr)%compute%end+ishift
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, symmetry=domain%symmetry)
!to_pe's NE halo
dir = 8
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
if( ie.GT.ieg .AND. iec.LT.is )then !cyclic offset
is = is-ioff; ie = ie-ioff
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir)
!--- Now calculate the overlapping for fold-edge.
!--- only position at NORTH and CORNER need to be considered
if( ( position == NORTH .OR. position == CORNER) ) then
if( domain%y(tMe)%data%begin .LE. jsg .AND. jsg .LE. domain%y(tMe)%data%end+jshift )then !fold is within domain
dir = 3
!--- calculate the overlapping for sending
if( domain%x(tMe)%pos .LT. (size(domain%x(tMe)%list(:))+1)/2 )then
js = domain%list(m)%y(tNbr)%compute%begin; je = js
if( js == jsg )then ! fold is within domain.
is = domain%list(m)%x(tNbr)%compute%begin; ie = domain%list(m)%x(tNbr)%compute%end+ishift
select case (position)
case(NORTH)
is = max(is, middle)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
case(CORNER)
is = max(is, middle)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
end select
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, .true.)
is = max(is, isc); ie = min(ie, iec)
js = max(js, jsc); je = min(je, jec)
if(debug_update_level .NE. NO_CHECK .AND. ie.GE.is .AND. je.GE.js )then
nsend_check = nsend_check+1
call allocate_check_overlap(checkList(nsend_check), 1)
call insert_check_overlap(checkList(nsend_check), domain%list(m)%pe, &
tMe, 2, ONE_HUNDRED_EIGHTY, is, ie, js, je)
end if
end if
end if
end if
end if
end if
!--- copy the overlapping information
if( overlap%count > 0) then
nsend = nsend + 1
if(nsend > size(overlapList(:)) ) then
call mpp_error(NOTE, 'mpp_domains_define.inc(compute_overlaps_south): overlapList for send is expanded')
call expand_update_overlap_list(overlapList, nlist)
endif
call add_update_overlap(overlapList(nsend), overlap)
call init_overlap_type(overlap)
endif
end do ! end of send set up.
if(debug_message_passing) then
!--- write out send information
unit = mpp_pe() + 1000
do m =1,nsend
write(unit, *) "********to_pe = " ,overlapList(m)%pe, " count = ",overlapList(m)%count
do n = 1, overlapList(m)%count
write(unit, *) overlapList(m)%is(n), overlapList(m)%ie(n), overlapList(m)%js(n), overlapList(m)%je(n), &
overlapList(m)%dir(n), overlapList(m)%rotation(n)
enddo
enddo
if( nsend > 0) call flush(unit)
endif
! copy the overlapping information into domain data structure
if(nsend>0) then
allocate(update%send(nsend))
update%nsend = nsend
do m = 1, nsend
call add_update_overlap( update%send(m), overlapList(m) )
enddo
endif
if(nsend_check>0) then
allocate(check%send(nsend_check))
check%nsend = nsend_check
do m = 1, nsend_check
call add_check_overlap( check%send(m), checkList(m) )
enddo
endif
do m = 1,size(overlapList(:))
call deallocate_overlap_type(overlapList(m))
enddo
if(debug_update_level .NE. NO_CHECK) then
do m = 1,size(checkList(:))
call deallocate_overlap_type(checkList(m))
enddo
endif
isgd = isg - domain%whalo
iegd = ieg + domain%ehalo
jsgd = jsg - domain%shalo
jegd = jeg + domain%nhalo
! begin setting up recv
nrecv = 0
nrecv_check = 0
do list = 0,nlist-1
m = mod( domain%pos+nlist-list, nlist )
if(domain%list(m)%tile_id(tNbr) == domain%tile_id(tMe) ) then ! only compute the overlapping within tile.
isc = domain%list(m)%x(1)%compute%begin; iec = domain%list(m)%x(1)%compute%end+ishift
jsc = domain%list(m)%y(1)%compute%begin; jec = domain%list(m)%y(1)%compute%end+jshift
!recv_e
dir = 1
isd = domain%x(tMe)%compute%end+1+ishift; ied = domain%x(tMe)%data%end+ishift
jsd = domain%y(tMe)%compute%begin; jed = domain%y(tMe)%compute%end+jshift
is=isc; ie=iec; js=jsc; je=jec
if( (position == NORTH .OR. position == CORNER ) .AND. ( jsd == je .or. jed == js ) ) then
! --- do nothing, this point will come from other pe
else
if( ied.GT.ieg .AND. ie.LT.isd )then !cyclic offset
is = is+ioff; ie = ie+ioff
end if
!--- when the south face is folded, the east-halo points at j = jsg on the right half of the
!--- domain will be folded. the position should be CORNER or NORTH
if( jsd == jsg .AND. (position == CORNER .OR. position == NORTH) &
.AND. isd .GE. middle .AND. ied .LE. ieg ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd+1, jed, dir)
is=isc; ie=iec; js=jsc; je=jec
jed = jsd
select case (position)
case(NORTH)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
case(CORNER)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
end select
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, .TRUE.)
else
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, symmetry=domain%symmetry)
end if
end if
!recv_se
dir = 2
folded = .false.
isd = domain%x(tMe)%compute%end+1+ishift; ied = domain%x(tMe)%data%end+ishift
jsd = domain%y(tMe)%data%begin; jed = domain%y(tMe)%compute%begin-1
is=isc; ie=iec; js=jsc; je=jec
if( jsd.LT.jsg )then
folded = .true.
call get_fold_index_south(isg, ieg, jsg, ishift, position, is, ie, js, je)
end if
if( ied.GT.ieg .AND. ie.LT.isd )then !cyclic offset
is = is+ioff; ie = ie+ioff
endif
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, folded)
!recv_s
dir = 3
folded = .false.
isd = domain%x(tMe)%compute%begin; ied = domain%x(tMe)%compute%end+ishift
jsd = domain%y(tMe)%data%begin; jed = domain%y(tMe)%compute%begin-1
is=isc; ie=iec; js=jsc; je=jec
if( jsd.LT.jsg )then
folded = .true.
call get_fold_index_south(isg, ieg, jsg, ishift, position, is, ie, js, je)
end if
if( (position == EAST .OR. position == CORNER ) .AND. (isd == ie .or. ied == is ) ) then
!--- do nothing, this point will come from other pe
else
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, folded, symmetry=domain%symmetry)
end if
!--- when south edge is folded, is will be less than isg when position is EAST and CORNER
if(is .LT. isg ) then
is = is + ioff
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, is, js, je, isd, ied, jsd, jed, dir, folded)
endif
!recv_sw
dir = 4
folded = .false.
isd = domain%x(tMe)%data%begin; ied = domain%x(tMe)%compute%begin-1
jsd = domain%y(tMe)%data%begin; jed = domain%y(tMe)%compute%begin-1
is=isc; ie=iec; js=jsc; je=jec
if( jsd.LT.jsg )then
folded = .true.
call get_fold_index_south(isg, ieg, jsg, ishift, position, is, ie, js, je)
end if
if( isd.LT.isg .AND. is.GT.ied ) then ! cyclic offset
is = is-ioff; ie = ie-ioff
end if
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, folded)
!--- when south edge is folded, is will be less than isg when position is EAST or CORNER
if(is .LT. isg ) then
is = is + ioff
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, is, js, je, isd, ied, jsd, jed, dir, folded )
endif
!recv_w
dir = 5
isd = domain%x(tMe)%data%begin; ied = domain%x(tMe)%compute%begin-1
jsd = domain%y(tMe)%compute%begin; jed = domain%y(tMe)%compute%end+jshift
is=isc; ie=iec; js=jsc; je=jec
if( (position == NORTH .OR. position == CORNER ) .AND. ( jsd == je .or. jed == js ) ) then
! --- do nothing, this point will come from other pe
else
if( isd.LT.isg .AND. is.GT.ied )then ! cyclic offset
is = is-ioff; ie = ie-ioff
end if
!--- when the south face is folded, some points at j = jsg will be folded.
!--- the position should be CORNER or NORTH
if( jsd == jsg .AND. (position == CORNER .OR. position == NORTH) &
.AND. ( isd < isg .OR. ied .GE. middle ) ) then
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd+1, jed, dir)
is=isc; ie=iec; js=jsc; je=jec
if(isd < isg) then
select case (position)
case(NORTH)
i=is; is = 2*isg-ie-1; ie = 2*isg-i-1
case(CORNER)
ied = ied -1 + ishift
i=is; is = 2*isg-ie-2+2*ishift; ie = 2*isg-i-2+2*ishift
end select
if(ie .GT. domain%x(tMe)%compute%end+ishift) call mpp_error( FATAL, &
'mpp_domains_define.inc(compute_overlaps_fold_south): west edge ubound error recv.' )
else
select case (position)
case(NORTH)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
case(CORNER)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
end select
end if
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jsd, dir, .TRUE.)
else
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, symmetry=domain%symmetry)
end if
endif
!recv_nw
dir = 6
isd = domain%x(tMe)%data%begin; ied = domain%x(tMe)%compute%begin-1
jsd = domain%y(tMe)%compute%end+1+jshift; jed = domain%y(tMe)%data%end+jshift
is=isc; ie=iec; js=jsc; je=jec
if( isd.LT.isg .AND. is.GE.ied )then !cyclic offset
is = is-ioff; ie = ie-ioff
endif
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir)
!recv_n
dir = 7
isd = domain%x(tMe)%compute%begin; ied = domain%x(tMe)%compute%end+ishift
jsd = domain%y(tMe)%compute%end+1+jshift; jed = domain%y(tMe)%data%end+jshift
is=isc; ie=iec; js=jsc; je=jec
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, symmetry=domain%symmetry)
!recv_ne
dir = 8
isd = domain%x(tMe)%compute%end+1+ishift; ied = domain%x(tMe)%data%end+ishift
jsd = domain%y(tMe)%compute%end+1+jshift; jed = domain%y(tMe)%data%end+jshift
is=isc; ie=iec; js=jsc; je=jec
if( ied.GT.ieg .AND. ie.LT.isd )then ! cyclic offset
is = is+ioff; ie = ie+ioff
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir)
!--- Now calculate the overlapping for fold-edge.
!--- for folded-south-edge, only need to consider to_pe's south(3) direction
!--- only position at NORTH and CORNER need to be considered
if( ( position == NORTH .OR. position == CORNER) ) then
if( domain%y(tMe)%data%begin .LE. jsg .AND. jsg .LE. domain%y(tMe)%data%end+jshift )then !fold is within domain
dir = 3
!--- calculate the overlapping for receiving on the folded south edge
if( domain%x(tMe)%pos .GE. size(domain%x(tMe)%list(:))/2 )then
jsd = domain%y(tMe)%compute%begin; jed = jsd
if( jsd == jsg )then ! fold is within domain.
isd = domain%x(tMe)%compute%begin; ied = domain%x(tMe)%compute%end+ishift
is=isc; ie=iec; js = jsc; je = jec
select case (position)
case(NORTH)
isd = max(isd, middle)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
case(CORNER)
isd = max(isd, middle)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
end select
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, .TRUE.)
is = max(is, isd); ie = min(ie, ied)
js = max(js, jsd); je = min(je, jed)
if(debug_update_level .NE. NO_CHECK .AND. ie.GE.is .AND. je.GE.js )then
nrecv_check = nrecv_check+1
call allocate_check_overlap(checkList(nrecv_check), 1)
call insert_check_overlap(checkList(nrecv_check), domain%list(m)%pe, &
tMe, 2, ONE_HUNDRED_EIGHTY, is, ie, js, je)
endif
endif
endif
endif
endif
endif
!--- copy the overlapping information
if( overlap%count > 0) then
nrecv = nrecv + 1
if(nrecv > size(overlapList(:)) )then
call mpp_error(NOTE, 'mpp_domains_define.inc(compute_overlaps_south): overlapList for recv is expanded')
call expand_update_overlap_list(overlapList, nlist)
endif
call add_update_overlap( overlapList(nrecv), overlap)
call init_overlap_type(overlap)
endif
enddo ! end of recv do loop
if(debug_message_passing) then
!--- write out recv information
unit = mpp_pe() + 1000
do m =1,nrecv
write(unit, *) "********from_pe = " ,overlapList(m)%pe, " count = ",overlapList(m)%count
do n = 1, overlapList(m)%count
write(unit, *) overlapList(m)%is(n), overlapList(m)%ie(n), overlapList(m)%js(n), overlapList(m)%je(n), &
overlapList(m)%dir(n), overlapList(m)%rotation(n)
enddo
enddo
if(nrecv >0) call flush(unit)
endif
! copy the overlapping information into domain
if(nrecv>0) then
update%nrecv = nrecv
allocate(update%recv(nrecv))
do m = 1, nrecv
call add_update_overlap( update%recv(m), overlapList(m) )
do n = 1, update%recv(m)%count
if(update%recv(m)%tileNbr(n) == domain%tile_id(tMe)) then
if(update%recv(m)%dir(n) == 1) domain%x(tMe)%loffset = 0
if(update%recv(m)%dir(n) == 7) domain%y(tMe)%loffset = 0
endif
enddo
enddo
endif
if(nrecv_check>0) then
check%nrecv = nrecv_check
allocate(check%recv(nrecv_check))
do m = 1, nrecv_check
call add_check_overlap( check%recv(m), checkList(m) )
enddo
endif
call deallocate_overlap_type(overlap)
do m = 1,size(overlapList(:))
call deallocate_overlap_type(overlapList(m))
enddo
if(debug_update_level .NE. NO_CHECK) then
do m = 1,size(checkList(:))
call deallocate_overlap_type(checkList(m))
enddo
endif
deallocate(overlapList)
deallocate(checkList)
update => NULL()
check=>NULL()
domain%initialized = .true.
end subroutine compute_overlaps_fold_south
!####################################################################################
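!--- compute_overlaps_fold_west: same structure as compute_overlaps_fold_south, but for a
!--- domain whose west edge is folded and whose y-direction is cyclic; the fold column is
!--- at i = isg and "middle" is taken along j as (jsg+jeg)/2+1.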
subroutine compute_overlaps_fold_west( domain, position, ishift, jshift)
!computes remote domain overlaps
!assumes only one in each direction
!will calculate the overlapping for T,E,C,N-cell separately.
type(domain2D), intent(inout) :: domain
integer, intent(in) :: position, ishift, jshift
integer :: j, m, n, nlist, tMe, tNbr, dir
integer :: is, ie, js, je, isc, iec, jsc, jec, isd, ied, jsd, jed
integer :: isg, ieg, jsg, jeg, ioff, joff
integer :: list, middle, ni, nj, isgd, iegd, jsgd, jegd
integer :: ism, iem, jsm, jem, whalo, ehalo, shalo, nhalo
logical :: folded
type(overlap_type) :: overlap
type(overlapSpec), pointer :: update=>NULL()
type(overlap_type) :: overlapList(MAXLIST)
type(overlap_type) :: checkList(MAXLIST)
type(overlapSpec), pointer :: check =>NULL()
integer :: nsend, nrecv
integer :: nsend_check, nrecv_check
integer :: unit
!--- we require that when multiple tiles are placed on one pe, all of those tiles are confined to that pe.
!--- In that case (ntiles on this pe greater than 1) there is no overlap between processors within a tile,
!--- so the overlap only needs to be computed for tMe=1 and tNbr=1
if(size(domain%x(:)) > 1) return
!--- if there is no halo, no need to compute overlaps.
if(domain%whalo==0 .AND. domain%ehalo==0 .AND. domain%shalo==0 .AND. domain%nhalo==0) return
!--- when there is only one tile, n will equal np
nlist = size(domain%list(:))
select case(position)
case (CENTER)
update => domain%update_T
check => NULL()
case (CORNER)
update => domain%update_C
check => domain%check_C
case (EAST)
update => domain%update_E
check => domain%check_E
case (NORTH)
update => domain%update_N
check => domain%check_N
case default
call mpp_error(FATAL, &
"mpp_domains_define.inc(compute_overlaps_fold_west): the value of position should be CENTER, EAST, CORNER or NORTH")
end select
!--- overlap is used to store the overlapping temporarily.
call allocate_update_overlap( overlap, MAXOVERLAP)
!send
call mpp_get_compute_domain( domain, isc, iec, jsc, jec, position=position )
call mpp_get_global_domain ( domain, isg, ieg, jsg, jeg, xsize=ni, ysize=nj, position=position ) !cyclic offsets
call mpp_get_memory_domain ( domain, ism, iem, jsm, jem, position=position )
update%xbegin = ism; update%xend = iem
update%ybegin = jsm; update%yend = jem
if(ASSOCIATED(check)) then
check%xbegin = ism; check%xend = iem
check%ybegin = jsm; check%yend = jem
endif
update%whalo = domain%whalo; update%ehalo = domain%ehalo
update%shalo = domain%shalo; update%nhalo = domain%nhalo
whalo = domain%whalo; ehalo = domain%ehalo
shalo = domain%shalo; nhalo = domain%nhalo
ioff = ni - ishift
joff = nj - jshift
middle = (jsg+jeg)/2+1
tMe = 1; tNbr = 1
if(.NOT. BTEST(domain%fold,WEST)) then
call mpp_error(FATAL, "mpp_domains_define.inc(compute_overlaps_fold_west): "//&
"boundary condition in y-direction should be folded-west for "//trim(domain%name))
endif
if(.NOT. domain%y(tMe)%cyclic) then
call mpp_error(FATAL, "mpp_domains_define.inc(compute_overlaps_fold_west): "//&
"boundary condition in y-direction should be cyclic for "//trim(domain%name))
endif
if(.not. domain%symmetry) then
call mpp_error(FATAL, "mpp_domains_define.inc(compute_overlaps_fold_west): "//&
"when west boundary is folded, the domain must be symmetry for "//trim(domain%name))
endif
nsend = 0
nsend_check = 0
do list = 0,nlist-1
m = mod( domain%pos+list, nlist )
if(domain%list(m)%tile_id(tNbr) == domain%tile_id(tMe) ) then ! only compute the overlapping within tile.
!to_pe's eastern halo
dir = 1
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%begin; je = domain%list(m)%y(tNbr)%compute%end+jshift
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, symmetry=domain%symmetry)
!to_pe's SE halo
dir = 2
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
if( js.LT.jsg .AND. jsc.GT.je )then ! cyclic is assumed
js = js+joff; je = je+joff
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir)
!to_pe's southern halo
dir = 3
is = domain%list(m)%x(tNbr)%compute%begin; ie = domain%list(m)%x(tNbr)%compute%end+ishift
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
!--- to make sure the consistency between pes
if( (position == EAST .OR. position == CORNER ) .AND. ( isc == ie .or. iec == is ) ) then
!--- do nothing, this point will come from other pe
else
if( js.LT.jsg .AND. jsc.GT.je) then ! cyclic offset
js = js+joff; je = je+joff
endif
!--- when the west face is folded, the south-halo points at i = isg will be folded.
!--- the position should be CORNER or EAST
if( is == isg .AND. (position == CORNER .OR. position == EAST) &
.AND. ( domain%list(m)%y(tNbr)%compute%begin == jsg .OR. domain%list(m)%y(tNbr)%compute%begin-1 .GE. middle)) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is+1, ie, js, je, isc, iec, jsc, jec, dir)
is = domain%list(m)%x(tNbr)%compute%begin; ie = is
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
if ( domain%list(m)%y(tNbr)%compute%begin == jsg ) then
select case (position)
case(EAST)
j=js; js = 2*jsg-je-1; je = 2*jsg-j-1
case(CORNER)
j=js; js = 2*jsg-je-2+2*jshift; je = 2*jsg-j-2+2*jshift
end select
if(je .GT. domain%y(tMe)%compute%end+jshift) call mpp_error( FATAL, &
'mpp_domains_define.inc(compute_overlaps_fold_west): south edge ubound error send.' )
else
select case (position)
case(EAST)
j=js; js = jsg+jeg-je; je = jsg+jeg-j
case(CORNER)
j=js; js = jsg+jeg-je-1+jshift; je = jsg+jeg-j-1+jshift
end select
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, .true.)
else
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, symmetry=domain%symmetry)
end if
endif
!to_pe's SW halo
dir = 4
folded = .false.
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
if( jsg.GT.js .AND. je.LT.jsc )then !cyclic offset
js = js+joff; je = je+joff
end if
if( is.LT.isg )then
folded = .TRUE.
call get_fold_index_west(jsg, jeg, isg, jshift, position, is, ie, js, je)
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, folded)
!--- when west edge is folded, js will be less than jsg when position is EAST or CORNER
if(js .LT. jsg) then
js = js + joff
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, js, isc, iec, jsc, jec, dir, folded)
endif
!to_pe's western halo
dir = 5
folded = .FALSE.
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%begin; je = domain%list(m)%y(tNbr)%compute%end+jshift
if( isg.GT.is )then
folded = .true.
call get_fold_index_west(jsg, jeg, isg, jshift, position, is, ie, js, je)
end if
!--- when the domain is symmetric and position is EAST or CORNER, the point where jsc == je
!--- does not need to be sent, because the data at that point will come from another pe
!--- ( the point would otherwise come from two pes; there will be only one point on one pe ).
if( (position == EAST .OR. position == CORNER ) .AND. ( jsc == je .or. jec == js ) ) then
!--- do nothing, this point will come from other pe
else
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, folded, symmetry=domain%symmetry)
endif
!--- when west edge is folded, js will be less than jsg when position is EAST or CORNER
if(js .LT. jsg) then
js = js + joff
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, js, isc, iec, jsc, jec, dir, folded)
endif
!to_pe's NW halo
dir = 6
folded = .false.
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
if( je.GT.jeg .AND. jec.LT.js )then ! cyclic offset
js = js-joff; je = je-joff
end if
if( is.LT.isg )then
folded = .TRUE.
call get_fold_index_west(jsg, jeg, isg, jshift, position, is, ie, js, je)
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, folded)
!to_pe's northern halo
dir = 7
is = domain%list(m)%x(tNbr)%compute%begin; ie = domain%list(m)%x(tNbr)%compute%end+ishift
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
!--- to make sure the consistency between pes
if( (position == EAST .OR. position == CORNER ) .AND. ( isc == ie .or. iec == is ) ) then
!--- do nothing, this point will come from other pe
else
if( je.GT.jeg .AND. jec.LT.js) then ! cyclic offset
js = js-joff; je = je-joff
endif
!--- when the west face is folded, some halo points at i = isg will be folded.
!--- the position should be CORNER or EAST
if( is == isg .AND. (position == CORNER .OR. position == EAST) &
.AND. ( js .GE. middle .AND. domain%list(m)%y(tNbr)%compute%end+nhalo+jshift .LE. jeg ) ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is+1, ie, js, je, isc, iec, jsc, jec, dir)
is = domain%list(m)%x(tNbr)%compute%begin; ie = is
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
select case (position)
case(EAST)
j=js; js = jsg+jeg-je; je = jsg+jeg-j
case(CORNER)
j=js; js = jsg+jeg-je-1+jshift; je = jsg+jeg-j-1+jshift
end select
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, .true.)
else
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, symmetry=domain%symmetry)
end if
endif
!to_pe's NE halo
dir = 8
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
if( je.GT.jeg .AND. jec.LT.js )then !cyclic offset
js = js-joff; je = je-joff
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir)
!--- Now calculate the overlapping for fold-edge.
!--- only position at EAST and CORNER need to be considered
if( ( position == EAST .OR. position == CORNER) ) then
if( domain%x(tMe)%compute%begin-whalo .LE. isg .AND. isg .LE. domain%x(tMe)%data%end+ishift )then !fold is within domain
dir = 5
!--- calculate the overlapping for sending
if( domain%y(tMe)%pos .LT. (size(domain%y(tMe)%list(:))+1)/2 )then
is = domain%list(m)%x(tNbr)%compute%begin; ie = is
if( is == isg )then ! fold is within domain.
js = domain%list(m)%y(tNbr)%compute%begin; je = domain%list(m)%y(tNbr)%compute%end+jshift
select case (position)
case(EAST)
js = max(js, middle)
j=js; js = jsg+jeg-je; je = jsg+jeg-j
case(CORNER)
js = max(js, middle)
j=js; js = jsg+jeg-je-1+jshift; je = jsg+jeg-j-1+jshift
end select
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, .true.)
is = max(is, isc); ie = min(ie, iec)
js = max(js, jsc); je = min(je, jec)
if(debug_update_level .NE. NO_CHECK .AND. ie.GE.is .AND. je.GE.js )then
nsend_check = nsend_check+1
call allocate_check_overlap(checkList(nsend_check), 1)
call insert_check_overlap(checkList(nsend_check), domain%list(m)%pe, &
tMe, 3, ONE_HUNDRED_EIGHTY, is, ie, js, je)
end if
end if
end if
end if
end if
end if
!--- copy the overlapping information
if( overlap%count > 0) then
nsend = nsend + 1
if(nsend > MAXLIST) call mpp_error(FATAL, &
"mpp_domains_define.inc(compute_overlaps_west): nsend is greater than MAXLIST, increase MAXLIST")
call add_update_overlap(overlapList(nsend), overlap)
call init_overlap_type(overlap)
endif
end do ! end of send set up.
if(debug_message_passing) then
!--- write out send information
unit = mpp_pe() + 1000
do m =1,nsend
write(unit, *) "********to_pe = " ,overlapList(m)%pe, " count = ",overlapList(m)%count
do n = 1, overlapList(m)%count
write(unit, *) overlapList(m)%is(n), overlapList(m)%ie(n), overlapList(m)%js(n), overlapList(m)%je(n), &
overlapList(m)%dir(n), overlapList(m)%rotation(n)
enddo
enddo
if(nsend >0) call flush(unit)
endif
! copy the overlapping information into domain data structure
if(nsend>0) then
update%nsend = nsend
allocate(update%send(nsend))
do m = 1, nsend
call add_update_overlap( update%send(m), overlapList(m) )
enddo
endif
if(nsend_check>0) then
check%nsend = nsend_check
allocate(check%send(nsend_check))
do m = 1, nsend_check
call add_check_overlap( check%send(m), checkList(m) )
enddo
endif
do m = 1, MAXLIST
call deallocate_overlap_type(overlapList(m))
if(debug_update_level .NE. NO_CHECK) call deallocate_overlap_type(checkList(m))
enddo
isgd = isg - domain%whalo
iegd = ieg + domain%ehalo
jsgd = jsg - domain%shalo
jegd = jeg + domain%nhalo
! begin setting up recv
nrecv = 0
nrecv_check = 0
do list = 0,nlist-1
m = mod( domain%pos+nlist-list, nlist )
if(domain%list(m)%tile_id(tNbr) == domain%tile_id(tMe) ) then ! only compute the overlapping within tile.
isc = domain%list(m)%x(1)%compute%begin; iec = domain%list(m)%x(1)%compute%end+ishift
jsc = domain%list(m)%y(1)%compute%begin; jec = domain%list(m)%y(1)%compute%end+jshift
!recv_e
dir = 1
isd = domain%x(tMe)%compute%end+1+ishift; ied = domain%x(tMe)%data%end+ishift
jsd = domain%y(tMe)%compute%begin; jed = domain%y(tMe)%compute%end+jshift
is=isc; ie=iec; js=jsc; je=jec
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, symmetry=domain%symmetry)
!recv_se
dir = 2
isd = domain%x(tMe)%compute%end+1+ishift; ied = domain%x(tMe)%data%end+ishift
jsd = domain%y(tMe)%data%begin; jed = domain%y(tMe)%compute%begin-1
is=isc; ie=iec; js=jsc; je=jec
if( jsd.LT.jsg .AND. js.GE.jed )then ! cyclic is assumed
js = js-joff; je = je-joff
end if
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir)
!recv_s
dir = 3
folded = .false.
isd = domain%x(tMe)%compute%begin; ied = domain%x(tMe)%compute%end+ishift
jsd = domain%y(tMe)%data%begin; jed = domain%y(tMe)%compute%begin-1
is=isc; ie=iec; js=jsc; je=jec
if( (position == EAST .OR. position == CORNER ) .AND. ( isd == ie .or. ied == is ) ) then
!--- do nothing, this point will come from other pe
else
if( jsd.LT.jsg .AND. js .GT. jed)then
js = js-joff; je = je-joff
end if
!--- when the west face is folded, the south-halo points at i = isg will be folded.
!--- the position should be CORNER or EAST
if( isd == isg .AND. (position == CORNER .OR. position == EAST) &
.AND. ( jsd < jsg .OR. jed .GE. middle ) ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isd+1, ied, jsd, jed, dir)
is=isc; ie=iec; js=jsc; je=jec
!--- copy the overlapping information
if( overlap%count > 0) then
nrecv = nrecv + 1
if(nrecv > MAXLIST) call mpp_error(FATAL, &
"mpp_domains_define.inc(compute_overlaps_west): nrecv is greater than MAXLIST, increase MAXLIST")
call add_update_overlap( overlapList(nrecv), overlap)
call init_overlap_type(overlap)
endif
enddo ! end of recv do loop
if(debug_message_passing) then
!--- write out recv information
unit = mpp_pe() + 1000
do m =1,nrecv
write(unit, *) "********from_pe = " ,overlapList(m)%pe, " count = ",overlapList(m)%count
do n = 1, overlapList(m)%count
write(unit, *) overlapList(m)%is(n), overlapList(m)%ie(n), overlapList(m)%js(n), overlapList(m)%je(n), &
overlapList(m)%dir(n), overlapList(m)%rotation(n)
enddo
enddo
if(nrecv >0) call flush(unit)
endif
! copy the overlapping information into domain
if(nrecv>0) then
update%nrecv = nrecv
allocate(update%recv(nrecv))
do m = 1, nrecv
call add_update_overlap( update%recv(m), overlapList(m) )
do n = 1, update%recv(m)%count
if(update%recv(m)%tileNbr(n) == domain%tile_id(tMe)) then
if(update%recv(m)%dir(n) == 1) domain%x(tMe)%loffset = 0
if(update%recv(m)%dir(n) == 7) domain%y(tMe)%loffset = 0
endif
enddo
enddo
endif
if(nrecv_check>0) then
check%nrecv = nrecv_check
allocate(check%recv(nrecv_check))
do m = 1, nrecv_check
call add_check_overlap( check%recv(m), checkList(m) )
enddo
endif
call deallocate_overlap_type(overlap)
do m = 1, MAXLIST
call deallocate_overlap_type(overlapList(m))
if(debug_update_level .NE. NO_CHECK) call deallocate_overlap_type(checkList(m))
enddo
update=>NULL()
check=>NULL()
domain%initialized = .true.
end subroutine compute_overlaps_fold_west
!###############################################################################
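!--- compute_overlaps_fold_east: analogous to compute_overlaps_fold_west, but with the
!--- fold on the east edge (i = ieg) and a cyclic y-direction; only positions EAST and
!--- CORNER are affected by the fold.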
subroutine compute_overlaps_fold_east( domain, position, ishift, jshift )
!computes remote domain overlaps
!assumes only one in each direction
!will calculate the overlapping for T,E,C,N-cell separately.
!here assume fold-east and y-cyclic boundary condition
type(domain2D), intent(inout) :: domain
integer, intent(in) :: position, ishift, jshift
integer :: j, m, n, nlist, tMe, tNbr, dir
integer :: is, ie, js, je, isc, iec, jsc, jec, isd, ied, jsd
integer :: jed, isg, ieg, jsg, jeg, ioff, joff
integer :: list, middle, ni, nj, isgd, iegd, jsgd, jegd
integer :: ism, iem, jsm, jem, whalo, ehalo, shalo, nhalo
logical :: folded
type(overlap_type) :: overlap
type(overlapSpec), pointer :: update=>NULL()
type(overlap_type) :: overlapList(MAXLIST)
type(overlap_type) :: checkList(MAXLIST)
type(overlapSpec), pointer :: check =>NULL()
integer :: nsend, nrecv
integer :: nsend_check, nrecv_check
!--- we require that when multiple tiles are placed on one pe, all of those tiles are confined to that pe.
!--- In that case (ntiles on this pe greater than 1) there is no overlap between processors within a tile,
!--- so the overlap only needs to be computed for tMe=1 and tNbr=1
if(size(domain%x(:)) > 1) return
!--- if there is no halo, no need to compute overlaps.
if(domain%whalo==0 .AND. domain%ehalo==0 .AND. domain%shalo==0 .AND. domain%nhalo==0) return
!--- when there is only one tile, n will equal np
nlist = size(domain%list(:))
select case(position)
case (CENTER)
update => domain%update_T
case (CORNER)
update => domain%update_C
check => domain%check_C
case (EAST)
update => domain%update_E
check => domain%check_E
case (NORTH)
update => domain%update_N
check => domain%check_N
case default
call mpp_error(FATAL, &
"mpp_domains_define.inc(compute_overlaps_fold_east): the value of position should be CENTER, EAST, CORNER or NORTH")
end select
!--- overlap is used to store the overlapping temporarily.
call allocate_update_overlap( overlap, MAXOVERLAP)
!send
call mpp_get_compute_domain( domain, isc, iec, jsc, jec, position=position )
call mpp_get_global_domain ( domain, isg, ieg, jsg, jeg, xsize=ni, ysize=nj, position=position ) !cyclic offsets
call mpp_get_memory_domain ( domain, ism, iem, jsm, jem, position=position )
update%xbegin = ism; update%xend = iem
update%ybegin = jsm; update%yend = jem
if(ASSOCIATED(check)) then
check%xbegin = ism; check%xend = iem
check%ybegin = jsm; check%yend = jem
endif
update%whalo = domain%whalo; update%ehalo = domain%ehalo
update%shalo = domain%shalo; update%nhalo = domain%nhalo
whalo = domain%whalo; ehalo = domain%ehalo
shalo = domain%shalo; nhalo = domain%nhalo
ioff = ni - ishift
joff = nj - jshift
middle = (jsg+jeg)/2+1
tMe = 1; tNbr = 1
if(.NOT. BTEST(domain%fold,EAST)) then
call mpp_error(FATAL, "mpp_domains_define.inc(compute_overlaps_fold_east): "//&
"boundary condition in y-direction should be folded-east for "//trim(domain%name))
endif
if(.NOT. domain%y(tMe)%cyclic) then
call mpp_error(FATAL, "mpp_domains_define.inc(compute_overlaps_fold_east): "//&
"boundary condition in y-direction should be cyclic for "//trim(domain%name))
endif
if(.not. domain%symmetry) then
call mpp_error(FATAL, "mpp_domains_define.inc(compute_overlaps_fold_east): "//&
"when east boundary is folded, the domain must be symmetry for "//trim(domain%name))
endif
nsend = 0
nsend_check = 0
do list = 0,nlist-1
m = mod( domain%pos+list, nlist )
if(domain%list(m)%tile_id(tNbr) == domain%tile_id(tMe) ) then ! only compute the overlapping within tile.
!to_pe's eastern halo
dir = 1
folded = .false.
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%begin; je = domain%list(m)%y(tNbr)%compute%end+jshift
if( ie.GT.ieg )then
folded = .true.
call get_fold_index_east(jsg, jeg, ieg, jshift, position, is, ie, js, je)
end if
!--- when the domain is symmetric and position is EAST or CORNER, the point where jsc == je
!--- does not need to be sent, because the data at that point will come from another pe
!--- ( the point would otherwise come from two pes; there will be only one point on one pe ).
if( (position == EAST .OR. position == CORNER ) .AND. ( jsc == je .or. jec == js ) ) then
!--- do nothing, this point will come from other pe
else
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, folded, symmetry=domain%symmetry)
endif
!--- when east edge is folded, js will be less than jsg
if(js .LT. jsg) then
js = js + joff
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, js, isc, iec, jsc, jec, dir, folded)
endif
!to_pe's SE halo
dir = 2
folded = .FALSE.
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
if( jsg.GT.js .AND. je.LT.jsc )then !try cyclic offset
js = js+joff; je = je+joff
end if
if( ie.GT.ieg )then
folded = .TRUE.
call get_fold_index_east(jsg, jeg, ieg, jshift, position, is, ie, js, je)
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, folded)
!--- when east edge is folded, js will be less than jsg
if(js .LT. jsg) then
js = js + joff
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, js, isc, iec, jsc, jec, dir, folded)
endif
!to_pe's southern halo
dir = 3
is = domain%list(m)%x(tNbr)%compute%begin; ie = domain%list(m)%x(tNbr)%compute%end+ishift
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
!--- to ensure consistency between pes
if( (position == EAST .OR. position == CORNER ) .AND. ( isc == ie .or. iec == is ) ) then
!--- do nothing, this point will come from other pe
else
if( js.LT.jsg .AND. jsc.GT.je) then ! cyclic offset
js = js+joff; je = je+joff
endif
!--- when the east face is folded, the south halo points at i = ieg are folded;
!--- this applies only when the position is CORNER or EAST
if( ie == ieg .AND. (position == CORNER .OR. position == EAST) &
.AND. ( domain%list(m)%y(tNbr)%compute%begin == jsg .OR. &
domain%list(m)%y(tNbr)%compute%begin-1 .GE. middle ) ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie-1, js, je, isc, iec, jsc, jec, dir)
!--- treat i = ieg on the east edge separately:
!--- when the data is at CORNER and the domain is not symmetric, j = jsg-1 comes from the cyclic condition
if(position == CORNER .AND. .NOT. domain%symmetry .AND. domain%list(m)%y(tNbr)%compute%begin == jsg) then
call insert_update_overlap(overlap, domain%list(m)%pe, &
ie, ie, je, je, isc, iec, jsc, jec, dir, .true.)
end if
ie = domain%list(m)%x(tNbr)%compute%end+ishift; is = ie
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
if ( domain%list(m)%y(tNbr)%compute%begin == jsg ) then
select case (position)
case(EAST)
j=js; js = 2*jsg-je-1; je = 2*jsg-j-1
case(CORNER)
j=js; js = 2*jsg-je-2+2*jshift; je = 2*jsg-j-2+2*jshift
end select
if(je .GT. domain%y(tMe)%compute%end+jshift) call mpp_error( FATAL, &
'mpp_domains_define.inc(compute_overlaps_fold_east): south edge ubound error send.' )
else
select case (position)
case(EAST)
j=js; js = jsg+jeg-je; je = jsg+jeg-j
case(CORNER)
j=js; js = jsg+jeg-je-1+jshift; je = jsg+jeg-j-1+jshift
end select
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, .true.)
else
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, symmetry=domain%symmetry)
end if
endif
!to_pe's SW halo
dir = 4
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%begin-shalo; je = domain%list(m)%y(tNbr)%compute%begin-1
if( js.LT.jsg .AND. jsc.GT.je )then ! cyclic is assumed
js = js+joff; je = je+joff
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir)
!to_pe's western halo
dir = 5
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%begin; je = domain%list(m)%y(tNbr)%compute%end+jshift
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, symmetry=domain%symmetry)
!to_pe's NW halo
dir = 6
is = domain%list(m)%x(tNbr)%compute%begin-whalo; ie = domain%list(m)%x(tNbr)%compute%begin-1
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
if( je.GT.jeg .AND. jec.LT.js )then !cyclic offset
js = js-joff; je = je-joff
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir)
!to_pe's northern halo
dir = 7
folded = .FALSE.
is = domain%list(m)%x(tNbr)%compute%begin; ie = domain%list(m)%x(tNbr)%compute%end+ishift
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
!--- to ensure consistency between pes
if( (position == EAST .OR. position == CORNER ) .AND. ( isc == ie .or. iec == is ) ) then
!--- do nothing, this point will come from other pe
else
if( je.GT.jeg .AND. jec.LT.js) then ! cyclic offset
js = js-joff; je = je-joff
endif
!--- when the east face is folded, the north halo points at i = ieg are folded;
!--- this applies only when the position is CORNER or EAST
if( ie == ieg .AND. (position == CORNER .OR. position == EAST) &
.AND. ( js .GE. middle .AND. domain%list(m)%y(tNbr)%compute%end+nhalo+jshift .LE. jeg ) ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie-1, js, je, isc, iec, jsc, jec, dir)
ie = domain%list(m)%x(tNbr)%compute%end+ishift; is = ie
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
select case (position)
case(EAST)
j=js; js = jsg+jeg-je; je = jsg+jeg-j
case(CORNER)
j=js; js = jsg+jeg-je-1+jshift; je = jsg+jeg-j-1+jshift
end select
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, .true.)
else
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, symmetry=domain%symmetry)
end if
endif
!to_pe's NE halo
dir = 8
folded = .false.
is = domain%list(m)%x(tNbr)%compute%end+1+ishift; ie = domain%list(m)%x(tNbr)%compute%end+ehalo+ishift
js = domain%list(m)%y(tNbr)%compute%end+1+jshift; je = domain%list(m)%y(tNbr)%compute%end+nhalo+jshift
if( je.GT.jeg .AND. jec.LT.js )then ! cyclic offset
js = js-joff; je = je-joff
end if
if( ie.GT.ieg )then
folded = .TRUE.
call get_fold_index_east(jsg, jeg, ieg, jshift, position, is, ie, js, je)
end if
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, folded)
!--- Now calculate the overlapping for the fold edge.
!--- only positions EAST and CORNER need to be considered
if( ( position == EAST .OR. position == CORNER) ) then
if( domain%x(tMe)%data%begin .LE. ieg .AND. ieg .LE. domain%x(tMe)%data%end+ishift )then !fold is within domain
dir = 1
!--- calculate the overlapping for sending
if( domain%y(tMe)%pos .LT. (size(domain%y(tMe)%list(:))+1)/2 )then
ie = domain%list(m)%x(tNbr)%compute%end+ishift; is = ie
if( ie == ieg )then ! fold is within domain.
js = domain%list(m)%y(tNbr)%compute%begin; je = domain%list(m)%y(tNbr)%compute%end+jshift
select case (position)
case(EAST)
js = max(js, middle)
j=js; js = jsg+jeg-je; je = jsg+jeg-j
case(CORNER)
js = max(js, middle)
j=js; js = jsg+jeg-je-1+jshift; je = jsg+jeg-j-1+jshift
end select
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isc, iec, jsc, jec, dir, .true.)
is = max(is, isc); ie = min(ie, iec)
js = max(js, jsc); je = min(je, jec)
if(debug_update_level .NE. NO_CHECK .AND. ie.GE.is .AND. je.GE.js )then
nsend_check = nsend_check+1
call allocate_check_overlap(checkList(nsend_check), 1)
call insert_check_overlap(checkList(nsend_check), domain%list(m)%pe, &
tMe, 1, ONE_HUNDRED_EIGHTY, is, ie, js, je)
end if
end if
end if
end if
end if
end if
!--- copy the overlapping information
if( overlap%count > 0) then
nsend = nsend + 1
if(nsend > MAXLIST) call mpp_error(FATAL, &
"mpp_domains_define.inc(compute_overlaps_east): nsend is greater than MAXLIST, increase MAXLIST")
call add_update_overlap(overlapList(nsend), overlap)
call init_overlap_type(overlap)
endif
end do ! end of send set up.
! copy the overlapping information into domain data structure
if(nsend>0) then
update%nsend = nsend
allocate(update%send(nsend))
do m = 1, nsend
call add_update_overlap( update%send(m), overlapList(m) )
enddo
endif
if(nsend_check>0) then
check%nsend = nsend_check
allocate(check%send(nsend_check))
do m = 1, nsend_check
call add_check_overlap( check%send(m), checkList(m) )
enddo
endif
do m = 1, MAXLIST
call deallocate_overlap_type(overlapList(m))
if(debug_update_level .NE. NO_CHECK) call deallocate_overlap_type(checkList(m))
enddo
isgd = isg - domain%whalo
iegd = ieg + domain%ehalo
jsgd = jsg - domain%shalo
jegd = jeg + domain%nhalo
! begin setting up recv
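! The recv overlaps are computed by intersecting the halo strips of the local data domain
! with the compute domains of the neighboring pes, mirroring the send setup above.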
nrecv = 0
nrecv_check = 0
do list = 0,nlist-1
m = mod( domain%pos+nlist-list, nlist )
if(domain%list(m)%tile_id(tNbr) == domain%tile_id(tMe) ) then ! only compute the overlapping within tile.
isc = domain%list(m)%x(1)%compute%begin; iec = domain%list(m)%x(1)%compute%end+ishift
jsc = domain%list(m)%y(1)%compute%begin; jec = domain%list(m)%y(1)%compute%end+jshift
!recv_e
dir = 1
folded = .false.
isd = domain%x(tMe)%compute%end+1+ishift; ied = domain%x(tMe)%data%end+ishift
jsd = domain%y(tMe)%compute%begin; jed = domain%y(tMe)%compute%end+jshift
is=isc; ie=iec; js=jsc; je=jec
if( ied.GT.ieg )then
folded = .true.
call get_fold_index_east(jsg, jeg, ieg, jshift, position, is, ie, js, je)
end if
if( (position == EAST .OR. position == CORNER ) .AND. (jsd == je .or. jed == js ) ) then
!--- do nothing, this point will come from other pe
else
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, folded, symmetry=domain%symmetry)
end if
!--- when east edge is folded, js will be less than jsg when position is EAST and CORNER
if(js .LT. jsg ) then
js = js + joff
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, js, isd, ied, jsd, jed, dir, folded)
endif
!recv_se
dir = 2
folded = .false.
isd = domain%x(tMe)%compute%end+1+ishift; ied = domain%x(tMe)%data%end+ishift
jsd = domain%y(tMe)%data%begin; jed = domain%y(tMe)%compute%begin-1
is=isc; ie=iec; js=jsc; je=jec
if( ied.GT.ieg )then
folded = .true.
call get_fold_index_east(jsg, jeg, ieg, jshift, position, is, ie, js, je)
end if
if( jsd.LT.jsg .AND. js.GT.jed ) then ! cyclic offset
js = js-joff; je = je-joff
end if
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied, jsd, jed, dir, folded)
!--- when east edge is folded, js will be less than jsg when position is EAST and CORNER
if(js .LT. jsg ) then
js = js + joff
call insert_update_overlap(overlap, domain%list(m)%pe, &
is, ie, js, js, isd, ied, jsd, jed, dir, folded )
endif
!recv_s
dir = 3
folded = .false.
isd = domain%x(tMe)%compute%begin; ied = domain%x(tMe)%compute%end+ishift
jsd = domain%y(tMe)%data%begin; jed = domain%y(tMe)%compute%begin-1
is=isc; ie=iec; js=jsc; je=jec
if( (position == EAST .OR. position == CORNER ) .AND. ( isd == ie .or. ied == is ) ) then
!--- do nothing, this point will come from other pe
else
if( jsd.LT.jsg .AND. js .GT. jed)then
js = js-joff; je = je-joff
end if
!--- when the east face is folded, the south halo points at i = ieg are folded;
!--- this applies only when the position is CORNER or EAST
if( ied == ieg .AND. (position == CORNER .OR. position == EAST) &
.AND. ( jsd < jsg .OR. jed .GE. middle ) ) then
call insert_update_overlap( overlap, domain%list(m)%pe, &
is, ie, js, je, isd, ied-1, jsd, jed, dir)
is=isc; ie=iec; js=jsc; je=jec
!--- copy the overlapping information
if( overlap%count > 0) then
nrecv = nrecv + 1
if(nrecv > MAXLIST) call mpp_error(FATAL, &
"mpp_domains_define.inc(compute_overlaps_east): nrecv is greater than MAXLIST, increase MAXLIST")
call add_update_overlap( overlapList(nrecv), overlap)
call init_overlap_type(overlap)
endif
enddo ! end of recv do loop
! copy the overlapping information into domain
if(nrecv>0) then
update%nrecv = nrecv
allocate(update%recv(nrecv))
do m = 1, nrecv
call add_update_overlap( update%recv(m), overlapList(m) )
do n = 1, update%recv(m)%count
if(update%recv(m)%tileNbr(n) == domain%tile_id(tMe)) then
if(update%recv(m)%dir(n) == 1) domain%x(tMe)%loffset = 0
if(update%recv(m)%dir(n) == 7) domain%y(tMe)%loffset = 0
endif
enddo
enddo
endif
if(nrecv_check>0) then
check%nrecv = nrecv_check
allocate(check%recv(nrecv_check))
do m = 1, nrecv_check
call add_check_overlap( check%recv(m), checkList(m) )
enddo
endif
call deallocate_overlap_type(overlap)
do m = 1, MAXLIST
call deallocate_overlap_type(overlapList(m))
if(debug_update_level .NE. NO_CHECK) call deallocate_overlap_type(checkList(m))
enddo
update=>NULL()
check=>NULL()
domain%initialized = .true.
end subroutine compute_overlaps_fold_east
!#####################################################################################
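!--- The get_fold_index_{east,west,south,north} routines reflect an index range (is:ie, js:je)
!--- that extends past a folded edge back into the global domain, taking the grid position
!--- (CENTER, EAST, NORTH, CORNER) into account.
!--- Illustrative example (west fold, CENTER position, isg = 1): the range i = -1:0 maps to
!--- i = 1:2, while the j range is reflected across the global extent: js -> jsg+jeg-je, je -> jsg+jeg-js.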
subroutine get_fold_index_west(jsg, jeg, isg, jshift, position, is, ie, js, je)
integer, intent(in) :: jsg, jeg, isg, jshift, position
integer, intent(inout) :: is, ie, js, je
integer :: i, j
select case(position)
case(CENTER)
j=js; js = jsg+jeg-je; je = jsg+jeg-j
i=is; is = 2*isg-ie-1; ie = 2*isg-i-1
case(EAST)
j=js; js = jsg+jeg-je; je = jsg+jeg-j
i=is; is = 2*isg-ie; ie = 2*isg-i
case(NORTH)
j=js; js = jsg+jeg-je-1+jshift; je = jsg+jeg-j-1+jshift
i=is; is = 2*isg-ie-1; ie = 2*isg-i-1
case(CORNER)
j=js; js = jsg+jeg-je-1+jshift; je = jsg+jeg-j-1+jshift
i=is; is = 2*isg-ie; ie = 2*isg-i
end select
end subroutine get_fold_index_west
!#####################################################################################
subroutine get_fold_index_east(jsg, jeg, ieg, jshift, position, is, ie, js, je)
integer, intent(in) :: jsg, jeg, ieg, jshift, position
integer, intent(inout) :: is, ie, js, je
integer :: i, j
select case(position)
case(CENTER)
j=js; js = jsg+jeg-je; je = jsg+jeg-j
i=is; is = 2*ieg-ie+1; ie = 2*ieg-i+1
case(EAST)
j=js; js = jsg+jeg-je; je = jsg+jeg-j
i=is; is = 2*ieg-ie; ie = 2*ieg-i
case(NORTH)
j=js; js = jsg+jeg-je-1+jshift; je = jsg+jeg-j-1+jshift
i=is; is = 2*ieg-ie+1; ie = 2*ieg-i+1
case(CORNER)
j=js; js = jsg+jeg-je-1+jshift; je = jsg+jeg-j-1+jshift
i=is; is = 2*ieg-ie; ie = 2*ieg-i
end select
end subroutine get_fold_index_east
!#####################################################################################
subroutine get_fold_index_south(isg, ieg, jsg, ishift, position, is, ie, js, je)
integer, intent(in) :: isg, ieg, jsg, ishift, position
integer, intent(inout) :: is, ie, js, je
integer :: i, j
select case(position)
case(CENTER)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
j=js; js = 2*jsg-je-1; je = 2*jsg-j-1
case(EAST)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
j=js; js = 2*jsg-je-1; je = 2*jsg-j-1
case(NORTH)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
j=js; js = 2*jsg-je; je = 2*jsg-j
case(CORNER)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
j=js; js = 2*jsg-je; je = 2*jsg-j
end select
end subroutine get_fold_index_south
!#####################################################################################
subroutine get_fold_index_north(isg, ieg, jeg, ishift, position, is, ie, js, je)
integer, intent(in) :: isg, ieg, jeg, ishift, position
integer, intent(inout) :: is, ie, js, je
integer :: i, j
select case(position)
case(CENTER)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
j=js; js = 2*jeg-je+1; je = 2*jeg-j+1
case(EAST)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
j=js; js = 2*jeg-je+1; je = 2*jeg-j+1
case(NORTH)
i=is; is = isg+ieg-ie; ie = isg+ieg-i
j=js; js = 2*jeg-je; je = 2*jeg-j
case(CORNER)
i=is; is = isg+ieg-ie-1+ishift; ie = isg+ieg-i-1+ishift
j=js; js = 2*jeg-je; je = 2*jeg-j
end select
end subroutine get_fold_index_north
!#####################################################################################
! add offset to the index
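! Illustrative example: with gstart = 1, gend = 10, gsize = 10 and offset = 5,
! an index lstart = 8 becomes 13 and is wrapped back to 3.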
subroutine apply_cyclic_offset(lstart, lend, offset, gstart, gend, gsize)
integer, intent(inout) :: lstart, lend
integer, intent(in ) :: offset, gstart, gend, gsize
lstart = lstart + offset
if(lstart > gend) lstart = lstart - gsize
if(lstart < gstart) lstart = lstart + gsize
lend = lend + offset
if(lend > gend) lend = lend - gsize
if(lend < gstart) lend = lend + gsize
return
end subroutine apply_cyclic_offset
!###################################################################################
! This routine sets up the overlapping for mpp_update_domains for an arbitrary halo update.
! The input halos (whalo_in, ehalo_in, shalo_in, nhalo_in) should be the halo sizes defined
! in mpp_define_domains; whalo_out, ehalo_out, shalo_out and nhalo_out should not be exactly
! the same as the input halos.
! The tripolar grid is currently not considered, because in the folded-north region the
! overlapping is specified through a list of points, not through rectangles; this will be
! addressed in the future.
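! An illustrative (hypothetical) call, deriving a reduced one-point-halo update spec from the
! default T-cell spec built by mpp_define_domains:
!   call set_overlaps(domain, domain%update_T, update_h1, 1, 1, 1, 1)
! where update_h1 is an overlapSpec variable supplied by the caller.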
subroutine set_overlaps(domain, overlap_in, overlap_out, whalo_out, ehalo_out, shalo_out, nhalo_out)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: overlap_in
type(overlapSpec), intent(inout) :: overlap_out
integer, intent(in) :: whalo_out, ehalo_out, shalo_out, nhalo_out
integer :: nlist, m, n, isoff, ieoff, jsoff, jeoff, rotation
integer :: whalo_in, ehalo_in, shalo_in, nhalo_in
integer :: dir
type(overlap_type) :: overlap
type(overlap_type), allocatable :: send(:), recv(:)
type(overlap_type), pointer :: ptrIn => NULL()
integer :: nsend, nrecv, nsend_in, nrecv_in
if( domain%fold .NE. 0) call mpp_error(FATAL, &
"mpp_domains_define.inc(set_overlaps): folded domain is not implemented for arbitrary halo update, contact developer")
whalo_in = domain%whalo
ehalo_in = domain%ehalo
shalo_in = domain%shalo
nhalo_in = domain%nhalo
if( .NOT. domain%initialized) call mpp_error(FATAL, &
"mpp_domains_define.inc: domain is not defined yet")
nlist = size(domain%list(:))
isoff = whalo_in - abs(whalo_out)
ieoff = ehalo_in - abs(ehalo_out)
jsoff = shalo_in - abs(shalo_out)
jeoff = nhalo_in - abs(nhalo_out)
nsend = 0
nsend_in = overlap_in%nsend
nrecv_in = overlap_in%nrecv
if(nsend_in>0) allocate(send(nsend_in))
if(nrecv_in>0) allocate(recv(nrecv_in))
call allocate_update_overlap(overlap, MAXOVERLAP)
overlap_out%whalo = whalo_out
overlap_out%ehalo = ehalo_out
overlap_out%shalo = shalo_out
overlap_out%nhalo = nhalo_out
overlap_out%xbegin = overlap_in%xbegin
overlap_out%xend = overlap_in%xend
overlap_out%ybegin = overlap_in%ybegin
overlap_out%yend = overlap_in%yend
!--- setting up overlap.
do m = 1, nsend_in
ptrIn => overlap_in%send(m)
if(ptrIn%count .LE. 0) call mpp_error(FATAL, &
"mpp_domains_define.inc(set_overlaps): number of overlap for send should be a positive number for"//trim(domain%name) )
do n = 1, ptrIn%count
dir = ptrIn%dir(n)
rotation = ptrIn%rotation(n)
select case(dir)
case(1) ! to_pe's eastern halo
if(ehalo_out > 0) then
call set_single_overlap(ptrIn, overlap, 0, -ieoff, 0, 0, n, dir, rotation)
else if(ehalo_out<0) then
call set_single_overlap(ptrIn, overlap, -ehalo_out, 0, 0, 0, n, dir, rotation)
end if
case(2) ! to_pe's southeast halo
if(ehalo_out>0 .AND. shalo_out > 0) then
call set_single_overlap(ptrIn, overlap, 0, -ieoff, jsoff, 0, n, dir, rotation)
else if(ehalo_out<0 .AND. shalo_out < 0) then ! three parts: southeast, south and east.
call set_single_overlap(ptrIn, overlap, -ehalo_out, 0, 0, shalo_out, n, dir, rotation)
call set_single_overlap(ptrIn, overlap, -ehalo_out, 0, jsoff, 0, n, dir-1, rotation)
call set_single_overlap(ptrIn, overlap, 0, -ieoff, 0, shalo_out, n, dir+1, rotation)
end if
case(3) ! to_pe's southern halo
if(shalo_out > 0) then
call set_single_overlap(ptrIn, overlap, 0, 0, jsoff, 0, n, dir, rotation)
else if(shalo_out<0) then
call set_single_overlap(ptrIn, overlap, 0, 0, 0, shalo_out, n, dir, rotation)
end if
case(4) ! to_pe's southwest halo
if(whalo_out>0 .AND. shalo_out > 0) then
call set_single_overlap(ptrIn, overlap, isoff, 0, jsoff, 0, n, dir, rotation)
else if(whalo_out<0 .AND. shalo_out < 0) then
call set_single_overlap(ptrIn, overlap, 0, whalo_out, 0, shalo_out, n, dir, rotation)
call set_single_overlap(ptrIn, overlap, isoff, 0, 0, shalo_out, n, dir-1, rotation)
call set_single_overlap(ptrIn, overlap, 0, whalo_out, jsoff, 0, n, dir+1, rotation)
end if
case(5) ! to_pe's western halo
if(whalo_out > 0) then
call set_single_overlap(ptrIn, overlap, isoff, 0, 0, 0, n, dir, rotation)
else if(whalo_out<0) then
call set_single_overlap(ptrIn, overlap, 0, whalo_out, 0, 0, n, dir, rotation)
end if
case(6) ! to_pe's northwest halo
if(whalo_out>0 .AND. nhalo_out > 0) then
call set_single_overlap(ptrIn, overlap, isoff, 0, 0, -jeoff, n, dir, rotation)
else if(whalo_out<0 .AND. nhalo_out < 0) then
call set_single_overlap(ptrIn, overlap, 0, whalo_out, -nhalo_out, 0, n, dir, rotation)
call set_single_overlap(ptrIn, overlap, 0, whalo_out, 0, -jeoff, n, dir-1, rotation)
call set_single_overlap(ptrIn, overlap, isoff, 0, -nhalo_out, 0, n, dir+1, rotation)
end if
case(7) ! to_pe's northern halo
if(nhalo_out > 0) then
call set_single_overlap(ptrIn, overlap, 0, 0, 0, -jeoff, n, dir, rotation)
else if(nhalo_out<0) then
call set_single_overlap(ptrIn, overlap, 0, 0, -nhalo_out, 0, n, dir, rotation)
end if
case(8) ! to_pe's northeast halo
if(ehalo_out>0 .AND. nhalo_out > 0) then
call set_single_overlap(ptrIn, overlap, 0, -ieoff, 0, -jeoff, n, dir, rotation)
else if(ehalo_out<0 .AND. nhalo_out < 0) then
call set_single_overlap(ptrIn, overlap, -ehalo_out, 0, -nhalo_out, 0, n, dir, rotation)
call set_single_overlap(ptrIn, overlap, 0, -ieoff, -nhalo_out, 0, n, dir-1, rotation)
call set_single_overlap(ptrIn, overlap, -ehalo_out, 0, 0, -jeoff, n, 1, rotation)
end if
end select
end do ! do n = 1, ptrIn%count
if(overlap%count>0) then
nsend = nsend+1
call add_update_overlap(send(nsend), overlap)
call init_overlap_type(overlap)
endif
end do ! end do list = 0, nlist-1
if(nsend>0) then
overlap_out%nsend = nsend
allocate(overlap_out%send(nsend));
do n = 1, nsend
call add_update_overlap(overlap_out%send(n), send(n) )
enddo
else
overlap_out%nsend = 0
endif
!--------------------------------------------------
! recving
!---------------------------------------------------
overlap%count = 0
nrecv = 0
do m = 1, nrecv_in
ptrIn => overlap_in%recv(m)
if(ptrIn%count .LE. 0) call mpp_error(FATAL, &
"mpp_domains_define.inc(set_overlaps): number of overlap for recv should be a positive number")
overlap%count = 0
do n = 1, ptrIn%count
dir = ptrIn%dir(n)
rotation = ptrIn%rotation(n)
select case(dir)
case(1) ! eastern halo
if(ehalo_out > 0) then
call set_single_overlap(ptrIn, overlap, 0, -ieoff, 0, 0, n, dir)
else if(ehalo_out<0) then
call set_single_overlap(ptrIn, overlap, -ehalo_out, 0, 0, 0, n, dir)
end if
case(2) ! southeast halo
if(ehalo_out>0 .AND. shalo_out > 0) then
call set_single_overlap(ptrIn, overlap, 0, -ieoff, jsoff, 0, n, dir)
else if(ehalo_out<0 .AND. shalo_out < 0) then
call set_single_overlap(ptrIn, overlap, -ehalo_out, 0, 0, shalo_out, n, dir)
call set_single_overlap(ptrIn, overlap, -ehalo_out, 0, jsoff, 0, n, dir-1)
call set_single_overlap(ptrIn, overlap, 0, -ieoff, 0, shalo_out, n, dir+1)
end if
case(3) ! southern halo
if(shalo_out > 0) then
call set_single_overlap(ptrIn, overlap, 0, 0, jsoff, 0, n, dir)
else if(shalo_out<0) then
call set_single_overlap(ptrIn, overlap, 0, 0, 0, shalo_out, n, dir)
end if
case(4) ! southwest halo
if(whalo_out>0 .AND. shalo_out > 0) then
call set_single_overlap(ptrIn, overlap, isoff, 0, jsoff, 0, n, dir)
else if(whalo_out<0 .AND. shalo_out < 0) then
call set_single_overlap(ptrIn, overlap, 0, whalo_out, 0, shalo_out, n, dir)
call set_single_overlap(ptrIn, overlap, isoff, 0, 0, shalo_out, n, dir-1)
call set_single_overlap(ptrIn, overlap, 0, whalo_out, jsoff, 0, n, dir+1)
end if
case(5) ! western halo
if(whalo_out > 0) then
call set_single_overlap(ptrIn, overlap, isoff, 0, 0, 0, n, dir)
else if(whalo_out<0) then
call set_single_overlap(ptrIn, overlap, 0, whalo_out, 0, 0, n, dir)
end if
case(6) ! northwest halo
if(whalo_out>0 .AND. nhalo_out > 0) then
call set_single_overlap(ptrIn, overlap, isoff, 0, 0, -jeoff, n, dir)
else if(whalo_out<0 .AND. nhalo_out < 0) then
call set_single_overlap(ptrIn, overlap, 0, whalo_out, -nhalo_out, 0, n, dir)
call set_single_overlap(ptrIn, overlap, 0, whalo_out, 0, -jeoff, n, dir-1)
call set_single_overlap(ptrIn, overlap, isoff, 0, -nhalo_out, 0, n, dir+1)
end if
case(7) ! northern halo
if(nhalo_out > 0) then
call set_single_overlap(ptrIn, overlap, 0, 0, 0, -jeoff, n, dir)
else if(nhalo_out<0) then
call set_single_overlap(ptrIn, overlap, 0, 0, -nhalo_out, 0, n, dir)
end if
case(8) ! northeast halo
if(ehalo_out>0 .AND. nhalo_out > 0) then
call set_single_overlap(ptrIn, overlap, 0, -ieoff, 0, -jeoff, n, dir)
else if(ehalo_out<0 .AND. nhalo_out < 0) then
call set_single_overlap(ptrIn, overlap, -ehalo_out, 0, -nhalo_out, 0, n, dir)
call set_single_overlap(ptrIn, overlap, 0, -ieoff, -nhalo_out, 0, n, dir-1)
call set_single_overlap(ptrIn, overlap, -ehalo_out, 0, 0, -jeoff, n, 1)
end if
end select
end do ! do n = 1, ptrIn%count
if(overlap%count>0) then
nrecv = nrecv+1
call add_update_overlap(recv(nrecv), overlap)
call init_overlap_type(overlap)
endif
end do ! end do list = 0, nlist-1
if(nrecv>0) then
overlap_out%nrecv = nrecv
allocate(overlap_out%recv(nrecv));
do n = 1, nrecv
call add_update_overlap(overlap_out%recv(n), recv(n) )
enddo
else
overlap_out%nrecv = 0
endif
call deallocate_overlap_type(overlap)
do n = 1, nsend_in
call deallocate_overlap_type(send(n))
enddo
do n = 1, nrecv_in
call deallocate_overlap_type(recv(n))
enddo
if(allocated(send)) deallocate(send)
if(allocated(recv)) deallocate(recv)
ptrIn => NULL()
call set_domain_comm_inf(overlap_out)
end subroutine set_overlaps
!##############################################################################
subroutine set_single_overlap(overlap_in, overlap_out, isoff, ieoff, jsoff, jeoff, index, dir, rotation)
type(overlap_type), intent(in) :: overlap_in
type(overlap_type), intent(inout) :: overlap_out
integer, intent(in) :: isoff, jsoff, ieoff, jeoff
integer, intent(in) :: index
integer, intent(in) :: dir
integer, optional, intent(in) :: rotation
integer :: rotate
integer :: count
if( overlap_out%pe == NULL_PE ) then
overlap_out%pe = overlap_in%pe
else
if(overlap_out%pe .NE. overlap_in%pe) call mpp_error(FATAL, &
"mpp_domains_define.inc(set_single_overlap): mismatch of pe between overlap_in and overlap_out")
endif
if(isoff .NE. 0 .and. ieoff .NE. 0) call mpp_error(FATAL, &
"mpp_domains_define.inc(set_single_overlap): both isoff and ieoff are non-zero")
if(jsoff .NE. 0 .and. jeoff .NE. 0) call mpp_error(FATAL, &
"mpp_domains_define.inc(set_single_overlap): both jsoff and jeoff are non-zero")
overlap_out%count = overlap_out%count + 1
count = overlap_out%count
if(count > MAXOVERLAP) call mpp_error(FATAL, &
"set_single_overlap: number of overlap is greater than MAXOVERLAP, increase MAXOVERLAP")
rotate = ZERO
if(present(rotation)) rotate = rotation
overlap_out%rotation (count) = overlap_in%rotation(index)
overlap_out%dir (count) = dir
overlap_out%tileMe (count) = overlap_in%tileMe(index)
overlap_out%tileNbr (count) = overlap_in%tileNbr(index)
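!--- apply the halo-trim offsets in the neighbor's index space: for ZERO rotation the offsets
!--- apply directly; for NINETY the j offsets act on i (with a sign change) and the i offsets
!--- act on j; for MINUS_NINETY the i offsets act on j (with a sign change) and the j offsets act on i.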
select case(rotate)
case(ZERO)
overlap_out%is(count) = overlap_in%is(index) + isoff
overlap_out%ie(count) = overlap_in%ie(index) + ieoff
overlap_out%js(count) = overlap_in%js(index) + jsoff
overlap_out%je(count) = overlap_in%je(index) + jeoff
case(NINETY)
overlap_out%is(count) = overlap_in%is(index) - jeoff
overlap_out%ie(count) = overlap_in%ie(index) - jsoff
overlap_out%js(count) = overlap_in%js(index) + isoff
overlap_out%je(count) = overlap_in%je(index) + ieoff
case(MINUS_NINETY)
overlap_out%is(count) = overlap_in%is(index) + jsoff
overlap_out%ie(count) = overlap_in%ie(index) + jeoff
overlap_out%js(count) = overlap_in%js(index) - ieoff
overlap_out%je(count) = overlap_in%je(index) - isoff
case default
call mpp_error(FATAL, "mpp_domains_define.inc: the value of rotation should be ZERO, NINETY or MINUS_NINETY")
end select
end subroutine set_single_overlap
!###################################################################################
!--- compute the overlapping between tiles for the T-cell.
subroutine define_contact_point( domain, position, num_contact, tile1, tile2, align1, align2, &
refine1, refine2, istart1, iend1, jstart1, jend1, istart2, iend2, jstart2, jend2, &
isgList, iegList, jsgList, jegList )
type(domain2D), intent(inout) :: domain
integer, intent(in) :: position
integer, intent(in) :: num_contact ! number of contact regions
integer, dimension(:), intent(in) :: tile1, tile2 ! tile number
integer, dimension(:), intent(in) :: align1, align2 ! align direction of contact region
real, dimension(:), intent(in) :: refine1, refine2 ! refinement between tiles
integer, dimension(:), intent(in) :: istart1, iend1 ! i-index in tile_1 of contact region
integer, dimension(:), intent(in) :: jstart1, jend1 ! j-index in tile_1 of contact region
integer, dimension(:), intent(in) :: istart2, iend2 ! i-index in tile_2 of contact region
integer, dimension(:), intent(in) :: jstart2, jend2 ! j-index in tile_2 of contact region
integer, dimension(:), intent(in) :: isgList, iegList ! i-global domain of each tile
integer, dimension(:), intent(in) :: jsgList, jegList ! j-global domain of each tile
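!--- Illustrative (hypothetical) contact: an east-west contact between tile 1 and tile 2 of a
!--- cubed-sphere mosaic would be given as tile1 = 1, tile2 = 2, align1 = EAST, align2 = WEST,
!--- istart1 = iend1 = iegList(1), jstart1 = jsgList(1), jend1 = jegList(1),
!--- istart2 = iend2 = isgList(2), jstart2 = jsgList(2), jend2 = jegList(2), refine1 = refine2 = 1.0.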
integer :: isc, iec, jsc, jec, isd, ied, jsd, jed
integer :: isc1, iec1, jsc1, jec1, isc2, iec2, jsc2, jec2
integer :: isd1, ied1, jsd1, jed1, isd2, ied2, jsd2, jed2
integer :: is, ie, js, je, ioff, joff, isoff, ieoff, jsoff, jeoff
integer :: ntiles, max_contact
integer :: nlist, list, m, n, l, count, numS, numR
integer :: whalo, ehalo, shalo, nhalo
integer :: t1, t2, tt, pos
integer :: ntileMe, ntileNbr, tMe, tNbr, tileMe, dir
integer :: nxd, nyd, nxc, nyc, ism, iem, jsm, jem
integer :: dirlist(8)
!--- is1Send and is2Send will figure out the overlapping for sending from the current pe.
!--- is1Recv and is2Recv will figure out the overlapping for receiving onto the current pe.
integer, dimension(4*num_contact) :: is1Send, ie1Send, js1Send, je1Send
integer, dimension(4*num_contact) :: is2Send, ie2Send, js2Send, je2Send
integer, dimension(4*num_contact) :: is2Recv, ie2Recv, js2Recv, je2Recv
integer, dimension(4*num_contact) :: is1Recv, ie1Recv, js1Recv, je1Recv
integer, dimension(4*num_contact) :: align1Recv, align2Recv, align1Send, align2Send
real, dimension(4*num_contact) :: refineRecv, refineSend
integer, dimension(4*num_contact) :: rotateSend, rotateRecv, tileSend, tileRecv
integer :: nsend, nrecv, nsend2, nrecv2
type(contact_type), dimension(domain%ntiles) :: eCont, wCont, sCont, nCont
type(overlap_type), dimension(0:size(domain%list(:))-1) :: overlapSend, overlapRecv
integer :: unit
if( position .NE. CENTER ) call mpp_error(FATAL, "mpp_domains_define.inc: " //&
"routine define_contact_point can only be used to calculate overlapping for cell center.")
ntiles = domain%ntiles
eCont(:)%ncontact = 0
do n = 1, ntiles
eCont(n)%ncontact = 0; sCont(n)%ncontact = 0; wCont(n)%ncontact = 0; nCont(n)%ncontact = 0;
allocate(eCont(n)%tile(num_contact), wCont(n)%tile(num_contact) )
allocate(nCont(n)%tile(num_contact), sCont(n)%tile(num_contact) )
allocate(eCont(n)%align1(num_contact), eCont(n)%align2(num_contact) )
allocate(wCont(n)%align1(num_contact), wCont(n)%align2(num_contact) )
allocate(sCont(n)%align1(num_contact), sCont(n)%align2(num_contact) )
allocate(nCont(n)%align1(num_contact), nCont(n)%align2(num_contact) )
allocate(eCont(n)%refine1(num_contact), eCont(n)%refine2(num_contact) )
allocate(wCont(n)%refine1(num_contact), wCont(n)%refine2(num_contact) )
allocate(sCont(n)%refine1(num_contact), sCont(n)%refine2(num_contact) )
allocate(nCont(n)%refine1(num_contact), nCont(n)%refine2(num_contact) )
allocate(eCont(n)%is1(num_contact), eCont(n)%ie1(num_contact), eCont(n)%js1(num_contact), eCont(n)%je1(num_contact))
allocate(eCont(n)%is2(num_contact), eCont(n)%ie2(num_contact), eCont(n)%js2(num_contact), eCont(n)%je2(num_contact))
allocate(wCont(n)%is1(num_contact), wCont(n)%ie1(num_contact), wCont(n)%js1(num_contact), wCont(n)%je1(num_contact))
allocate(wCont(n)%is2(num_contact), wCont(n)%ie2(num_contact), wCont(n)%js2(num_contact), wCont(n)%je2(num_contact))
allocate(sCont(n)%is1(num_contact), sCont(n)%ie1(num_contact), sCont(n)%js1(num_contact), sCont(n)%je1(num_contact))
allocate(sCont(n)%is2(num_contact), sCont(n)%ie2(num_contact), sCont(n)%js2(num_contact), sCont(n)%je2(num_contact))
allocate(nCont(n)%is1(num_contact), nCont(n)%ie1(num_contact), nCont(n)%js1(num_contact), nCont(n)%je1(num_contact))
allocate(nCont(n)%is2(num_contact), nCont(n)%ie2(num_contact), nCont(n)%js2(num_contact), nCont(n)%je2(num_contact))
end do
!--- set up the east, south, west and north contact for each tile.
do n = 1, num_contact
t1 = tile1(n)
t2 = tile2(n)
select case(align1(n))
case (EAST)
call fill_contact( eCont(t1), t2, istart1(n), iend1(n), jstart1(n), jend1(n), istart2(n), iend2(n), &
jstart2(n), jend2(n), align1(n), align2(n), refine1(n), refine2(n))
case (WEST)
call fill_contact( wCont(t1), t2, istart1(n), iend1(n), jstart1(n), jend1(n), istart2(n), iend2(n), &
jstart2(n), jend2(n), align1(n), align2(n), refine1(n), refine2(n))
case (SOUTH)
call fill_contact( sCont(t1), t2, istart1(n), iend1(n), jstart1(n), jend1(n), istart2(n), iend2(n), &
jstart2(n), jend2(n), align1(n), align2(n), refine1(n), refine2(n))
case (NORTH)
call fill_contact( nCont(t1), t2, istart1(n), iend1(n), jstart1(n), jend1(n), istart2(n), iend2(n), &
jstart2(n), jend2(n), align1(n), align2(n), refine1(n), refine2(n))
end select
select case(align2(n))
case (EAST)
call fill_contact( eCont(t2), t1, istart2(n), iend2(n), jstart2(n), jend2(n), istart1(n), iend1(n), &
jstart1(n), jend1(n), align2(n), align1(n), refine2(n), refine1(n))
case (WEST)
call fill_contact( wCont(t2), t1, istart2(n), iend2(n), jstart2(n), jend2(n), istart1(n), iend1(n), &
jstart1(n), jend1(n), align2(n), align1(n), refine2(n), refine1(n))
case (SOUTH)
call fill_contact( sCont(t2), t1, istart2(n), iend2(n), jstart2(n), jend2(n), istart1(n), iend1(n), &
jstart1(n), jend1(n), align2(n), align1(n), refine2(n), refine1(n))
case (NORTH)
call fill_contact( nCont(t2), t1, istart2(n), iend2(n), jstart2(n), jend2(n), istart1(n), iend1(n), &
jstart1(n), jend1(n), align2(n), align1(n), refine2(n), refine1(n))
end select
end do
!--- the tile number of current pe, halo size
whalo = domain%whalo
ehalo = domain%ehalo
shalo = domain%shalo
nhalo = domain%nhalo
!--- find if there is an extra point in x and y direction depending on position
nlist = size(domain%list(:))
max_contact = 4*num_contact ! should be enough
ntileMe = size(domain%x(:))
refineSend = 1; refineRecv = 1
!--------------------------------------------------------------------------------------------------
! loop over each tile on current domain to set up the overlapping for each tile
!--------------------------------------------------------------------------------------------------
!--- first check the overlap within the tiles.
do n = 1, domain%update_T%nsend
pos = domain%update_T%send(n)%pe - mpp_root_pe()
call add_update_overlap(overlapSend(pos), domain%update_T%send(n) )
enddo
do n = 1, domain%update_T%nrecv
pos = domain%update_T%recv(n)%pe - mpp_root_pe()
call add_update_overlap(overlapRecv(pos), domain%update_T%recv(n) )
enddo
call mpp_get_memory_domain(domain, ism, iem, jsm, jem)
domain%update_T%xbegin = ism; domain%update_T%xend = iem
domain%update_T%ybegin = jsm; domain%update_T%yend = jem
domain%update_T%whalo = whalo; domain%update_T%ehalo = ehalo
domain%update_T%shalo = shalo; domain%update_T%nhalo = nhalo
do tMe = 1, ntileMe
tileMe = domain%tile_id(tMe)
rotateSend = ZERO; rotateRecv = ZERO
!--- loop over all the contact region to figure out the index for overlapping region.
count = 0
do n = 1, eCont(tileMe)%ncontact ! east contact
count = count+1
tileRecv(count) = eCont(tileMe)%tile(n); tileSend(count) = eCont(tileMe)%tile(n)
align1Recv(count) = eCont(tileMe)%align1(n); align2Recv(count) = eCont(tileMe)%align2(n)
align1Send(count) = eCont(tileMe)%align1(n); align2Send(count) = eCont(tileMe)%align2(n)
refineSend(count) = eCont(tileMe)%refine2(n); refineRecv(count) = eCont(tileMe)%refine1(n)
is1Recv(count) = eCont(tileMe)%is1(n) + 1; ie1Recv(count) = is1Recv(count) + ehalo - 1
js1Recv(count) = eCont(tileMe)%js1(n); je1Recv(count) = eCont(tileMe)%je1(n)
select case(eCont(tileMe)%align2(n))
case ( WEST ) ! w <-> e
is2Recv(count) = eCont(tileMe)%is2(n); ie2Recv(count) = is2Recv(count) + ehalo - 1
js2Recv(count) = eCont(tileMe)%js2(n); je2Recv(count) = eCont(tileMe)%je2(n)
ie1Send(count) = eCont(tileMe)%is1(n); is1Send(count) = ie1Send(count) - whalo + 1
js1Send(count) = eCont(tileMe)%js1(n); je1Send(count) = eCont(tileMe)%je1(n)
ie2Send(count) = eCont(tileMe)%is2(n) - 1; is2Send(count) = ie2Send(count) - whalo + 1
js2Send(count) = eCont(tileMe)%js2(n); je2Send(count) = eCont(tileMe)%je2(n)
case ( SOUTH ) ! s <-> e
rotateRecv(count) = NINETY; rotateSend(count) = MINUS_NINETY
js2Recv(count) = eCont(tileMe)%js2(n); je2Recv(count) = js2Recv(count) + ehalo -1
is2Recv(count) = eCont(tileMe)%is2(n); ie2Recv(count) = eCont(tileMe)%ie2(n)
ie1Send(count) = eCont(tileMe)%is1(n); is1Send(count) = ie1Send(count) - shalo + 1
js1Send(count) = eCont(tileMe)%js1(n); je1Send(count) = eCont(tileMe)%je1(n)
is2Send(count) = eCont(tileMe)%is2(n); ie2Send(count) = eCont(tileMe)%ie2(n)
je2Send(count) = eCont(tileMe)%js2(n) - 1; js2Send(count) = je2Send(count) - shalo + 1
end select
end do
do n = 1, sCont(tileMe)%ncontact ! south contact
count = count+1
tileRecv(count) = sCont(tileMe)%tile(n); tileSend(count) = sCont(tileMe)%tile(n)
align1Recv(count) = sCont(tileMe)%align1(n); align2Recv(count) = sCont(tileMe)%align2(n);
align1Send(count) = sCont(tileMe)%align1(n); align2Send(count) = sCont(tileMe)%align2(n);
refineSend(count) = sCont(tileMe)%refine2(n); refineRecv(count) = sCont(tileMe)%refine1(n)
is1Recv(count) = sCont(tileMe)%is1(n); ie1Recv(count) = sCont(tileMe)%ie1(n)
je1Recv(count) = sCont(tileMe)%js1(n) - 1; js1Recv(count) = je1Recv(count) - shalo + 1
select case(sCont(tileMe)%align2(n))
case ( NORTH ) ! n <-> s
is2Recv(count) = sCont(tileMe)%is2(n); ie2Recv(count) = sCont(tileMe)%ie2(n)
je2Recv(count) = sCont(tileMe)%je2(n); js2Recv(count) = je2Recv(count) - shalo + 1
is1Send(count) = sCont(tileMe)%is1(n); ie1Send(count) = sCont(tileMe)%ie1(n)
js1Send(count) = sCont(tileMe)%js1(n); je1Send(count) = js1Send(count) + nhalo -1
is2Send(count) = sCont(tileMe)%is2(n); ie2Send(count) = sCont(tileMe)%ie2(n)
js2Send(count) = sCont(tileMe)%je2(n)+1; je2Send(count) = js2Send(count) + nhalo - 1
case ( EAST ) ! e <-> s
rotateRecv(count) = MINUS_NINETY; rotateSend(count) = NINETY
ie2Recv(count) = sCont(tileMe)%ie2(n); is2Recv(count) = ie2Recv(count) - shalo + 1
js2Recv(count) = sCont(tileMe)%js2(n); je2Recv(count) = sCont(tileMe)%je2(n)
is1Send(count) = sCont(tileMe)%is1(n); ie1Send(count) = sCont(tileMe)%ie1(n)
js1Send(count) = sCont(tileMe)%js1(n); je1Send(count) = js1Send(count) + ehalo - 1
is2Send(count) = sCont(tileMe)%ie2(n)+1; ie2Send(count) = is2Send(count) + ehalo - 1
js2Send(count) = sCont(tileMe)%js2(n); je2Send(count) = sCont(tileMe)%je2(n)
end select
end do
do n = 1, wCont(tileMe)%ncontact ! west contact
count = count+1
tileRecv(count) = wCont(tileMe)%tile(n); tileSend(count) = wCont(tileMe)%tile(n)
align1Recv(count) = wCont(tileMe)%align1(n); align2Recv(count) = wCont(tileMe)%align2(n);
align1Send(count) = wCont(tileMe)%align1(n); align2Send(count) = wCont(tileMe)%align2(n);
refineSend(count) = wCont(tileMe)%refine2(n); refineRecv(count) = wCont(tileMe)%refine1(n)
ie1Recv(count) = wCont(tileMe)%is1(n) - 1; is1Recv(count) = ie1Recv(count) - whalo + 1
js1Recv(count) = wCont(tileMe)%js1(n); je1Recv(count) = wCont(tileMe)%je1(n)
select case(wCont(tileMe)%align2(n))
case ( EAST ) ! e <-> w
ie2Recv(count) = wCont(tileMe)%ie2(n); is2Recv(count) = ie2Recv(count) - whalo + 1
js2Recv(count) = wCont(tileMe)%js2(n); je2Recv(count) = wCont(tileMe)%je2(n)
is1Send(count) = wCont(tileMe)%is1(n); ie1Send(count) = is1Send(count) + ehalo - 1
js1Send(count) = wCont(tileMe)%js1(n); je1Send(count) = wCont(tileMe)%je1(n)
is2Send(count) = wCont(tileMe)%ie2(n)+1; ie2Send(count) = is2Send(count) + ehalo - 1
js2Send(count) = wCont(tileMe)%js2(n); je2Send(count) = wCont(tileMe)%je2(n)
case ( NORTH ) ! n <-> w
rotateRecv(count) = NINETY; rotateSend(count) = MINUS_NINETY
je2Recv(count) = wCont(tileMe)%je2(n); js2Recv(count) = je2Recv(count) - whalo + 1
is2Recv(count) = wCont(tileMe)%is2(n); ie2Recv(count) = wCont(tileMe)%ie2(n)
is1Send(count) = wCont(tileMe)%is1(n); ie1Send(count) = is1Send(count) + nhalo - 1
js1Send(count) = wCont(tileMe)%js1(n); je1Send(count) = wCont(tileMe)%je1(n)
js2Send(count) = wCont(tileMe)%je2(n)+1; je2Send(count) = js2Send(count) + nhalo - 1
is2Send(count) = wCont(tileMe)%is2(n); ie2Send(count) = wCont(tileMe)%ie2(n)
end select
end do
do n = 1, nCont(tileMe)%ncontact ! north contact
count = count+1
tileRecv(count) = nCont(tileMe)%tile(n); tileSend(count) = nCont(tileMe)%tile(n)
align1Recv(count) = nCont(tileMe)%align1(n); align2Recv(count) = nCont(tileMe)%align2(n);
align1Send(count) = nCont(tileMe)%align1(n); align2Send(count) = nCont(tileMe)%align2(n);
refineSend(count) = nCont(tileMe)%refine2(n); refineRecv(count) = nCont(tileMe)%refine1(n)
is1Recv(count) = nCont(tileMe)%is1(n); ie1Recv(count) = nCont(tileMe)%ie1(n)
js1Recv(count) = nCont(tileMe)%je1(n)+1; je1Recv(count) = js1Recv(count) + nhalo - 1
select case(nCont(tileMe)%align2(n))
case ( SOUTH ) ! s <-> n
is2Recv(count) = nCont(tileMe)%is2(n); ie2Recv(count) = nCont(tileMe)%ie2(n)
js2Recv(count) = nCont(tileMe)%js2(n); je2Recv(count) = js2Recv(count) + nhalo - 1
is1Send(count) = nCont(tileMe)%is1(n); ie1Send(count) = nCont(tileMe)%ie1(n)
je1Send(count) = nCont(tileMe)%je1(n); js1Send(count) = je1Send(count) - shalo + 1
is2Send(count) = nCont(tileMe)%is2(n); ie2Send(count) = nCont(tileMe)%ie2(n)
je2Send(count) = nCont(tileMe)%js2(n)-1; js2Send(count) = je2Send(count) - shalo + 1
case ( WEST ) ! w <-> n
rotateRecv(count) = MINUS_NINETY; rotateSend(count) = NINETY
is2Recv(count) = nCont(tileMe)%ie2(n); ie2Recv(count) = is2Recv(count) + nhalo - 1
js2Recv(count) = nCont(tileMe)%js2(n); je2Recv(count) = nCont(tileMe)%je2(n)
is1Send(count) = nCont(tileMe)%is1(n); ie1Send(count) = nCont(tileMe)%ie1(n)
je1Send(count) = nCont(tileMe)%je1(n); js1Send(count) = je1Send(count) - whalo + 1
ie2Send(count) = nCont(tileMe)%is2(n)-1; is2Send(count) = ie2Send(count) - whalo + 1
js2Send(count) = nCont(tileMe)%js2(n); je2Send(count) = nCont(tileMe)%je2(n)
end select
end do
numS = count
numR = count
!--- figure out the index for corner overlapping.
!--- fill_corner_contact will be updated to deal with the situation where there are multiple
!--- tiles on each of the six sides of the cubic grid.
if(.NOT. domain%rotated_ninety) then
call fill_corner_contact(eCont, sCont, wCont, nCont, isgList, iegList, jsgList, jegList, numR, numS, &
tileRecv, tileSend, is1Recv, ie1Recv, js1Recv, je1Recv, is2Recv, ie2Recv, &
js2Recv, je2Recv, is1Send, ie1Send, js1Send, je1Send, is2Send, ie2Send, &
js2Send, je2Send, align1Recv, align2Recv, align1Send, align2Send, &
whalo, ehalo, shalo, nhalo, tileMe )
end if
isc = domain%x(tMe)%compute%begin; iec = domain%x(tMe)%compute%end
jsc = domain%y(tMe)%compute%begin; jec = domain%y(tMe)%compute%end
!--- compute the overlapping for send.
do n = 1, numS
do list = 0, nlist-1
m = mod( domain%pos+list, nlist )
ntileNbr = size(domain%list(m)%x(:))
do tNbr = 1, ntileNbr
if( domain%list(m)%tile_id(tNbr) .NE. tileSend(n) ) cycle
isc1 = max(isc, is1Send(n)); iec1 = min(iec, ie1Send(n))
jsc1 = max(jsc, js1Send(n)); jec1 = min(jec, je1Send(n))
if( isc1 > iec1 .OR. jsc1 > jec1 ) cycle
!--- loop over the 8 directions to get the overlapping, starting from east and going clockwise.
do dir = 1, 8
!--- get the to_pe's data domain.
select case ( dir )
case ( 1 ) ! eastern halo
if( align2Send(n) .NE. EAST ) cycle
isd = domain%list(m)%x(tNbr)%compute%end+1; ied = domain%list(m)%x(tNbr)%compute%end+ehalo
jsd = domain%list(m)%y(tNbr)%compute%begin; jed = domain%list(m)%y(tNbr)%compute%end
case ( 2 ) ! southeast halo
isd = domain%list(m)%x(tNbr)%compute%end+1; ied = domain%list(m)%x(tNbr)%compute%end+ehalo
jsd = domain%list(m)%y(tNbr)%compute%begin-shalo; jed = domain%list(m)%y(tNbr)%compute%begin-1
case ( 3 ) ! southern halo
if( align2Send(n) .NE. SOUTH ) cycle
isd = domain%list(m)%x(tNbr)%compute%begin; ied = domain%list(m)%x(tNbr)%compute%end
jsd = domain%list(m)%y(tNbr)%compute%begin-shalo; jed = domain%list(m)%y(tNbr)%compute%begin-1
case ( 4 ) ! southwest halo
isd = domain%list(m)%x(tNbr)%compute%begin-whalo; ied = domain%list(m)%x(tNbr)%compute%begin-1
jsd = domain%list(m)%y(tNbr)%compute%begin-shalo; jed = domain%list(m)%y(tNbr)%compute%begin-1
case ( 5 ) ! western halo
if( align2Send(n) .NE. WEST ) cycle
isd = domain%list(m)%x(tNbr)%compute%begin-whalo; ied = domain%list(m)%x(tNbr)%compute%begin-1
jsd = domain%list(m)%y(tNbr)%compute%begin; jed = domain%list(m)%y(tNbr)%compute%end
case ( 6 ) ! northwest halo
isd = domain%list(m)%x(tNbr)%compute%begin-whalo; ied = domain%list(m)%x(tNbr)%compute%begin-1
jsd = domain%list(m)%y(tNbr)%compute%end+1; jed = domain%list(m)%y(tNbr)%compute%end+nhalo
case ( 7 ) ! northern halo
if( align2Send(n) .NE. NORTH ) cycle
isd = domain%list(m)%x(tNbr)%compute%begin; ied = domain%list(m)%x(tNbr)%compute%end
jsd = domain%list(m)%y(tNbr)%compute%end+1; jed = domain%list(m)%y(tNbr)%compute%end+nhalo
case ( 8 ) ! northeast halo
isd = domain%list(m)%x(tNbr)%compute%end+1; ied = domain%list(m)%x(tNbr)%compute%end+ehalo
jsd = domain%list(m)%y(tNbr)%compute%end+1; jed = domain%list(m)%y(tNbr)%compute%end+nhalo
end select
isd = max(isd, is2Send(n)); ied = min(ied, ie2Send(n))
jsd = max(jsd, js2Send(n)); jed = min(jed, je2Send(n))
if( isd > ied .OR. jsd > jed ) cycle
ioff = 0; joff = 0
nxd = ied - isd + 1
nyd = jed - jsd + 1
select case ( align2Send(n) )
case ( WEST, EAST )
ioff = isd - is2Send(n)
joff = jsd - js2Send(n)
case ( SOUTH, NORTH )
ioff = isd - is2Send(n)
joff = jsd - js2Send(n)
end select
!--- get the index in current pe.
select case ( rotateSend(n) )
case ( ZERO )
isc2 = is1Send(n) + ioff; iec2 = isc2 + nxd - 1
jsc2 = js1Send(n) + joff; jec2 = jsc2 + nyd - 1
case ( NINETY ) ! N -> W or S -> E
iec2 = ie1Send(n) - joff; isc2 = iec2 - nyd + 1
jsc2 = js1Send(n) + ioff; jec2 = jsc2 + nxd - 1
case ( MINUS_NINETY ) ! W -> N or E -> S
isc2 = is1Send(n) + joff; iec2 = isc2 + nyd - 1
jec2 = je1Send(n) - ioff; jsc2 = jec2 - nxd + 1
end select
is = max(isc1,isc2); ie = min(iec1,iec2)
js = max(jsc1,jsc2); je = min(jec1,jec2)
if(ie.GE.is .AND. je.GE.js )then
if(.not. associated(overlapSend(m)%tileMe)) call allocate_update_overlap(overlapSend(m), MAXOVERLAP)
call insert_overlap_type(overlapSend(m), domain%list(m)%pe, tMe, tNbr, &
is, ie, js, je, dir, rotateSend(n), .true. )
endif
end do ! end do dir = 1, 8
end do ! end do tNbr = 1, ntileNbr
end do ! end do list = 0, nlist-1
end do ! end do n = 1, numS
!--- compute the overlapping for recv.
do n = 1, numR
do list = 0, nlist-1
m = mod( domain%pos+nlist-list, nlist )
ntileNbr = size(domain%list(m)%x(:))
do tNbr = 1, ntileNbr
if( domain%list(m)%tile_id(tNbr) .NE. tileRecv(n) ) cycle
isc = domain%list(m)%x(tNbr)%compute%begin; iec = domain%list(m)%x(tNbr)%compute%end
jsc = domain%list(m)%y(tNbr)%compute%begin; jec = domain%list(m)%y(tNbr)%compute%end
isc = max(isc, is2Recv(n)); iec = min(iec, ie2Recv(n))
jsc = max(jsc, js2Recv(n)); jec = min(jec, je2Recv(n))
if( isc > iec .OR. jsc > jec ) cycle
!--- find the offset for this overlapping.
ioff = 0; joff = 0
nxc = iec - isc + 1; nyc = jec - jsc + 1
select case ( align2Recv(n) )
case ( WEST, EAST )
if(align2Recv(n) == WEST) then
ioff = isc - is2Recv(n)
else
ioff = ie2Recv(n) - iec
endif
joff = jsc - js2Recv(n)
case ( NORTH, SOUTH )
ioff = isc - is2Recv(n)
if(align2Recv(n) == SOUTH) then
joff = jsc - js2Recv(n)
else
joff = je2Recv(n) - jec
endif
end select
!--- get the index in current pe.
select case ( rotateRecv(n) )
case ( ZERO )
isd1 = is1Recv(n) + ioff; ied1 = isd1 + nxc - 1
jsd1 = js1Recv(n) + joff; jed1 = jsd1 + nyc - 1
if( align1Recv(n) == WEST ) then
ied1 = ie1Recv(n)-ioff; isd1 = ied1 - nxc + 1
endif
if( align1Recv(n) == SOUTH ) then
jed1 = je1Recv(n)-joff; jsd1 = jed1 - nyc + 1
endif
case ( NINETY ) ! N -> W or S -> E
if( align1Recv(n) == WEST ) then
ied1 = ie1Recv(n)-joff; isd1 = ied1 - nyc + 1
else
isd1 = is1Recv(n)+joff; ied1 = isd1 + nyc - 1
endif
jed1 = je1Recv(n) - ioff; jsd1 = jed1 - nxc + 1
case ( MINUS_NINETY ) ! W -> N or E -> S
ied1 = ie1Recv(n) - joff; isd1 = ied1 - nyc + 1
if( align1Recv(n) == SOUTH ) then
jed1 = je1Recv(n)-ioff; jsd1 = jed1 - nxc + 1
else
jsd1 = js1Recv(n)+ioff; jed1 = jsd1 + nxc - 1
endif
end select
!--- loop over the 8 directions to get the overlapping, starting from east and going clockwise.
do dir = 1, 8
select case ( dir )
case ( 1 ) ! eastern halo
if( align1Recv(n) .NE. EAST ) cycle
isd2 = domain%x(tMe)%compute%end+1; ied2 = domain%x(tMe)%data%end
jsd2 = domain%y(tMe)%compute%begin; jed2 = domain%y(tMe)%compute%end
case ( 2 ) ! southeast halo
isd2 = domain%x(tMe)%compute%end+1; ied2 = domain%x(tMe)%data%end
jsd2 = domain%y(tMe)%data%begin; jed2 = domain%y(tMe)%compute%begin-1
case ( 3 ) ! southern halo
if( align1Recv(n) .NE. SOUTH ) cycle
isd2 = domain%x(tMe)%compute%begin; ied2 = domain%x(tMe)%compute%end
jsd2 = domain%y(tMe)%data%begin; jed2 = domain%y(tMe)%compute%begin-1
case ( 4 ) ! southwest halo
isd2 = domain%x(tMe)%data%begin; ied2 = domain%x(tMe)%compute%begin-1
jsd2 = domain%y(tMe)%data%begin; jed2 = domain%y(tMe)%compute%begin-1
case ( 5 ) ! western halo
if( align1Recv(n) .NE. WEST ) cycle
isd2 = domain%x(tMe)%data%begin; ied2 = domain%x(tMe)%compute%begin-1
jsd2 = domain%y(tMe)%compute%begin; jed2 = domain%y(tMe)%compute%end
case ( 6 ) ! northwest halo
isd2 = domain%x(tMe)%data%begin; ied2 = domain%x(tMe)%compute%begin-1
jsd2 = domain%y(tMe)%compute%end+1; jed2 = domain%y(tMe)%data%end
case ( 7 ) ! northern halo
if( align1Recv(n) .NE. NORTH ) cycle
isd2 = domain%x(tMe)%compute%begin; ied2 = domain%x(tMe)%compute%end
jsd2 = domain%y(tMe)%compute%end+1; jed2 = domain%y(tMe)%data%end
case ( 8 ) ! northeast halo
isd2 = domain%x(tMe)%compute%end+1; ied2 = domain%x(tMe)%data%end
jsd2 = domain%y(tMe)%compute%end+1; jed2 = domain%y(tMe)%data%end
end select
is = max(isd1,isd2); ie = min(ied1,ied2)
js = max(jsd1,jsd2); je = min(jed1,jed2)
if(ie.GE.is .AND. je.GE.js )then
if(.not. associated(overlapRecv(m)%tileMe)) call allocate_update_overlap(overlapRecv(m), MAXOVERLAP)
call insert_overlap_type(overlapRecv(m), domain%list(m)%pe, tMe, tNbr, &
is, ie, js, je, dir, rotateRecv(n), .true.)
count = overlapRecv(m)%count
endif
end do ! end do dir = 1, 8
end do ! end do tNbr = 1, ntileNbr
end do ! end do list = 0, nlist-1
end do ! end do n = 1, numR
end do ! end do tMe = 1, ntileMe
!--- copy the overlapping information into domain data
nsend = 0; nsend2 = 0
do list = 0, nlist-1
m = mod( domain%pos+list, nlist )
if(overlapSend(m)%count>0) nsend = nsend + 1
enddo
if(debug_message_passing) then
!--- write out send information
unit = mpp_pe() + 1000
do list = 0, nlist-1
m = mod( domain%pos+list, nlist )
if(overlapSend(m)%count==0) cycle
write(unit, *) "********to_pe = " ,overlapSend(m)%pe, " count = ",overlapSend(m)%count
do n = 1, overlapSend(m)%count
write(unit, *) overlapSend(m)%is(n), overlapSend(m)%ie(n), overlapSend(m)%js(n), overlapSend(m)%je(n), &
overlapSend(m)%dir(n), overlapSend(m)%rotation(n)
enddo
enddo
if(nsend >0) call flush(unit)
endif
dirlist(1) = 1; dirlist(2) = 3; dirlist(3) = 5; dirlist(4) = 7
dirlist(5) = 2; dirlist(6) = 4; dirlist(7) = 6; dirlist(8) = 8
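!--- dirlist orders the four side directions first (1=E, 3=S, 5=W, 7=N) and the four corner
!--- directions after them (2=SE, 4=SW, 6=NW, 8=NE), so the send and recv lists below are
!--- assembled in a consistent order.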
! copy the overlap information into domain.
if(nsend >0) then
if(associated(domain%update_T%send)) then
do m = 1, domain%update_T%nsend
call deallocate_overlap_type(domain%update_T%send(m))
enddo
deallocate(domain%update_T%send)
endif
domain%update_T%nsend = nsend
allocate(domain%update_T%send(nsend))
do list = 0, nlist-1
m = mod( domain%pos+list, nlist )
ntileNbr = size(domain%list(m)%x(:))
!--- for the send, the list should be in tileNbr order and dir order to be consistent with Recv
if(overlapSend(m)%count > 0) then
nsend2 = nsend2+1
if(nsend2>nsend) call mpp_error(FATAL, &
"mpp_domains_define.inc(define_contact_point): nsend2 is greater than nsend")
call allocate_update_overlap(domain%update_T%send(nsend2), overlapSend(m)%count)
do tNbr = 1, ntileNbr
do tt = 1, ntileMe
if(domain%list(m)%pe == domain%pe) then ! own processor
tMe = tNbr+tt-1
if(tMe > ntileMe) tMe = tMe - ntileMe
else
tMe = tt
end if
do n = 1, 8 ! loop over 8 direction
do l = 1, overlapSend(m)%count
if(overlapSend(m)%tileMe(l) .NE. tMe) cycle
if(overlapSend(m)%tileNbr(l) .NE. tNbr) cycle
if(overlapSend(m)%dir(l) .NE. dirlist(n) ) cycle
call insert_overlap_type(domain%update_T%send(nsend2), overlapSend(m)%pe, &
overlapSend(m)%tileMe(l), overlapSend(m)%tileNbr(l), overlapSend(m)%is(l), overlapSend(m)%ie(l), &
overlapSend(m)%js(l), overlapSend(m)%je(l), overlapSend(m)%dir(l), overlapSend(m)%rotation(l), &
overlapSend(m)%from_contact(l) )
end do
end do
end do
end do
end if
enddo
endif
if(nsend2 .NE. nsend) call mpp_error(FATAL, &
"mpp_domains_define.inc(define_contact_point): nsend2 does not equal to nsend")
nrecv = 0; nrecv2 = 0
do list = 0, nlist-1
m = mod( domain%pos+list, nlist )
if(overlapRecv(m)%count>0) nrecv = nrecv + 1
enddo
if(debug_message_passing) then
do list = 0, nlist-1
m = mod( domain%pos+list, nlist )
if(overlapRecv(m)%count==0) cycle
write(unit, *) "********from_pe = " ,overlapRecv(m)%pe, " count = ",overlapRecv(m)%count
do n = 1, overlapRecv(m)%count
write(unit, *) overlapRecv(m)%is(n), overlapRecv(m)%ie(n), overlapRecv(m)%js(n), overlapRecv(m)%je(n), &
overlapRecv(m)%dir(n), overlapRecv(m)%rotation(n)
enddo
enddo
if(nrecv >0) call flush(unit)
endif
if(nrecv >0) then
if(associated(domain%update_T%recv)) then
do m = 1, domain%update_T%nrecv
call deallocate_overlap_type(domain%update_T%recv(m))
enddo
deallocate(domain%update_T%recv)
endif
domain%update_T%nrecv = nrecv
allocate(domain%update_T%recv(nrecv))
do list = 0, nlist-1
m = mod( domain%pos+nlist-list, nlist )
ntileNbr = size(domain%list(m)%x(:))
if(overlapRecv(m)%count > 0) then
nrecv2 = nrecv2 + 1
if(nrecv2>nrecv) call mpp_error(FATAL, &
"mpp_domains_define.inc(define_contact_point): nrecv2 is greater than nrecv")
call allocate_update_overlap(domain%update_T%recv(nrecv2), overlapRecv(m)%count)
do tMe = 1, ntileMe
do tt = 1, ntileNbr
!--- make sure tiles are traversed in the same order on different pes
if(domain%list(m)%pe == domain%pe) then ! own processor
tNbr = tMe+tt-1
if(tNbr>ntileNbr) tNbr = tNbr - ntileNbr
else
tNbr = tt
end if
do n = 1, 8 ! loop over 8 direction
do l = 1, overlapRecv(m)%count
if(overlapRecv(m)%tileMe(l) .NE. tMe) cycle
if(overlapRecv(m)%tileNbr(l) .NE. tNbr) cycle
if(overlapRecv(m)%dir(l) .NE. dirlist(n) ) cycle
call insert_overlap_type(domain%update_T%recv(nrecv2), overlapRecv(m)%pe, &
overlapRecv(m)%tileMe(l), overlapRecv(m)%tileNbr(l), overlapRecv(m)%is(l), overlapRecv(m)%ie(l), &
overlapRecv(m)%js(l), overlapRecv(m)%je(l), overlapRecv(m)%dir(l), overlapRecv(m)%rotation(l), &
overlapRecv(m)%from_contact(l))
count = domain%update_T%recv(nrecv2)%count
end do
end do
end do
end do
end if
end do
endif
if(nrecv2 .NE. nrecv) call mpp_error(FATAL, &
"mpp_domains_define.inc(define_contact_point): nrecv2 does not equal to nrecv")
do m = 0,nlist-1
call deallocate_overlap_type(overlapSend(m))
call deallocate_overlap_type(overlapRecv(m))
enddo
!--- release memory
do n = 1, ntiles
deallocate(eCont(n)%tile, wCont(n)%tile, sCont(n)%tile, nCont(n)%tile )
deallocate(eCont(n)%align1, wCont(n)%align1, sCont(n)%align1, nCont(n)%align1)
deallocate(eCont(n)%align2, wCont(n)%align2, sCont(n)%align2, nCont(n)%align2)
deallocate(eCont(n)%refine1, wCont(n)%refine1, sCont(n)%refine1, nCont(n)%refine1)
deallocate(eCont(n)%refine2, wCont(n)%refine2, sCont(n)%refine2, nCont(n)%refine2)
deallocate(eCont(n)%is1, eCont(n)%ie1, eCont(n)%js1, eCont(n)%je1 )
deallocate(eCont(n)%is2, eCont(n)%ie2, eCont(n)%js2, eCont(n)%je2 )
deallocate(wCont(n)%is1, wCont(n)%ie1, wCont(n)%js1, wCont(n)%je1 )
deallocate(wCont(n)%is2, wCont(n)%ie2, wCont(n)%js2, wCont(n)%je2 )
deallocate(sCont(n)%is1, sCont(n)%ie1, sCont(n)%js1, sCont(n)%je1 )
deallocate(sCont(n)%is2, sCont(n)%ie2, sCont(n)%js2, sCont(n)%je2 )
deallocate(nCont(n)%is1, nCont(n)%ie1, nCont(n)%js1, nCont(n)%je1 )
deallocate(nCont(n)%is2, nCont(n)%ie2, nCont(n)%js2, nCont(n)%je2 )
end do
domain%initialized = .true.
end subroutine define_contact_point
!##############################################################################
!--- always fill the contact according to index order.
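!--- contacts are kept sorted by the starting index along the contact line (js1 for east/west
!--- contacts, is1 for south/north contacts); a new entry is inserted at its sorted position.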
subroutine fill_contact(Contact, tile, is1, ie1, js1, je1, is2, ie2, js2, je2, align1, align2, refine1, refine2 )
type(contact_type), intent(inout) :: Contact
integer, intent(in) :: tile
integer, intent(in) :: is1, ie1, js1, je1
integer, intent(in) :: is2, ie2, js2, je2
integer, intent(in) :: align1, align2
real, intent(in) :: refine1, refine2
integer :: pos, n
do pos = 1, Contact%ncontact
select case(align1)
case(WEST, EAST)
if( js1 < Contact%js1(pos) ) exit
case(SOUTH, NORTH)
if( is1 < Contact%is1(pos) ) exit
end select
end do
Contact%ncontact = Contact%ncontact + 1
do n = Contact%ncontact, pos+1, -1 ! shift the data if needed.
Contact%tile(n) = Contact%tile(n-1)
Contact%align1(n) = Contact%align1(n-1)
Contact%align2(n) = Contact%align2(n-1)
Contact%is1(n) = Contact%is1(n-1); Contact%ie1(n) = Contact%ie1(n-1)
Contact%js1(n) = Contact%js1(n-1); Contact%je1(n) = Contact%je1(n-1)
Contact%is2(n) = Contact%is2(n-1); Contact%ie2(n) = Contact%ie2(n-1)
Contact%js2(n) = Contact%js2(n-1); Contact%je2(n) = Contact%je2(n-1)
end do
Contact%tile(pos) = tile
Contact%align1(pos) = align1
Contact%align2(pos) = align2
Contact%refine1(pos) = refine1
Contact%refine2(pos) = refine2
Contact%is1(pos) = is1; Contact%ie1(pos) = ie1
Contact%js1(pos) = js1; Contact%je1(pos) = je1
Contact%is2(pos) = is2; Contact%ie2(pos) = ie2
Contact%js2(pos) = js2; Contact%je2(pos) = je2
end subroutine fill_contact
!############################################################################
! this routine sets the overlapping between tiles for E,C,N-cell based on T-cell overlapping
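! The E-, C- and N-cell overlap regions are obtained from the T-cell overlaps by shifting the
! bounding indices by (ishift, jshift) according to the halo direction and the rotation of the contact.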
subroutine set_contact_point(domain, position)
type(domain2d), intent(inout) :: domain
integer, intent(in) :: position
integer :: ishift, jshift, nlist, list, m, n
integer :: ntileMe, tMe, dir, count, pos, nsend, nrecv
integer :: isoff1, ieoff1, isoff2, ieoff2, jsoff1, jeoff1, jsoff2, jeoff2
type(overlap_type), pointer :: ptrIn => NULL()
type(overlapSpec), pointer :: update_in => NULL()
type(overlapSpec), pointer :: update_out => NULL()
type(overlap_type) :: overlapList(0:size(domain%list(:))-1)
type(overlap_type) :: overlap
call mpp_get_domain_shift(domain, ishift, jshift, position)
update_in => domain%update_T
select case(position)
case (CORNER)
update_out => domain%update_C
case (EAST)
update_out => domain%update_E
case (NORTH)
update_out => domain%update_N
case default
call mpp_error(FATAL, "mpp_domains_define.inc(set_contact_point): the position should be CORNER, EAST or NORTH")
end select
update_out%xbegin = update_in%xbegin; update_out%xend = update_in%xend + ishift
update_out%ybegin = update_in%ybegin; update_out%yend = update_in%yend + jshift
update_out%whalo = update_in%whalo; update_out%ehalo = update_in%ehalo
update_out%shalo = update_in%shalo; update_out%nhalo = update_in%nhalo
nlist = size(domain%list(:))
ntileMe = size(domain%x(:))
call allocate_update_overlap(overlap, MAXOVERLAP)
do m = 0, nlist-1
call init_overlap_type(overlapList(m))
enddo
!--- first copy the send information in update_out to send
nsend = update_out%nsend
do m = 1, nsend
pos = update_out%send(m)%pe - mpp_root_pe()
call add_update_overlap(overlapList(pos), update_out%send(m))
call deallocate_overlap_type(update_out%send(m))
enddo
if(ASSOCIATED(update_out%send) )deallocate(update_out%send)
!--- loop over the list of overlapping.
nsend = update_in%nsend
do m = 1, nsend
ptrIn => update_in%send(m)
pos = PtrIn%pe - mpp_root_pe()
do n = 1, ptrIn%count
dir = ptrIn%dir(n)
! only set the overlap between tiles for send (entries that come from a tile contact)
if(ptrIn%from_contact(n)) then
select case ( dir )
case ( 1 ) ! to_pe's eastern halo
select case(ptrIn%rotation(n))
case (ZERO) ! W -> E
isoff1 = ishift; ieoff1 = ishift; jsoff1 = 0; jeoff1 = jshift
case (NINETY) ! S -> E
isoff1 = 0; ieoff1 = jshift; jsoff1 = ishift; jeoff1 = ishift
end select
case ( 2 ) ! to_pe's south-eastern halo
select case(ptrIn%rotation(n))
case (ZERO)
isoff1 = ishift; ieoff1 = ishift; jsoff1 = 0; jeoff1 = 0
case (NINETY)
isoff1 = jshift; ieoff1 = jshift; jsoff1 = ishift; jeoff1 = ishift
case (MINUS_NINETY)
isoff1 = 0; ieoff1 = 0; jsoff1 = 0; jeoff1 = 0
end select
case ( 3 ) ! to_pe's southern halo
select case(ptrIn%rotation(n))
case (ZERO) ! N -> S
isoff1 = 0; ieoff1 = ishift; jsoff1 = 0; jeoff1 = 0
case (MINUS_NINETY) ! E -> S
isoff1 = 0; ieoff1 = 0; jsoff1 = 0; jeoff1 = ishift
end select
case ( 4 ) ! to_pe's south-western halo
select case(ptrIn%rotation(n))
case (ZERO)
isoff1 = 0; ieoff1 = 0; jsoff1 = 0; jeoff1 = 0
case (NINETY)
isoff1 = jshift; ieoff1 = jshift; jsoff1 = 0; jeoff1 = 0
case (MINUS_NINETY)
isoff1 = 0; ieoff1 = 0; jsoff1 = ishift; jeoff1 = ishift
end select
case ( 5 ) ! to_pe's western halo
select case(ptrIn%rotation(n))
case (ZERO) ! E -> W
isoff1 = 0; ieoff1 = 0; jsoff1 = 0; jeoff1 = jshift
case (NINETY) ! N -> W
isoff1 = 0; ieoff1 = jshift; jsoff1 = 0; jeoff1 = 0
end select
case ( 6 ) ! to_pe's north-western halo
select case(ptrIn%rotation(n))
case (ZERO)
isoff1 = 0; ieoff1 = 0; jsoff1 = jshift; jeoff1 = jshift
case (NINETY)
isoff1 = 0; ieoff1 = 0; jsoff1 = 0; jeoff1 = 0
case (MINUS_NINETY)
isoff1 = jshift; ieoff1 = jshift; jsoff1 = ishift; jeoff1 = ishift
end select
case ( 7 ) ! to_pe's northern halo
select case(ptrIn%rotation(n))
case (ZERO) ! S -> N
isoff1 = 0; ieoff1 = ishift; jsoff1 = jshift; jeoff1 = jshift
case (MINUS_NINETY) ! W -> N
isoff1 = jshift; ieoff1 = jshift; jsoff1 = 0; jeoff1 = ishift
end select
case ( 8 ) ! to_pe's north-eastern halo
select case(ptrIn%rotation(n))
case (ZERO)
isoff1 = ishift; ieoff1 = ishift; jsoff1 = jshift; jeoff1 = jshift
case (NINETY)
isoff1 = 0; ieoff1 = 0; jsoff1 = ishift; jeoff1 = ishift
case (MINUS_NINETY)
isoff1 = jshift; ieoff1 = jshift; jsoff1 = 0; jeoff1 = 0
end select
end select
call insert_overlap_type(overlap, PtrIn%pe, PtrIn%tileMe(n), PtrIn%tileNbr(n), &
Ptrin%is(n) + isoff1, Ptrin%ie(n) + ieoff1, Ptrin%js(n) + jsoff1, &
Ptrin%je(n) + jeoff1, PtrIn%dir(n), PtrIn%rotation(n), PtrIn%from_contact(n))
end if
end do ! do n = 1, ptrIn%count
if(overlap%count > 0) then
call add_update_overlap(overlapList(pos), overlap)
call init_overlap_type(overlap)
endif
end do ! do m = 1, nsend
nsend = 0
do list = 0, nlist-1
m = mod( domain%pos+list, nlist )
if(overlapList(m)%count>0) nsend = nsend+1
enddo
update_out%nsend = nsend
if(nsend>0) then
allocate(update_out%send(nsend))
pos = 0
do list = 0, nlist-1
m = mod( domain%pos+list, nlist )
if(overlapList(m)%count>0) then
pos = pos+1
if(pos>nsend) call mpp_error(FATAL, &
"mpp_domains_define.inc(set_contact_point): pos should be no larger than nsend")
call add_update_overlap(update_out%send(pos), overlapList(m))
call deallocate_overlap_type(overlapList(m))
endif
enddo
if(pos .NE. nsend) call mpp_error(FATAL, &
"mpp_domains_define.inc(set_contact_point): pos should equal to nsend")
endif
!--- first copy the recv information in update_out to recv
nrecv = update_out%nrecv
do m = 1, nrecv
pos = update_out%recv(m)%pe - mpp_root_pe()
call add_update_overlap(overlapList(pos), update_out%recv(m))
call deallocate_overlap_type(update_out%recv(m))
enddo
if(ASSOCIATED(update_out%recv) )deallocate(update_out%recv)
!--- loop over the list of overlapping.
nrecv = update_in%nrecv
do m=1,nrecv
ptrIn => update_in%recv(m)
pos = PtrIn%pe - mpp_root_pe()
do n = 1, ptrIn%count
dir = ptrIn%dir(n)
! only set the overlap between tiles for recv (entries that come from a tile contact)
if(ptrIn%from_contact(n)) then
select case ( dir )
case ( 1 ) ! E
isoff1 = ishift; ieoff1 = ishift; jsoff1 = 0; jeoff1 = jshift
case ( 2 ) ! SE
isoff1 = ishift; ieoff1 = ishift; jsoff1 = 0; jeoff1 = 0
case ( 3 ) ! S
isoff1 = 0; ieoff1 = ishift; jsoff1 = 0; jeoff1 = 0
case ( 4 ) ! SW
isoff1 = 0; ieoff1 = 0; jsoff1 = 0; jeoff1 = 0
case ( 5 ) ! W
isoff1 = 0; ieoff1 = 0; jsoff1 = 0; jeoff1 = jshift
case ( 6 ) ! NW
isoff1 = 0; ieoff1 = 0; jsoff1 = jshift; jeoff1 = jshift
case ( 7 ) ! N
isoff1 = 0; ieoff1 = ishift; jsoff1 = jshift; jeoff1 = jshift
case ( 8 ) ! NE
isoff1 = ishift; ieoff1 = ishift; jsoff1 = jshift; jeoff1 = jshift
end select
call insert_overlap_type(overlap, PtrIn%pe, PtrIn%tileMe(n), PtrIn%tileNbr(n), &
Ptrin%is(n) + isoff1, Ptrin%ie(n) + ieoff1, Ptrin%js(n) + jsoff1, &
Ptrin%je(n) + jeoff1, PtrIn%dir(n), PtrIn%rotation(n), PtrIn%from_contact(n))
count = overlap%count
end if
end do ! do n = 1, ptrIn%count
if(overlap%count > 0) then
call add_update_overlap(overlapList(pos), overlap)
call init_overlap_type(overlap)
endif
do tMe = 1, size(domain%x(:))
do n = 1, overlap%count
if(overlap%tileMe(n) == tMe) then
if(overlap%dir(n) == 1 ) domain%x(tMe)%loffset = 0
if(overlap%dir(n) == 7 ) domain%y(tMe)%loffset = 0
end if
end do
end do
end do ! do m = 1, nrecv
nrecv = 0
do list = 0, nlist-1
m = mod( domain%pos+nlist-list, nlist )
if(overlapList(m)%count>0) nrecv = nrecv+1
enddo
update_out%nrecv = nrecv
if(nrecv>0) then
allocate(update_out%recv(nrecv))
pos = 0
do list = 0, nlist-1
m = mod( domain%pos+nlist-list, nlist )
if(overlapList(m)%count>0) then
pos = pos+1
if(pos>nrecv) call mpp_error(FATAL, &
"mpp_domains_define.inc(set_contact_point): pos should be no larger than nrecv")
call add_update_overlap(update_out%recv(pos), overlapList(m))
call deallocate_overlap_type(overlapList(m))
endif
enddo
if(pos .NE. nrecv) call mpp_error(FATAL, &
"mpp_domains_define.inc(set_contact_point): pos should equal to nrecv")
endif
call deallocate_overlap_type(overlap)
end subroutine set_contact_point
!--- set up the overlap used for the boundary check when the domain is symmetric. The check is
!--- done on the current pe: the east boundary for E-cell, the north boundary for N-cell,
!--- and both the east and north boundaries for C-cell.
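!--- The resulting check_C/check_E/check_N lists describe the shared (redundant) points along
!--- those boundaries; they are presumably used later to verify that the pes which both hold a
!--- given boundary point agree on its value.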
subroutine set_check_overlap( domain, position )
type(domain2d), intent(in) :: domain
integer, intent(in) :: position
integer :: nlist, m, n
integer, parameter :: MAXCOUNT = 100
integer :: is, ie, js, je
integer :: nsend, nrecv, pos, maxsize, rotation
type(overlap_type) :: overlap
type(overlapSpec), pointer :: update => NULL()
type(overlapSpec), pointer :: check => NULL()
select case(position)
case (CORNER)
update => domain%update_C
check => domain%check_C
case (EAST)
update => domain%update_E
check => domain%check_E
case (NORTH)
update => domain%update_N
check => domain%check_N
case default
call mpp_error(FATAL, "mpp_domains_define.inc(set_check_overlap): position should be CORNER, EAST or NORTH")
end select
check%xbegin = update%xbegin; check%xend = update%xend
check%ybegin = update%ybegin; check%yend = update%yend
check%nsend = 0
check%nrecv = 0
if( .NOT. domain%symmetry ) return
nsend = 0
maxsize = 0
do m = 1, update%nsend
do n = 1, update%send(m)%count
if( update%send(m)%rotation(n) == ONE_HUNDRED_EIGHTY ) cycle
if( ( (position == EAST .OR. position == CORNER) .AND. update%send(m)%dir(n) == 1 ) .OR. &
( (position == NORTH .OR. position == CORNER) .AND. update%send(m)%dir(n) == 7 ) ) then
maxsize = max(maxsize, update%send(m)%count)
nsend = nsend + 1
exit
endif
enddo
enddo
if(nsend>0) then
allocate(check%send(nsend))
call allocate_check_overlap(overlap, maxsize)
endif
nlist = size(domain%list(:))
!--- loop over the list of domains to find the boundary overlap for send
pos = 0
do m = 1, update%nsend
do n = 1, update%send(m)%count
if( update%send(m)%rotation(n) == ONE_HUNDRED_EIGHTY ) cycle
! compare the east direction on the current pe
if( (position == EAST .OR. position == CORNER) .AND. update%send(m)%dir(n) == 1 ) then
rotation = update%send(m)%rotation(n)
select case( rotation )
case( ZERO ) ! W -> E
is = update%send(m)%is(n) - 1
ie = is
js = update%send(m)%js(n)
je = update%send(m)%je(n)
case( NINETY ) ! S -> E
is = update%send(m)%is(n)
ie = update%send(m)%ie(n)
js = update%send(m)%js(n) - 1
je = js
end select
call insert_check_overlap(overlap, update%send(m)%pe, &
update%send(m)%tileMe(n), 1, rotation, is, ie, js, je)
end if
! compare the north direction on the current pe
if( (position == NORTH .OR. position == CORNER) .AND. update%send(m)%dir(n) == 7 ) then
rotation = update%send(m)%rotation(n)
select case( rotation )
case( ZERO ) ! S->N
is = update%send(m)%is(n)
ie = update%send(m)%ie(n)
js = update%send(m)%js(n) - 1
je = js
case( MINUS_NINETY ) ! W->N
is = update%send(m)%is(n) - 1
ie = is
js = update%send(m)%js(n)
je = update%send(m)%je(n)
end select
call insert_check_overlap(overlap, update%send(m)%pe, &
update%send(m)%tileMe(n), 4, rotation, is, ie, js, je)
end if
end do ! do n =1, update%send(m)%count
if(overlap%count>0) then
pos = pos+1
if(pos>nsend)call mpp_error(FATAL, "mpp_domains_define.inc(set_check_overlap): pos is greater than nsend")
call add_check_overlap(check%send(pos), overlap)
call init_overlap_type(overlap)
endif
end do ! do m = 1, update%nsend
if(pos .NE. nsend)call mpp_error(FATAL, "mpp_domains_define.inc(set_check_overlap): pos does not equal nsend")
nrecv = 0
maxsize = 0
do m = 1, update%nrecv
do n = 1, update%recv(m)%count
if( update%recv(m)%rotation(n) == ONE_HUNDRED_EIGHTY ) cycle
if( ( (position == EAST .OR. position == CORNER) .AND. update%recv(m)%dir(n) == 1 ) .OR. &
( (position == NORTH .OR. position == CORNER) .AND. update%recv(m)%dir(n) == 7 ) ) then
maxsize = max(maxsize, update%recv(m)%count)
nrecv = nrecv + 1
exit
endif
enddo
enddo
if(nsend>0) call deallocate_overlap_type(overlap)
if(nrecv>0) then
allocate(check%recv(nrecv))
call allocate_check_overlap(overlap, maxsize)
endif
pos = 0
do m = 1, update%nrecv
do n = 1, update%recv(m)%count
if( update%recv(m)%rotation(n) == ONE_HUNDRED_EIGHTY ) cycle
if( (position == EAST .OR. position == CORNER) .AND. update%recv(m)%dir(n) == 1 ) then
is = update%recv(m)%is(n) - 1
ie = is
js = update%recv(m)%js(n)
je = update%recv(m)%je(n)
call insert_check_overlap(overlap, update%recv(m)%pe, &
update%recv(m)%tileMe(n), 1, update%recv(m)%rotation(n), is, ie, js, je)
end if
if( (position == NORTH .OR. position == CORNER) .AND. update%recv(m)%dir(n) == 7 ) then
is = update%recv(m)%is(n)
ie = update%recv(m)%ie(n)
js = update%recv(m)%js(n) - 1
je = js
call insert_check_overlap(overlap, update%recv(m)%pe, &
update%recv(m)%tileMe(n), 3, update%recv(m)%rotation(n), is, ie, js, je)
end if
end do ! do n = 1, update%recv(m)%count
if(overlap%count>0) then
pos = pos+1
if(pos>nrecv)call mpp_error(FATAL, "mpp_domains_define.inc(set_check_overlap): pos is greater than nrecv")
call add_check_overlap(check%recv(pos), overlap)
call init_overlap_type(overlap)
endif
end do ! do m = 1, update%nrecv
if(pos .NE. nrecv)call mpp_error(FATAL, "mpp_domains_define.inc(set_check_overlap): pos does not equal nrecv")
if(nrecv>0) call deallocate_overlap_type(overlap)
end subroutine set_check_overlap
!#############################################################################
!--- set up the boundary overlap when the domain is symmetric.
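!--- The bound_C/bound_E/bound_N lists built here record, for each neighbor pe, the boundary
!--- strips of the symmetric domain together with a starting index into the boundary buffer;
!--- they are presumably consumed by mpp_get_boundary to exchange data on those shared points.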
subroutine set_bound_overlap( domain, position )
type(domain2d), intent(inout) :: domain
integer, intent(in) :: position
integer :: m, n, l, count, dr, tMe, i
integer, parameter :: MAXCOUNT = 100
integer, dimension(MAXCOUNT) :: dir, rotation, is, ie, js, je, tileMe, index
integer, dimension(size(domain%x(:)), 4) :: nrecvl
integer, dimension(size(domain%x(:)), 4, MAXCOUNT) :: isl, iel, jsl, jel
type(overlap_type), pointer :: overlap => NULL()
type(overlapSpec), pointer :: update => NULL()
type(overlapSpec), pointer :: bound => NULL()
integer :: nlist_send, nlist_recv, ishift, jshift
integer :: ism, iem, jsm, jem, nsend, nrecv
integer :: isg, ieg, jsg, jeg, nlist, list
! integer :: isc1, iec1, jsc1, jec1
! integer :: isc2, iec2, jsc2, jec2
integer :: isd, ied, jsd, jed
integer :: npes_x, npes_y, ipos, jpos, inbr, jnbr
integer :: isc, iec, jsc, jec, my_pe
integer :: pe_south1, pe_south2, pe_west0, pe_west1, pe_west2
integer :: is_south1, ie_south1, js_south1, je_south1
integer :: is_south2, ie_south2, js_south2, je_south2
integer :: is_west0, ie_west0, js_west0, je_west0
integer :: is_west1, ie_west1, js_west1, je_west1
integer :: is_west2, ie_west2, js_west2, je_west2
logical :: x_cyclic, y_cyclic, folded_north
is_south1=0; ie_south1=0; js_south1=0; je_south1=0
is_south2=0; ie_south2=0; js_south2=0; je_south2=0
is_west0=0; ie_west0=0; js_west0=0; je_west0=0
is_west1=0; ie_west1=0; js_west1=0; je_west1=0
is_west2=0; ie_west2=0; js_west2=0; je_west2=0
if( position == CENTER .OR. .NOT. domain%symmetry ) return
call mpp_get_domain_shift(domain, ishift, jshift, position)
call mpp_get_global_domain(domain, isg, ieg, jsg, jeg)
call mpp_get_memory_domain ( domain, ism, iem, jsm, jem )
select case(position)
case (CORNER)
update => domain%update_C
bound => domain%bound_C
case (EAST)
update => domain%update_E
bound => domain%bound_E
case (NORTH)
update => domain%update_N
bound => domain%bound_N
case default
call mpp_error( FATAL, "mpp_domains_mod(set_bound_overlap): invalid option of position")
end select
bound%xbegin = ism; bound%xend = iem + ishift
bound%ybegin = jsm; bound%yend = jem + jshift
nlist_send = max(update%nsend,4)
nlist_recv = max(update%nrecv,4)
bound%nsend = nlist_send
bound%nrecv = nlist_recv
if(nlist_send >0) then
allocate(bound%send(nlist_send))
bound%send(:)%count = 0
endif
if(nlist_recv >0) then
allocate(bound%recv(nlist_recv))
bound%recv(:)%count = 0
endif
!--- loop over the list of domains to find the boundary overlap for send
nlist = size(domain%list(:))
npes_x = size(domain%x(1)%list(:))
npes_y = size(domain%y(1)%list(:))
x_cyclic = domain%x(1)%cyclic
y_cyclic = domain%y(1)%cyclic
folded_north = BTEST(domain%fold,NORTH)
ipos = domain%x(1)%pos
jpos = domain%y(1)%pos
isc = domain%x(1)%compute%begin; iec = domain%x(1)%compute%end
jsc = domain%y(1)%compute%begin; jec = domain%y(1)%compute%end
nsend = 0
if(domain%ntiles == 1) then ! use neighbor processor to configure send and recv
! currently only set up for west and south boundary
! south boundary for send
pe_south1 = NULL_PE; pe_south2 = NULL_PE
if( position == NORTH .OR. position == CORNER ) then
inbr = ipos; jnbr = jpos + 1
if( jnbr == npes_y .AND. y_cyclic) jnbr = 0
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_south1 = domain%pearray(inbr,jnbr)
is_south1 = isc + ishift; ie_south1 = iec+ishift
js_south1 = jec + jshift; je_south1 = js_south1
endif
endif
!--- send the corner point to the northeast processor (its southwest point) when position is CORNER
if( position == CORNER ) then
inbr = ipos + 1; jnbr = jpos + 1
if( inbr == npes_x .AND. x_cyclic) inbr = 0
if( jnbr == npes_y .AND. y_cyclic) jnbr = 0
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_south2 = domain%pearray(inbr,jnbr)
is_south2 = iec + ishift; ie_south2 = is_south2
js_south2 = jec + jshift; je_south2 = js_south2
endif
endif
!---west boundary for send
pe_west0 = NULL_PE; pe_west1 = NULL_PE; pe_west2 = NULL_PE
if( position == EAST ) then
inbr = ipos+1; jnbr = jpos
if( inbr == npes_x .AND. x_cyclic) inbr = 0
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_west1 = domain%pearray(inbr,jnbr)
is_west1 = iec + ishift; ie_west1 = is_west1
js_west1 = jsc + jshift; je_west1 = jec + jshift
endif
else if ( position == CORNER ) then ! possible split into two parts.
!--- on the fold.
if( folded_north .AND. jec == jeg .AND. ipos .LT. (npes_x-1)/2 ) then
inbr = npes_x - ipos - 1; jnbr = jpos
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_west0 = domain%pearray(inbr,jnbr)
is_west0 = iec+ishift; ie_west0 = is_west0
js_west0 = jec+jshift; je_west0 = js_west0
endif
endif
if( folded_north .AND. jec == jeg .AND. ipos .GE. npes_x/2 .AND. ipos .LT. (npes_x-1) ) then
inbr = ipos+1; jnbr = jpos
if( inbr == npes_x .AND. x_cyclic) inbr = 0
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_west1 = domain%pearray(inbr,jnbr)
is_west1 = iec + ishift; ie_west1 = is_west1
js_west1 = jsc + jshift; je_west1 = jec
endif
else
inbr = ipos+1; jnbr = jpos
if( inbr == npes_x .AND. x_cyclic) inbr = 0
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_west1 = domain%pearray(inbr,jnbr)
is_west1 = iec + ishift; ie_west1 = is_west1
js_west1 = jsc + jshift; je_west1 = jec + jshift
endif
endif
endif
!--- send the corner point to the northeast processor (its southwest point) when position is CORNER
if( position == CORNER ) then
inbr = ipos + 1; jnbr = jpos + 1
if( inbr == npes_x .AND. x_cyclic) inbr = 0
if( jnbr == npes_y .AND. y_cyclic) jnbr = 0
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_west2 = domain%pearray(inbr,jnbr)
is_west2 = iec + ishift; ie_west2 = is_west2
js_west2 = jec + jshift; je_west2 = js_west2
endif
endif
!write(1000+mpp_pe(),*)"send south 1", pe_south1, is_south1, ie_south1, js_south1, je_south1
!write(1000+mpp_pe(),*)"send south 2", pe_south2, is_south2, ie_south2, js_south2, je_south2
!write(1000+mpp_pe(),*)"send west 0", pe_west0, is_west0, ie_west0, js_west0, je_west0
!write(1000+mpp_pe(),*)"send west 1", pe_west1, is_west1, ie_west1, js_west1, je_west1
!write(1000+mpp_pe(),*)"send west 2", pe_west2, is_west2, ie_west2, js_west2, je_west2
do list = 0,nlist-1
m = mod( domain%pos+list, nlist )
count = 0
my_pe = domain%list(m)%pe
if(my_pe == pe_south1) then
count = count + 1
is(count) = is_south1; ie(count) = ie_south1
js(count) = js_south1; je(count) = je_south1
dir(count) = 2
rotation(count) = ZERO
endif
if(my_pe == pe_south2) then
count = count + 1
is(count) = is_south2; ie(count) = ie_south2
js(count) = js_south2; je(count) = je_south2
dir(count) = 2
rotation(count) = ZERO
endif
if(my_pe == pe_west0) then
count = count + 1
is(count) = is_west0; ie(count) = ie_west0
js(count) = js_west0; je(count) = je_west0
dir(count) = 3
rotation(count) = ONE_HUNDRED_EIGHTY
endif
if(my_pe == pe_west1) then
count = count + 1
is(count) = is_west1; ie(count) = ie_west1
js(count) = js_west1; je(count) = je_west1
dir(count) = 3
rotation(count) = ZERO
endif
if(my_pe == pe_west2) then
count = count + 1
is(count) = is_west2; ie(count) = ie_west2
js(count) = js_west2; je(count) = je_west2
dir(count) = 3
rotation(count) = ZERO
endif
if(count >0) then
nsend = nsend + 1
if(nsend > nlist_send) call mpp_error(FATAL, "set_bound_overlap: nsend > nlist_send")
bound%send(nsend)%count = count
bound%send(nsend)%pe = my_pe
allocate(bound%send(nsend)%is(count), bound%send(nsend)%ie(count) )
allocate(bound%send(nsend)%js(count), bound%send(nsend)%je(count) )
allocate(bound%send(nsend)%dir(count), bound%send(nsend)%rotation(count) )
allocate(bound%send(nsend)%tileMe(count))
bound%send(nsend)%is(:) = is(1:count)
bound%send(nsend)%ie(:) = ie(1:count)
bound%send(nsend)%js(:) = js(1:count)
bound%send(nsend)%je(:) = je(1:count)
bound%send(nsend)%dir(:) = dir(1:count)
bound%send(nsend)%tileMe(:) = 1
bound%send(nsend)%rotation(:) = rotation(1:count)
!write(1000+mpp_pe(),*) "send:", count, my_pe
!do i = 1, count
! write(1000+mpp_pe(),*) "send index:", is(i), ie(i), js(i), je(i), dir(i), rotation(i)
!enddo
endif
enddo
else
!--- The following does not consider the wide-halo case.
do m = 1, update%nsend
overlap => update%send(m)
if( overlap%count == 0 ) cycle
count = 0
do n = 1, overlap%count
!--- folded-north is currently not supported
if( overlap%rotation(n) == ONE_HUNDRED_EIGHTY ) cycle
if( (position == EAST .OR. position == CORNER) .AND. overlap%dir(n) == 1) then ! east
count=count+1
dir(count) = 1
rotation(count) = overlap%rotation(n)
tileMe(count) = overlap%tileMe(n)
select case( rotation(count) )
case( ZERO ) ! W -> E
is(count) = overlap%is(n) - 1
ie(count) = is(count)
js(count) = overlap%js(n)
je(count) = overlap%je(n)
case( NINETY ) ! S -> E
is(count) = overlap%is(n)
ie(count) = overlap%ie(n)
js(count) = overlap%js(n) - 1
je(count) = js(count)
end select
end if
if( (position == NORTH .OR. position == CORNER) .AND. overlap%dir(n) == 3 ) then ! south
count=count+1
dir(count) = 2
rotation(count) = overlap%rotation(n)
tileMe(count) = overlap%tileMe(n)
select case( rotation(count) )
case( ZERO ) ! N->S
is(count) = overlap%is(n)
ie(count) = overlap%ie(n)
js(count) = overlap%je(n) + 1
je(count) = js(count)
case( MINUS_NINETY ) ! E->S
is(count) = overlap%ie(n) + 1
ie(count) = is(count)
js(count) = overlap%js(n)
je(count) = overlap%je(n)
end select
end if
if( (position == EAST .OR. position == CORNER) .AND. overlap%dir(n) == 5 ) then ! west
count=count+1
dir(count) = 3
rotation(count) = overlap%rotation(n)
tileMe(count) = overlap%tileMe(n)
select case( rotation(count) )
case( ZERO ) ! E->W
is(count) = overlap%ie(n) + 1
ie(count) = is(count)
js(count) = overlap%js(n)
je(count) = overlap%je(n)
case( NINETY ) ! N->W
is(count) = overlap%is(n)
ie(count) = overlap%ie(n)
js(count) = overlap%je(n) + 1
je(count) = js(count)
end select
end if
if( (position == NORTH .OR. position == CORNER) .AND. overlap%dir(n) == 7 ) then ! north
count=count+1
dir(count) = 4
rotation(count) = overlap%rotation(n)
tileMe(count) = overlap%tileMe(n)
select case( rotation(count) )
case( ZERO ) ! S->N
is(count) = overlap%is(n)
ie(count) = overlap%ie(n)
js(count) = overlap%js(n) - 1
je(count) = js(count)
case( MINUS_NINETY ) ! W->N
is(count) = overlap%is(n) - 1
ie(count) = is(count)
js(count) = overlap%js(n)
je(count) = overlap%je(n)
end select
end if
end do ! do n =1, overlap%count
if(count>0) then
nsend = nsend + 1
bound%send(nsend)%count = count
bound%send(nsend)%pe = overlap%pe
allocate(bound%send(nsend)%is(count), bound%send(nsend)%ie(count) )
allocate(bound%send(nsend)%js(count), bound%send(nsend)%je(count) )
allocate(bound%send(nsend)%dir(count), bound%send(nsend)%rotation(count) )
allocate(bound%send(nsend)%tileMe(count))
bound%send(nsend)%is(:) = is(1:count)
bound%send(nsend)%ie(:) = ie(1:count)
bound%send(nsend)%js(:) = js(1:count)
bound%send(nsend)%je(:) = je(1:count)
bound%send(nsend)%dir(:) = dir(1:count)
bound%send(nsend)%tileMe(:) = tileMe(1:count)
bound%send(nsend)%rotation(:) = rotation(1:count)
end if
end do ! do m = 1, update%nsend
endif
!--- loop over the list of domains to find the boundary overlap for recv
bound%nsend = nsend
nrecvl(:,:) = 0
nrecv = 0
!--- compute the recv overlap (this also handles the tripolar grid).
if( domain%ntiles == 1 ) then
! currently only set up for west and south boundary
! south boundary for recv
pe_south1 = NULL_PE; pe_south2 = NULL_PE
if( position == NORTH .OR. position == CORNER ) then
inbr = ipos; jnbr = jpos - 1
if( jnbr == -1 .AND. y_cyclic) jnbr = npes_y-1
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_south1 = domain%pearray(inbr,jnbr)
is_south1 = isc + ishift; ie_south1 = iec+ishift
js_south1 = jsc; je_south1 = js_south1
endif
endif
!--- south boundary for recv: the southwest point when position is CORNER
if( position == CORNER ) then
inbr = ipos - 1; jnbr = jpos - 1
if( inbr == -1 .AND. x_cyclic) inbr = npes_x-1
if( jnbr == -1 .AND. y_cyclic) jnbr = npes_y-1
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_south2 = domain%pearray(inbr,jnbr)
is_south2 = isc; ie_south2 = is_south2
js_south2 = jsc; je_south2 = js_south2
endif
endif
!---west boundary for recv
pe_west0 = NULL_PE; pe_west1 = NULL_PE; pe_west2 = NULL_PE
if( position == EAST ) then
inbr = ipos-1; jnbr = jpos
if( inbr == -1 .AND. x_cyclic) inbr = npes_x-1
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_west1 = domain%pearray(inbr,jnbr)
is_west1 = isc; ie_west1 = is_west1
js_west1 = jsc + jshift; je_west1 = jec + jshift
endif
else if ( position == CORNER ) then ! possible split into two parts.
!--- on the fold.
if( folded_north .AND. jec == jeg .AND. ipos .GT. npes_x/2 ) then
inbr = npes_x - ipos - 1; jnbr = jpos
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_west0 = domain%pearray(inbr,jnbr)
is_west0 = isc; ie_west0 = is_west0
js_west0 = jec+jshift; je_west0 = js_west0
endif
inbr = ipos-1; jnbr = jpos
if( inbr == -1 .AND. x_cyclic) inbr = npes_x-1
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_west1 = domain%pearray(inbr,jnbr)
is_west1 = isc; ie_west1 = is_west1
js_west1 = jsc + jshift; je_west1 = jec
endif
else
inbr = ipos-1; jnbr = jpos
if( inbr == -1 .AND. x_cyclic) inbr = npes_x-1
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_west1 = domain%pearray(inbr,jnbr)
is_west1 = isc; ie_west1 = is_west1
js_west1 = jsc + jshift; je_west1 = jec+jshift
endif
endif
endif
!--- west boundary for recv: the southwest point when position is CORNER
if( position == CORNER ) then
inbr = ipos - 1; jnbr = jpos - 1
if( inbr == -1 .AND. x_cyclic) inbr = npes_x - 1
if( jnbr == -1 .AND. y_cyclic) jnbr = npes_y - 1
if( inbr .GE. 0 .AND. inbr .LT. npes_x .AND. jnbr .GE. 0 .AND. jnbr .LT. npes_y ) then
pe_west2 = domain%pearray(inbr,jnbr)
is_west2 = isc; ie_west2 = is_west2
js_west2 = jsc; je_west2 = js_west2
endif
endif
!write(1000+mpp_pe(),*)"recv south 1", pe_south1, is_south1, ie_south1, js_south1, je_south1
!write(1000+mpp_pe(),*)"recv south 2", pe_south2, is_south2, ie_south2, js_south2, je_south2
!write(1000+mpp_pe(),*)"recv west 0", pe_west0, is_west0, ie_west0, js_west0, je_west0
!write(1000+mpp_pe(),*)"recv west 1", pe_west1, is_west1, ie_west1, js_west1, je_west1
!write(1000+mpp_pe(),*)"recv west 2", pe_west2, is_west2, ie_west2, js_west2, je_west2
tMe = 1
do list = 0,nlist-1
m = mod( domain%pos+nlist-list, nlist )
count = 0
my_pe = domain%list(m)%pe
if(my_pe == pe_south1) then
count = count + 1
is(count) = is_south1; ie(count) = ie_south1
js(count) = js_south1; je(count) = je_south1
dir(count) = 2
rotation(count) = ZERO
index(count) = 1 + ishift
endif
if(my_pe == pe_south2) then
count = count + 1
is(count) = is_south2; ie(count) = ie_south2
js(count) = js_south2; je(count) = je_south2
dir(count) = 2
rotation(count) = ZERO
index(count) = 1
endif
if(my_pe == pe_west0) then
count = count + 1
is(count) = is_west0; ie(count) = ie_west0
js(count) = js_west0; je(count) = je_west0
dir(count) = 3
rotation(count) = ONE_HUNDRED_EIGHTY
index(count) = jec-jsc+1+jshift
endif
if(my_pe == pe_west1) then
count = count + 1
is(count) = is_west1; ie(count) = ie_west1
js(count) = js_west1; je(count) = je_west1
dir(count) = 3
rotation(count) = ZERO
index(count) = 1 + jshift
endif
if(my_pe == pe_west2) then
count = count + 1
is(count) = is_west2; ie(count) = ie_west2
js(count) = js_west2; je(count) = je_west2
dir(count) = 3
rotation(count) = ZERO
index(count) = 1
endif
if(count >0) then
nrecv = nrecv + 1
if(nrecv > nlist_recv) call mpp_error(FATAL, "set_bound_overlap: nrecv > nlist_recv")
bound%recv(nrecv)%count = count
bound%recv(nrecv)%pe = my_pe
allocate(bound%recv(nrecv)%is(count), bound%recv(nrecv)%ie(count) )
allocate(bound%recv(nrecv)%js(count), bound%recv(nrecv)%je(count) )
allocate(bound%recv(nrecv)%dir(count), bound%recv(nrecv)%index(count) )
allocate(bound%recv(nrecv)%tileMe(count), bound%recv(nrecv)%rotation(count) )
bound%recv(nrecv)%is(:) = is(1:count)
bound%recv(nrecv)%ie(:) = ie(1:count)
bound%recv(nrecv)%js(:) = js(1:count)
bound%recv(nrecv)%je(:) = je(1:count)
bound%recv(nrecv)%dir(:) = dir(1:count)
bound%recv(nrecv)%tileMe(:) = 1
bound%recv(nrecv)%rotation(:) = rotation(1:count)
bound%recv(nrecv)%index(:) = index(1:count)
!write(1000+mpp_pe(),*) "recv:", count, my_pe
!do i = 1, count
! write(1000+mpp_pe(),*) "recv index:", is(i), ie(i), js(i), je(i), dir(i), rotation(i)
!enddo
endif
enddo
else
do m = 1, update%nrecv
overlap => update%recv(m)
if( overlap%count == 0 ) cycle
count = 0
do n = 1, overlap%count
!--- folded-north is currently not supported
if( overlap%rotation(n) == ONE_HUNDRED_EIGHTY ) cycle
if( (position == EAST .OR. position == CORNER) .AND. overlap%dir(n) == 1) then ! east
count=count+1
dir(count) = 1
rotation(count) = overlap%rotation(n)
tileMe(count) = overlap%tileMe(n)
is(count) = overlap%is(n) - 1
ie(count) = is(count)
js(count) = overlap%js(n)
je(count) = overlap%je(n)
tMe = tileMe(count)
nrecvl(tMe, 1) = nrecvl(tMe,1) + 1
isl (tMe,1,nrecvl(tMe, 1)) = is (count)
iel (tMe,1,nrecvl(tMe, 1)) = ie (count)
jsl (tMe,1,nrecvl(tMe, 1)) = js (count)
jel (tMe,1,nrecvl(tMe, 1)) = je (count)
end if
if( (position == NORTH .OR. position == CORNER) .AND. overlap%dir(n) == 3) then ! south
count=count+1
dir(count) = 2
rotation(count) = overlap%rotation(n)
tileMe(count) = overlap%tileMe(n)
is(count) = overlap%is(n)
ie(count) = overlap%ie(n)
js(count) = overlap%je(n) + 1
je(count) = js(count)
tMe = tileMe(count)
nrecvl(tMe, 2) = nrecvl(tMe,2) + 1
isl (tMe,2,nrecvl(tMe, 2)) = is (count)
iel (tMe,2,nrecvl(tMe, 2)) = ie (count)
jsl (tMe,2,nrecvl(tMe, 2)) = js (count)
jel (tMe,2,nrecvl(tMe, 2)) = je (count)
end if
if( (position == EAST .OR. position == CORNER) .AND. overlap%dir(n) == 5) then ! west
count=count+1
dir(count) = 3
rotation(count) = overlap%rotation(n)
tileMe(count) = overlap%tileMe(n)
is(count) = overlap%ie(n) + 1
ie(count) = is(count)
js(count) = overlap%js(n)
je(count) = overlap%je(n)
tMe = tileMe(count)
nrecvl(tMe, 3) = nrecvl(tMe,3) + 1
isl (tMe,3,nrecvl(tMe, 3)) = is (count)
iel (tMe,3,nrecvl(tMe, 3)) = ie (count)
jsl (tMe,3,nrecvl(tMe, 3)) = js (count)
jel (tMe,3,nrecvl(tMe, 3)) = je (count)
end if
if( (position == NORTH .OR. position == CORNER) .AND. overlap%dir(n) == 7) then ! north
count=count+1
dir(count) = 4
rotation(count) = overlap%rotation(n)
tileMe(count) = overlap%tileMe(n)
is(count) = overlap%is(n)
ie(count) = overlap%ie(n)
js(count) = overlap%js(n) - 1
je(count) = js(count)
tMe = tileMe(count)
nrecvl(tMe, 4) = nrecvl(tMe,4) + 1
isl (tMe,4,nrecvl(tMe, 4)) = is (count)
iel (tMe,4,nrecvl(tMe, 4)) = ie (count)
jsl (tMe,4,nrecvl(tMe, 4)) = js (count)
jel (tMe,4,nrecvl(tMe, 4)) = je (count)
end if
end do ! do n = 1, overlap%count
if(count>0) then
nrecv = nrecv + 1
bound%recv(nrecv)%count = count
bound%recv(nrecv)%pe = overlap%pe
allocate(bound%recv(nrecv)%is(count), bound%recv(nrecv)%ie(count) )
allocate(bound%recv(nrecv)%js(count), bound%recv(nrecv)%je(count) )
allocate(bound%recv(nrecv)%dir(count), bound%recv(nrecv)%index(count) )
allocate(bound%recv(nrecv)%tileMe(count), bound%recv(nrecv)%rotation(count) )
bound%recv(nrecv)%is(:) = is(1:count)
bound%recv(nrecv)%ie(:) = ie(1:count)
bound%recv(nrecv)%js(:) = js(1:count)
bound%recv(nrecv)%je(:) = je(1:count)
bound%recv(nrecv)%dir(:) = dir(1:count)
bound%recv(nrecv)%tileMe(:) = tileMe(1:count)
bound%recv(nrecv)%rotation(:) = rotation(1:count)
end if
end do ! do m = 1, update%nrecv
!--- find the boundary index for each contact within each boundary
do m = 1, nrecv
do n = 1, bound%recv(m)%count
tMe = bound%recv(m)%tileMe(n)
dr = bound%recv(m)%dir(n)
bound%recv(m)%index(n) = 1
do l = 1, nrecvl(tMe,dr)
if(dr == 1 .OR. dr == 3) then ! EAST, WEST
if( bound%recv(m)%js(n) > jsl(tMe, dr, l) ) then
if( bound%recv(m)%rotation(n) == ONE_HUNDRED_EIGHTY ) then
bound%recv(m)%index(n) = bound%recv(m)%index(n) + &
max(abs(jel(tMe, dr, l)-jsl(tMe, dr, l))+1, &
abs(iel(tMe, dr, l)-isl(tMe, dr, l))+1)
else
bound%recv(m)%index(n) = bound%recv(m)%index(n) + &
max(abs(jel(tMe, dr, l)-jsl(tMe, dr, l)), &
abs(iel(tMe, dr, l)-isl(tMe, dr, l))) + 1 - jshift
endif
end if
else ! South, North
if( bound%recv(m)%is(n) > isl(tMe, dr, l) ) then
bound%recv(m)%index(n) = bound%recv(m)%index(n) + &
max(abs(jel(tMe, dr, l)-jsl(tMe, dr, l)), &
abs(iel(tMe, dr, l)-isl(tMe, dr, l))) + 1 - ishift
end if
end if
end do
end do
end do
endif
bound%nrecv = nrecv
end subroutine set_bound_overlap
!#############################################################################
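!--- fill_corner_contact derives the corner contacts (southeast, southwest, northwest and
!--- northeast) of tile tileMe from the east/south/west/north contact lists when the corner
!--- neighbor is not given explicitly through a contact line; the send/recv corner regions are
!--- appended to the argument arrays and numR/numS are incremented accordingly.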
subroutine fill_corner_contact(eCont, sCont, wCont, nCont, isg, ieg, jsg, jeg, numR, numS, tileRecv, tileSend, &
is1Recv, ie1Recv, js1Recv, je1Recv, is2Recv, ie2Recv, js2Recv, je2Recv, &
is1Send, ie1Send, js1Send, je1Send, is2Send, ie2Send, js2Send, je2Send, &
align1Recv, align2Recv, align1Send, align2Send, &
whalo, ehalo, shalo, nhalo, tileMe)
type(contact_type), dimension(:), intent(in) :: eCont, sCont, wCont, nCont
integer, dimension(:), intent(in) :: isg, ieg, jsg, jeg
integer, intent(inout) :: numR, numS
integer, dimension(:), intent(inout) :: tileRecv, tileSend
integer, dimension(:), intent(inout) :: is1Recv, ie1Recv, js1Recv, je1Recv
integer, dimension(:), intent(inout) :: is2Recv, ie2Recv, js2Recv, je2Recv
integer, dimension(:), intent(inout) :: is1Send, ie1Send, js1Send, je1Send
integer, dimension(:), intent(inout) :: is2Send, ie2Send, js2Send, je2Send
integer, dimension(:), intent(inout) :: align1Recv, align2Recv, align1Send, align2Send
integer, intent(in) :: tileMe, whalo, ehalo, shalo, nhalo
integer :: is1, ie1, js1, je1, is2, ie2, js2, je2
integer :: tn, tc, n, m
logical :: found_corner
found_corner = .false.
!--- southeast for recving
if(eCont(tileMe)%ncontact > 0) then
if(eCont(tileMe)%js1(1) == jsg(tileMe) ) then
tn = eCont(tileMe)%tile(1)
if(econt(tileMe)%js2(1) > jsg(tn) ) then ! the corner tile is tn.
if( econt(tileMe)%js2(1) - jsg(tn) < shalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: southeast tile for recv 1 is not tiled properly")
found_corner = .true.; tc = tn
is1 = eCont(tileMe)%ie1(1) + 1; je1 = eCont(tileMe)%js1(1) - 1
is2 = eCont(tileMe)%is2(1); je2 = eCont(tileMe)%js2(1) - 1
else if(sCont(tn)%ncontact >0) then ! the corner tile may be south tile of tn.
if(sCont(tn)%is1(1) == isg(tn)) then ! corner is nc.
found_corner = .true.; tc = sCont(tn)%tile(1)
is1 = eCont(tileMe)%ie1(1) + 1; je1 = eCont(tileMe)%js1(1) - 1
is2 = sCont(tn)%is2(1); je2 = sCont(tn)%je2(1)
end if
end if
end if
end if
if( .not. found_corner ) then ! not found; check the south contact
n = sCont(tileMe)%ncontact
if( n > 0) then
if( sCont(tileMe)%ie1(n) == ieg(tileMe)) then
tn = sCont(tileMe)%tile(n)
if(scont(tileMe)%ie2(n) < ieg(tn) ) then ! the corner tile is tn.
if(ieg(tn) - scont(tileMe)%ie2(n) < ehalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: southeast tile for recv 2 is not tiled properly")
found_corner = .true.; tc = tn
is1 = sCont(tileMe)%ie1(n) + 1; je1 = sCont(tileMe)%js1(n) - 1
is2 = sCont(tileMe)%ie2(n) + 1; je2 = sCont(tileMe)%je2(n)
else if(eCont(tn)%ncontact >0) then ! the corner tile may be east tile of tn.
m = eCont(tn)%ncontact
if(eCont(tn)%je1(m) == jeg(tn)) then ! corner is nc.
found_corner = .true.; tc = eCont(tn)%tile(m)
is1 = sCont(tileMe)%ie1(n) + 1; je1 = sCont(tileMe)%js1(n) - 1
is2 = eCont(tn)%is2(m); je2 = eCont(tn)%je2(m)
end if
end if
end if
end if
end if
if(found_corner) then
numR = numR + 1
tileRecv(numR) = tc; align1Recv(numR) = SOUTH_EAST; align2Recv(numR) = NORTH_WEST
is1Recv(numR) = is1; ie1Recv(numR) = is1 + ehalo - 1
js1Recv(numR) = je1 - shalo + 1; je1Recv(numR) = je1
is2Recv(numR) = is2; ie2Recv(numR) = is2 + ehalo - 1
js2Recv(numR) = je2 - shalo + 1; je2Recv(numR) = je2
end if
!--- southwest for recving
found_corner = .false.
if(wCont(tileMe)%ncontact > 0) then
if(wCont(tileMe)%js1(1) == jsg(tileMe) ) then
tn = wCont(tileMe)%tile(1)
if(wcont(tileMe)%js2(1) > jsg(tn) ) then ! the corner tile is tn.
if( wcont(tileMe)%js2(1) - jsg(tn) < shalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: southwest tile for recv 1 is not tiled properly")
found_corner = .true.; tc = tn
ie1 = wCont(tileMe)%is1(1) - 1; je1 = wCont(tileMe)%js1(1) - 1
ie2 = wCont(tileMe)%is2(1); je2 = wCont(tileMe)%js2(1) - 1
else if(sCont(tn)%ncontact >0) then ! the corner tile may be south tile of tn.
n = sCont(tn)%ncontact
if(sCont(tn)%ie1(n) == ieg(tn)) then ! corner is nc.
found_corner = .true.; tc = sCont(tn)%tile(n)
ie1 = wCont(tileMe)%is1(1) - 1; je1 = wCont(tileMe)%js1(1) - 1
ie2 = sCont(tn)%ie2(n); je2 = sCont(tn)%je2(n)
end if
end if
end if
end if
if( .not. found_corner ) then ! not found; check the south contact
n = sCont(tileMe)%ncontact
if( n > 0) then
if( sCont(tileMe)%is1(1) == isg(tileMe)) then
tn = sCont(tileMe)%tile(1)
if(sCont(tileMe)%is2(1) > isg(tn) ) then ! the corner tile is tn.
if( scont(tileMe)%is2(1)-isg(tn) < whalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: southwest tile for recv 1 is not tiled properly")
found_corner = .true.; tc = tn
ie1 = sCont(tileMe)%is1(1) - 1; je1 = sCont(tileMe)%js1(1) - 1
ie2 = sCont(tileMe)%is2(1) - 1; je2 = sCont(tileMe)%js2(1)
else if(wCont(tn)%ncontact >0) then ! the corner tile may be west tile of tn.
m = wCont(tn)%ncontact
if(wCont(tn)%je1(m) == jeg(tn)) then ! corner is nc.
found_corner = .true.; tc = wCont(tn)%tile(m)
ie1 = sCont(tileMe)%is1(1) - 1; je1 = sCont(tileMe)%js1(1) - 1
ie2 = wCont(tn)%ie2(m); je2 = wCont(tn)%je2(m)
end if
end if
end if
end if
end if
if(found_corner) then
numR = numR + 1
tileRecv(numR) = tc; align1Recv(numR) = SOUTH_WEST; align2Recv(numR) = NORTH_EAST
is1Recv(numR) = ie1 - whalo + 1; ie1Recv(numR) = ie1
js1Recv(numR) = je1 - shalo + 1; je1Recv(numR) = je1
is2Recv(numR) = ie2 - whalo + 1; ie2Recv(numR) = ie2
js2Recv(numR) = je2 - shalo + 1; je2Recv(numR) = je2
end if
!--- northwest for recving
found_corner = .false.
n = wCont(tileMe)%ncontact
if( n > 0) then
if(wCont(tileMe)%je1(n) == jeg(tileMe) ) then
tn = wCont(tileMe)%tile(n)
if(wcont(tileMe)%je2(n) < jeg(tn) ) then ! the corner tile is tn.
if( jeg(tn) - wcont(tileMe)%je2(n) < nhalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: northwest tile for recv 1 is not tiled properly")
found_corner = .true.; tc = tn
ie1 = wCont(tileMe)%is1(n) - 1; js1 = wCont(tileMe)%je1(n) + 1
ie2 = wCont(tileMe)%is2(n); js2 = wCont(tileMe)%je2(n) + 1
else if(nCont(tn)%ncontact >0) then ! the corner tile may be north tile of tn.
m = nCont(tn)%ncontact
if(nCont(tn)%ie1(m) == ieg(tn)) then ! corner is nc.
found_corner = .true.; tc = nCont(tn)%tile(m)
ie1 = wCont(tileMe)%is1(n) - 1; js1 = wCont(tileMe)%je1(n) + 1
ie2 = nCont(tn)%ie2(m); js2 = nCont(tn)%js2(m)
end if
endif
endif
end if
if( .not. found_corner ) then ! not found; check the north contact
if( nCont(tileMe)%ncontact > 0) then
if( nCont(tileMe)%is1(1) == isg(tileMe)) then
tn = nCont(tileMe)%tile(1)
if(nCont(tileMe)%is2(1) > isg(tn) ) then ! the corner tile is tn.
if( ncont(tileMe)%is2(1)-isg(tn) < whalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: northwest tile for recv 2 is not tiled properly")
found_corner = .true.; tc = tn
ie1 = nCont(tileMe)%is1(1) - 1; js1 = nCont(tileMe)%je1(1) + 1
ie2 = nCont(tileMe)%is2(1) - 1; js2 = nCont(tileMe)%js2(1)
else if(wCont(tn)%ncontact >0) then ! the corner tile may be west tile of tn.
if(wCont(tn)%js1(1) == jsg(tn)) then ! corner is nc.
found_corner = .true.; tc = wCont(tn)%tile(1)
ie1 = nCont(tileMe)%is1(1) - 1; js1 = nCont(tileMe)%je1(1) + 1
ie2 = wCont(tn)%ie2(1); js2 = wCont(tn)%js2(1)
end if
end if
end if
end if
end if
if(found_corner) then
numR = numR + 1
tileRecv(numR) = tc; align1Recv(numR) =NORTH_WEST; align2Recv(numR) = SOUTH_EAST
is1Recv(numR) = ie1 - whalo + 1; ie1Recv(numR) = ie1
js1Recv(numR) = js1; je1Recv(numR) = js1 + nhalo - 1
is2Recv(numR) = ie2 - whalo + 1; ie2Recv(numR) = ie2
js2Recv(numR) = js2; je2Recv(numR) = js2 + nhalo - 1
end if
!--- northeast for recving
found_corner = .false.
n = eCont(tileMe)%ncontact
if( n > 0) then
if(eCont(tileMe)%je1(n) == jeg(tileMe) ) then
tn = eCont(tileMe)%tile(n)
if(econt(tileMe)%je2(n) < jeg(tn) ) then ! the corner tile is tn.
if( jeg(tn) - econt(tileMe)%je2(n) < nhalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: northeast tile for recv 1 is not tiled properly")
found_corner = .true.; tc = tn
is1 = eCont(tileMe)%ie1(n) + 1; js1 = eCont(tileMe)%je1(n) + 1
is2 = eCont(tileMe)%is2(n); js2 = eCont(tileMe)%je2(n) + 1
else if(nCont(tn)%ncontact >0) then ! the corner tile may be north tile of tn.
if(nCont(tn)%is1(1) == isg(tn)) then ! corner is nc.
found_corner = .true.; tc = nCont(tn)%tile(1)
is1 = eCont(tileMe)%ie1(n) + 1; js1 = eCont(tileMe)%je1(n) + 1
is2 = nCont(tn)%is2(1); js2 = nCont(tn)%js2(1)
end if
end if
end if
end if
if( .not. found_corner ) then ! not found; check the north contact
n = nCont(tileMe)%ncontact
if( n > 0) then
if( nCont(tileMe)%ie1(n) == ieg(tileMe)) then
tn = nCont(tileMe)%tile(n)
if(nCont(tileMe)%ie2(n) < ieg(tn) ) then ! the corner tile is tn.
if(ieg(tn) - nCont(tileMe)%ie2(n) < ehalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: northeast tile for recv 2 is not tiled properly")
found_corner = .true.; tc = tn
is1 = nCont(tileMe)%ie1(n) + 1; js1 = nCont(tileMe)%je1(n) + 1
is2 = nCont(tileMe)%ie2(n) + 1; js2 = nCont(tileMe)%js2(n)
else if(eCont(tn)%ncontact >0) then ! the corner tile may be east tile of tn.
if(eCont(tn)%js1(1) == jsg(tn)) then ! corner is nc.
found_corner = .true.; tc = eCont(tn)%tile(1)
is1 = sCont(tileMe)%ie1(n) + 1; js1 = sCont(tileMe)%je1(n) + 1
is2 = eCont(tn)%is2(1); js2 = eCont(tn)%js2(1)
end if
end if
end if
end if
end if
if(found_corner) then
numR = numR + 1
tileRecv(numR) = tc; align1Recv(numR) =NORTH_EAST; align2Recv(numR) = SOUTH_WEST
is1Recv(numR) = is1; ie1Recv(numR) = is1 + ehalo - 1
js1Recv(numR) = js1; je1Recv(numR) = js1 + nhalo - 1
is2Recv(numR) = is2; ie2Recv(numR) = is2 + ehalo - 1
js2Recv(numR) = js2; je2Recv(numR) = js2 + nhalo - 1
end if
!--- to_pe's southeast for sending
do n = 1, wCont(tileMe)%ncontact
tn = wCont(tileMe)%tile(n)
if(wCont(tileMe)%js2(n) == jsg(tn) ) then
if(wcont(tileMe)%js1(n) > jsg(tileMe) ) then ! send to tile tn.
if( wcont(tileMe)%js1(n) - jsg(tileMe) < shalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: southeast tile for send 1 is not tiled properly")
numS = numS+1; tileSend(numS) = tn
align1Send(numS) = NORTH_WEST; align2Send(numS) = SOUTH_EAST
is1Send(numS) = wCont(tileMe)%is1(n); ie1Send(numS) = is1Send(numS) + ehalo - 1
je1Send(numS) = wCont(tileMe)%js1(n) - 1; js1Send(numS) = je1Send(numS) - shalo + 1
is2Send(numS) = wCont(tileMe)%ie2(n) + 1; ie2Send(numS) = is2Send(numS) + ehalo - 1
je2Send(numS) = wCont(tileMe)%js2(n) - 1; js2Send(numS) = je2Send(numS) - shalo + 1
end if
end if
end do
do n = 1, nCont(tileMe)%ncontact
tn = nCont(tileMe)%tile(n)
if(nCont(tileMe)%ie2(n) == ieg(tn) ) then
if(nCont(tileMe)%ie1(n) < ieg(tileMe) ) then ! send to tile tn.
if( ieg(tileMe) - nCont(tileMe)%ie1(n) < ehalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: southeast tile for send 2 is not tiled properly")
numS = numS+1; tileSend(numS) = tn
align1Send(numS) = NORTH_WEST; align2Send(numS) = SOUTH_EAST
is1Send(numS) = nCont(tileMe)%ie1(n) + 1; ie1Send(numS) = is1Send(numS) + ehalo - 1
je1Send(numS) = nCont(tileMe)%je1(n) ; js1Send(numS) = je1Send(numS) - shalo + 1
is2Send(numS) = nCont(tileMe)%ie2(n) + 1; ie2Send(numS) = is2Send(numS) + ehalo - 1
je2Send(numS) = nCont(tileMe)%je2(n) - 1; js2Send(numS) = je2Send(numS) - shalo + 1
end if
end if
end do
!--- find the corner overlap that is not specified through a contact line.
n = wCont(tileMe)%ncontact
found_corner = .false.
if( n > 0) then
tn = wCont(tileMe)%tile(n)
if( wCont(tileMe)%je1(n) == jeg(tileMe) .AND. wCont(tileMe)%je2(n) == jeg(tn) ) then
m = nCont(tn)%ncontact
if(m >0) then
tc = nCont(tn)%tile(m)
if( nCont(tn)%ie1(m) == ieg(tn) .AND. nCont(tn)%ie2(m) == ieg(tc) ) found_corner = .true.
end if
end if
end if
if( .not. found_corner ) then ! not found; start from the north contact
if( nCont(tileMe)%ncontact > 0) then
tn = nCont(tileMe)%tile(1)
if( nCont(tileMe)%is1(1) == isg(tileMe) .AND. nCont(tileMe)%is2(1) == isg(tn) ) then
if(wCont(tn)%ncontact >0) then
tc = wCont(tn)%tile(1)
if( wCont(tn)%js1(1) == jsg(tn) .AND. wCont(tn)%js2(1) == jsg(tc) ) found_corner = .true.
end if
end if
end if
end if
if(found_corner) then
numS = numS+1; tileSend(numS) = tc
align1Send(numS) = NORTH_WEST; align2Send(numS) = SOUTH_EAST
is1Send(numS) = isg(tileMe); ie1Send(numS) = is1Send(numS) + ehalo - 1
je1Send(numS) = jeg(tileMe); js1Send(numS) = je1Send(numS) - shalo + 1
is2Send(numS) = ieg(tc) + 1; ie2Send(numS) = is2Send(numS) + ehalo - 1
je2Send(numS) = jsg(tc) - 1; js2Send(numS) = je2Send(numS) - shalo + 1
end if
!--- to_pe's southwest for sending
do n = 1, eCont(tileMe)%ncontact
tn = eCont(tileMe)%tile(n)
if(eCont(tileMe)%js2(n) == jsg(tn) ) then
if(econt(tileMe)%js1(n) > jsg(tileMe) ) then ! send to tile tn.
if( econt(tileMe)%js1(n) - jsg(tileMe) < shalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: southwest tile for send 1 is not tiled properly")
numS = numS+1; tileSend(numS) = tn
align1Send(numS) = NORTH_EAST; align2Send(numS) = SOUTH_WEST
ie1Send(numS) = eCont(tileMe)%ie1(n); is1Send(numS) = ie1Send(numS) - whalo + 1
je1Send(numS) = eCont(tileMe)%js1(n) - 1; js1Send(numS) = je1Send(numS) - shalo + 1
ie2Send(numS) = eCont(tileMe)%is2(n) - 1; is2Send(numS) = ie2Send(numS) - whalo + 1
je2Send(numS) = eCont(tileMe)%js2(n) - 1; js2Send(numS) = je2Send(numS) - shalo + 1
end if
end if
end do
do n = 1, nCont(tileMe)%ncontact
tn = nCont(tileMe)%tile(n)
if(nCont(tileMe)%is2(n) == isg(tn) ) then
if(ncont(tileMe)%is1(n) > isg(tileMe) ) then ! send to tile tn.
if( ncont(tileMe)%is1(n) - isg(tileMe) < whalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: southwest tile for send 2 is not tiled properly")
numS = numS+1; tileSend(numS) = tn
align1Send(numS) = NORTH_EAST; align2Send(numS) = SOUTH_WEST
ie1Send(numS) = nCont(tileMe)%is1(n) - 1; is1Send(numS) = ie1Send(numS) - whalo + 1
je1Send(numS) = nCont(tileMe)%je1(n) ; js1Send(numS) = je1Send(numS) - shalo + 1
ie2Send(numS) = nCont(tileMe)%is2(n) - 1; is2Send(numS) = ie2Send(numS) - whalo + 1
je2Send(numS) = nCont(tileMe)%js2(n) - 1; js2Send(numS) = je2Send(numS) - shalo + 1
end if
end if
end do
!--- find the corner overlap that is not specified through a contact line.
n = eCont(tileMe)%ncontact
found_corner = .false.
if( n > 0) then
tn = eCont(tileMe)%tile(n)
if( eCont(tileMe)%je1(n) == jeg(tileMe) .AND. eCont(tileMe)%je2(n) == jeg(tn) ) then
if(nCont(tn)%ncontact >0) then
tc = nCont(tn)%tile(1)
if( nCont(tn)%is1(1) == isg(tn) .AND. nCont(tn)%is2(1) == isg(tc) ) found_corner = .true.
end if
end if
end if
if( .not. found_corner ) then ! not found; start from the north contact
n = nCont(tileMe)%ncontact
if( n > 0) then
tn = nCont(tileMe)%tile(n)
if( nCont(tileMe)%ie1(n) == ieg(tileMe) .AND. nCont(tileMe)%ie2(n) == ieg(tn) ) then
if(eCont(tn)%ncontact >0) then
tc = eCont(tn)%tile(1)
if( eCont(tn)%js1(1) == jsg(tn) .AND. eCont(tn)%js2(1) == jsg(tc) ) found_corner = .true.
end if
end if
end if
end if
if(found_corner) then
numS = numS+1; tileSend(numS) = tc
align1Send(numS) = NORTH_EAST; align2Send(numS) = SOUTH_WEST
ie1Send(numS) = ieg(tileMe); is1Send(numS) = ie1Send(numS) - whalo + 1
je1Send(numS) = jeg(tileMe); js1Send(numS) = je1Send(numS) - shalo + 1
ie2Send(numS) = isg(tc) - 1; is2Send(numS) = ie2Send(numS) - whalo + 1
je2Send(numS) = jsg(tc) - 1; js2Send(numS) = je2Send(numS) - shalo + 1
end if
!--- to_pe's northwest for sending
do n = 1, eCont(tileMe)%ncontact
tn = eCont(tileMe)%tile(n)
if(eCont(tileMe)%je2(n) == jeg(tn) ) then
if(econt(tileMe)%je1(n) < jeg(tileMe) ) then ! send to tile tn.
if( jeg(tileMe) - econt(tileMe)%je1(n) < nhalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: northwest tile for send 1 is not tiled properly")
numS = numS+1; tileSend(numS) = tn
align1Send(numS) = SOUTH_EAST; align2Send(numS) = NORTH_WEST
ie1Send(numS) = eCont(tileMe)%ie1(n) ; is1Send(numS) = ie1Send(numS) - whalo + 1
js1Send(numS) = eCont(tileMe)%je1(n) + 1; je1Send(numS) = js1Send(numS) + nhalo - 1
ie2Send(numS) = eCont(tileMe)%is2(n) - 1; is2Send(numS) = ie2Send(numS) - whalo + 1
js2Send(numS) = eCont(tileMe)%je2(n) + 1; je2Send(numS) = js2Send(numS) + nhalo - 1
end if
end if
end do
do n = 1, sCont(tileMe)%ncontact
tn = sCont(tileMe)%tile(n)
if(sCont(tileMe)%is2(n) == isg(tn) ) then
if(scont(tileMe)%is1(n) > isg(tileMe) ) then ! send to tile tn.
if( scont(tileMe)%is1(n) - isg(tileMe) < whalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: southwest tile for send 2 is not tiled properly")
numS = numS+1; tileSend(numS) = tn
align1Send(numS) = SOUTH_EAST; align2Send(numS) = NORTH_WEST
ie1Send(numS) = sCont(tileMe)%is1(n) - 1; is1Send(numS) = ie1Send(numS) - whalo + 1
js1Send(numS) = sCont(tileMe)%je1(n) ; je1Send(numS) = js1Send(numS) + nhalo - 1
ie2Send(numS) = sCont(tileMe)%is2(n) - 1; is2Send(numS) = ie2Send(numS) - whalo + 1
js2Send(numS) = sCont(tileMe)%je2(n) + 1; je2Send(numS) = js2Send(numS) + nhalo - 1
end if
end if
end do
!--- find the corner overlap that is not specified through a contact line.
n = eCont(tileMe)%ncontact
found_corner = .false.
if( n > 0) then
tn = eCont(tileMe)%tile(1)
if( eCont(tileMe)%js1(1) == jsg(tileMe) .AND. eCont(tileMe)%js2(1) == jsg(tn) ) then
if(sCont(tn)%ncontact >0) then
tc = sCont(tn)%tile(1)
if( sCont(tn)%is1(1) == isg(tn) .AND. sCont(tn)%is2(1) == isg(tc) ) found_corner = .true.
end if
end if
end if
if( .not. found_corner ) then ! not found; start from the south contact
n = sCont(tileMe)%ncontact
found_corner = .false.
if( n > 0) then
tn = sCont(tileMe)%tile(n)
if( sCont(tileMe)%ie1(n) == ieg(tileMe) .AND. sCont(tileMe)%ie2(n) == ieg(tn) ) then
if(eCont(tn)%ncontact >0) then
tc = eCont(tn)%tile(n)
if( eCont(tn)%je1(n) == jeg(tn) .AND. eCont(tn)%je2(n) == jeg(tc) ) found_corner = .true.
end if
end if
end if
end if
if(found_corner) then
numS = numS+1; tileSend(numS) = tc
align1Send(numS) = SOUTH_EAST; align2Send(numS) = NORTH_WEST
ie1Send(numS) = ieg(tileMe); is1Send(numS) = ie1Send(numS) - whalo + 1
js1Send(numS) = jsg(tileMe); je1Send(numS) = js1Send(numS) + nhalo - 1
ie2Send(numS) = isg(tc) - 1; is2Send(numS) = ie2Send(numS) - whalo + 1
js2Send(numS) = jeg(tc) + 1; je2Send(numS) = js2Send(numS) + nhalo - 1
end if
!--- to_pe's northeast for sending
do n = 1, wCont(tileMe)%ncontact
tn = wCont(tileMe)%tile(n)
if(wCont(tileMe)%je2(n) == jeg(tn) ) then
if(wcont(tileMe)%je1(n) < jeg(tileMe) ) then ! send to tile tn.
if( jeg(tileMe) - wcont(tileMe)%je1(n) < nhalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: northeast tile for send 1 is not tiled properly")
numS = numS+1; tileSend(numS) = tn
align1Send(numS) = SOUTH_WEST; align2Send(numS) = NORTH_EAST
is1Send(numS) = wCont(tileMe)%is1(n) ; ie1Send(numS) = is1Send(numS) + ehalo - 1
js1Send(numS) = wCont(tileMe)%je1(n) + 1; je1Send(numS) = js1Send(numS) + nhalo - 1
is2Send(numS) = wCont(tileMe)%ie2(n) + 1; ie2Send(numS) = is2Send(numS) + ehalo - 1
js2Send(numS) = wCont(tileMe)%je2(n) + 1; je2Send(numS) = js2Send(numS) + nhalo - 1
end if
end if
end do
do n = 1, sCont(tileMe)%ncontact
tn = sCont(tileMe)%tile(n)
if(sCont(tileMe)%ie2(n) == ieg(tn) ) then
if(sCont(tileMe)%ie1(n) < ieg(tileMe) ) then ! send to tile tn.
if( ieg(tileMe) - sCont(tileMe)%ie1(n) < ehalo ) call mpp_error(FATAL, &
"mpp_domains_define.inc: southeast tile for send 2 is not tiled properly")
numS = numS+1; tileSend(numS) = tn
align1Send(numS) = SOUTH_WEST; align2Send(numS) = NORTH_EAST
is1Send(numS) = sCont(tileMe)%ie1(n) + 1; ie1Send(numS) = is1Send(numS) + ehalo - 1
js1Send(numS) = sCont(tileMe)%js1(n) ; je1Send(numS) = js1Send(numS) + nhalo - 1
is2Send(numS) = sCont(tileMe)%ie2(n) + 1; ie2Send(numS) = is2Send(numS) + ehalo - 1
js2Send(numS) = sCont(tileMe)%je2(n) + 1; je2Send(numS) = js2Send(numS) + nhalo - 1
end if
end if
end do
!--- find the corner overlap that is not specified through a contact line.
n = wCont(tileMe)%ncontact
found_corner = .false.
if( n > 0) then
tn = wCont(tileMe)%tile(1)
if( wCont(tileMe)%js1(1) == jsg(tileMe) .AND. wCont(tileMe)%js2(1) == jsg(tn) ) then
m = sCont(tn)%ncontact
if(m >0) then
tc = sCont(tn)%tile(m)
if( sCont(tn)%ie1(m) == ieg(tn) .AND. sCont(tn)%ie2(m) == ieg(tc) ) found_corner = .true.
end if
end if
end if
if( .not. found_corner ) then ! not found; start from the south contact
n = sCont(tileMe)%ncontact
found_corner = .false.
if( n > 0) then
tn = sCont(tileMe)%tile(1)
if( sCont(tileMe)%is1(1) == isg(tileMe) .AND. sCont(tileMe)%is2(1) == isg(tn) ) then
m = wCont(tn)%ncontact
if( m > 0 ) then
tc = wCont(tn)%tile(m)
if( wCont(tn)%je1(m) == jeg(tn) .AND. wCont(tn)%je2(m) == jeg(tc) ) found_corner = .true.
end if
end if
end if
end if
if(found_corner) then
numS = numS+1; tileSend(numS) = tc
align1Send(numS) = SOUTH_WEST; align2Send(numS) = NORTH_EAST
is1Send(numS) = isg(tileMe); ie1Send(numS) = is1Send(numS) + ehalo - 1
js1Send(numS) = jsg(tileMe); je1Send(numS) = js1Send(numS) + nhalo - 1
is2Send(numS) = ieg(tc) + 1; ie2Send(numS) = is2Send(numS) + ehalo - 1
js2Send(numS) = jeg(tc) + 1; je2Send(numS) = js2Send(numS) + nhalo - 1
end if
end subroutine fill_corner_contact
!--- find the alignment direction; if the contact indices are reversed, swap them.
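!--- For example (illustrative numbers only): a contact given as (is,ie,js,je) = (ieg,ieg,20,1)
!--- on a tile with global indices (isg:ieg, jsg:jeg) is classified as an EAST alignment and the
!--- reversed j-range is swapped to (js,je) = (1,20).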
subroutine check_alignment( is, ie, js, je, isg, ieg, jsg, jeg, alignment )
integer, intent(inout) :: is, ie, js, je, isg, ieg, jsg, jeg
integer, intent(out) :: alignment
integer :: i, j
if ( is == ie ) then ! x-alignment
if ( is == isg ) then
alignment = WEST
else if ( is == ieg ) then
alignment = EAST
else
call mpp_error(FATAL, 'mpp_domains_define.inc: The contact region is not on the x-boundary of the tile')
end if
if ( js > je ) then
j = js; js = je; je = j
end if
else if ( js == je ) then ! y-alignment
if ( js == jsg ) then
alignment = SOUTH
else if ( js == jeg ) then
alignment = NORTH
else
call mpp_error(FATAL, 'mpp_domains_define.inc: The contact region is not on the y-boundary of the tile')
end if
if ( is > ie ) then
i = is; is = ie; ie = i
end if
else
call mpp_error(FATAL, 'mpp_domains_define.inc: The contact region should be line contact' )
end if
end subroutine check_alignment
!#####################################################################
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_MODIFY_DOMAIN: modify extent of domain !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
!
!
!
!
!
!
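! A minimal usage sketch (illustrative only; the names "din", "dout" and the halo width of 1
! are hypothetical): create dout as a copy of din but with one-point halos on both sides.
! type(domain1D) :: din, dout
! call mpp_modify_domain(din, dout, hbegin=1, hend=1)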
subroutine mpp_modify_domain1D(domain_in,domain_out,cbegin,cend,gbegin,gend, hbegin, hend)
!
type(domain1D), intent(in) :: domain_in
type(domain1D), intent(inout) :: domain_out
integer, intent(in), optional :: hbegin, hend ! halo size
integer, intent(in), optional :: cbegin, cend ! extent of compute_domain
integer, intent(in), optional :: gbegin, gend ! extent of global domain
integer :: ndivs, global_indices(2) !(/ isg, ieg /)
integer :: flag
! get the global indices of the input domain
global_indices(1) = domain_in%global%begin; global_indices(2) = domain_in%global%end
! get the layout
ndivs = size(domain_in%list(:))
! get the flag
flag = 0
if(domain_in%cyclic) flag = flag + CYCLIC_GLOBAL_DOMAIN
if(domain_in%data%is_global) flag = flag + GLOBAL_DATA_DOMAIN
call mpp_define_domains( global_indices, ndivs, domain_out, pelist = domain_in%list(:)%pe, &
flags = flag, begin_halo = hbegin, end_halo = hend, extent = domain_in%list(:)%compute%size )
if(present(cbegin)) domain_out%compute%begin = cbegin
if(present(cend)) domain_out%compute%end = cend
domain_out%compute%size = domain_out%compute%end - domain_out%compute%begin + 1
if(present(gbegin)) domain_out%global%begin = gbegin
if(present(gend)) domain_out%global%end = gend
domain_out%global%size = domain_out%global%end - domain_out%global%begin + 1
end subroutine mpp_modify_domain1D
!
!#######################################################################
!----------------------------------------------------------------------------------
!
!
!
!
!
!
!
!
!
!
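! The routine below modifies the extents of a 2D domain. A minimal usage sketch,
! assuming the generic name mpp_modify_domain resolves to this routine and
! illustrative halo widths (supplying any halo argument selects the
! mpp_define_domains branch below):
!   call mpp_modify_domain(domain_in, domain_out, whalo=2, ehalo=2, shalo=2, nhalo=2)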
subroutine mpp_modify_domain2D(domain_in, domain_out, isc, iec, jsc, jec, isg, ieg, jsg, jeg, whalo, ehalo, shalo, nhalo)
!
type(domain2D), intent(in) :: domain_in
type(domain2D), intent(inout) :: domain_out
integer, intent(in), optional :: isc, iec, jsc, jec
integer, intent(in), optional :: isg, ieg, jsg, jeg
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
integer :: global_indices(4), layout(2)
integer :: xflag, yflag, nlist, i
if(present(whalo) .or. present(ehalo) .or. present(shalo) .or. present(nhalo) ) then
! get the global indices of the input domain
global_indices(1) = domain_in%x(1)%global%begin; global_indices(2) = domain_in%x(1)%global%end
global_indices(3) = domain_in%y(1)%global%begin; global_indices(4) = domain_in%y(1)%global%end
! get the layout
layout(1) = size(domain_in%x(1)%list(:)); layout(2) = size(domain_in%y(1)%list(:))
! get the flag
xflag = 0; yflag = 0
if(domain_in%x(1)%cyclic) xflag = xflag + CYCLIC_GLOBAL_DOMAIN
if(domain_in%x(1)%data%is_global) xflag = xflag + GLOBAL_DATA_DOMAIN
if(domain_in%y(1)%cyclic) yflag = yflag + CYCLIC_GLOBAL_DOMAIN
if(domain_in%y(1)%data%is_global) yflag = yflag + GLOBAL_DATA_DOMAIN
call mpp_define_domains( global_indices, layout, domain_out, pelist = domain_in%list(:)%pe, &
xflags = xflag, yflags = yflag, whalo = whalo, ehalo = ehalo, &
shalo = shalo, nhalo = nhalo, &
xextent = domain_in%x(1)%list(:)%compute%size, &
yextent = domain_in%y(1)%list(:)%compute%size, &
symmetry=domain_in%symmetry, &
maskmap = domain_in%pearray .NE. NULL_PE )
domain_out%ntiles = domain_in%ntiles
domain_out%tile_id = domain_in%tile_id
else
call mpp_define_null_domain(domain_out)
nlist = size(domain_in%list(:))
allocate(domain_out%list(0:nlist-1) )
do i = 0, nlist-1
allocate(domain_out%list(i)%tile_id(1))
domain_out%list(i)%tile_id(1) = 1
enddo
call mpp_modify_domain(domain_in%x(1), domain_out%x(1), isc, iec, isg, ieg)
call mpp_modify_domain(domain_in%y(1), domain_out%y(1), jsc, jec, jsg, jeg)
domain_out%ntiles = domain_in%ntiles
domain_out%tile_id = domain_in%tile_id
endif
end subroutine mpp_modify_domain2D
!
!#####################################################################
subroutine mpp_define_null_domain1D(domain)
type(domain1D), intent(inout) :: domain
domain%global%begin = -1; domain%global%end = -1; domain%global%size = 0
domain%data%begin = -1; domain%data%end = -1; domain%data%size = 0
domain%compute%begin = -1; domain%compute%end = -1; domain%compute%size = 0
domain%pe = NULL_PE
end subroutine mpp_define_null_domain1D
!#####################################################################
subroutine mpp_define_null_domain2D(domain)
type(domain2D), intent(inout) :: domain
allocate(domain%x(1), domain%y(1), domain%tile_id(1))
call mpp_define_null_domain(domain%x(1))
call mpp_define_null_domain(domain%y(1))
domain%pe = NULL_PE
domain%tile_id(1) = 1
domain%ntiles = 1
domain%max_ntile_pe = 1
domain%ncontacts = 0
end subroutine mpp_define_null_domain2D
!####################################################################
subroutine mpp_deallocate_domain1D(domain)
type(domain1D), intent(inout) :: domain
if(ASSOCIATED(domain%list)) deallocate(domain%list)
end subroutine mpp_deallocate_domain1D
!####################################################################
subroutine mpp_deallocate_domain2D(domain)
type(domain2D), intent(inout) :: domain
call deallocate_domain2D_local(domain)
if(ASSOCIATED(domain%io_domain) ) then
call deallocate_domain2D_local(domain%io_domain)
deallocate(domain%io_domain)
endif
end subroutine mpp_deallocate_domain2D
!##################################################################
subroutine deallocate_domain2D_local(domain)
type(domain2D), intent(inout) :: domain
integer :: i, ntileMe
ntileMe = size(domain%x(:))
if(ASSOCIATED(domain%pearray))deallocate(domain%pearray)
do i = 1, ntileMe
call mpp_deallocate_domain1D(domain%x(i))
call mpp_deallocate_domain1D(domain%y(i))
enddo
deallocate(domain%x, domain%y, domain%tile_id)
if(ASSOCIATED(domain%list)) then
do i = 0, size(domain%list(:))-1
deallocate(domain%list(i)%x, domain%list(i)%y, domain%list(i)%tile_id)
enddo
deallocate(domain%list)
endif
if(ASSOCIATED(domain%check_C)) call deallocate_overlapSpec(domain%check_C)
if(ASSOCIATED(domain%check_E)) call deallocate_overlapSpec(domain%check_E)
if(ASSOCIATED(domain%check_N)) call deallocate_overlapSpec(domain%check_N)
if(ASSOCIATED(domain%bound_C)) call deallocate_overlapSpec(domain%bound_C)
if(ASSOCIATED(domain%bound_E)) call deallocate_overlapSpec(domain%bound_E)
if(ASSOCIATED(domain%bound_N)) call deallocate_overlapSpec(domain%bound_N)
if(ASSOCIATED(domain%update_T)) call deallocate_overlapSpec(domain%update_T)
if(ASSOCIATED(domain%update_E)) call deallocate_overlapSpec(domain%update_E)
if(ASSOCIATED(domain%update_C)) call deallocate_overlapSpec(domain%update_C)
if(ASSOCIATED(domain%update_N)) call deallocate_overlapSpec(domain%update_N)
end subroutine deallocate_domain2D_local
!####################################################################
subroutine allocate_check_overlap(overlap, count)
type(overlap_type), intent(inout) :: overlap
integer, intent(in ) :: count
overlap%count = 0
overlap%pe = NULL_PE
if(associated(overlap%tileMe)) call mpp_error(FATAL, &
"allocate_check_overlap(mpp_domains_define): overlap is already been allocated")
if(count < 1) call mpp_error(FATAL, &
"allocate_check_overlap(mpp_domains_define): count should be a positive integer")
allocate(overlap%tileMe (count), overlap%dir(count) )
allocate(overlap%is (count), overlap%ie (count) )
allocate(overlap%js (count), overlap%je (count) )
allocate(overlap%rotation(count) )
overlap%rotation = ZERO
end subroutine allocate_check_overlap
!#######################################################################
subroutine insert_check_overlap(overlap, pe, tileMe, dir, rotation, is, ie, js, je)
type(overlap_type), intent(inout) :: overlap
integer, intent(in ) :: pe
integer, intent(in ) :: tileMe, dir, rotation
integer, intent(in ) :: is, ie, js, je
integer :: count
overlap%count = overlap%count + 1
count = overlap%count
if(.NOT. associated(overlap%tileMe)) call mpp_error(FATAL, &
"mpp_domains_define.inc(insert_check_overlap): overlap is not assigned any memory")
if(count > size(overlap%tileMe(:)) ) call mpp_error(FATAL, &
"mpp_domains_define.inc(insert_check_overlap): overlap%count is greater than size(overlap%tileMe)")
if( overlap%pe == NULL_PE ) then
overlap%pe = pe
else
if(overlap%pe .NE. pe) call mpp_error(FATAL, &
"mpp_domains_define.inc(insert_check_overlap): mismatch on pe")
endif
overlap%tileMe (count) = tileMe
overlap%dir (count) = dir
overlap%rotation(count) = rotation
overlap%is (count) = is
overlap%ie (count) = ie
overlap%js (count) = js
overlap%je (count) = je
end subroutine insert_check_overlap
!#######################################################################
!--- this routine adds overlap_in to overlap_out
subroutine add_check_overlap( overlap_out, overlap_in)
type(overlap_type), intent(inout) :: overlap_out
type(overlap_type), intent(in ) :: overlap_in
type(overlap_type) :: overlap
integer :: count, count_in, count_out
! if overlap_out%count == 0, then just copy overlap_in to overlap_out
count_in = overlap_in %count
count_out = overlap_out%count
count = count_in+count_out
if(count_in == 0) call mpp_error(FATAL, &
"add_check_overlap(mpp_domains_define): overlap_in%count is zero")
if(count_out == 0) then
if(associated(overlap_out%tileMe)) call mpp_error(FATAL, &
"add_check_overlap(mpp_domains_define): overlap is already been allocated but count=0")
call allocate_check_overlap(overlap_out, count_in)
overlap_out%pe = overlap_in%pe
else ! need to expand the dimension size of overlap
call allocate_check_overlap(overlap, count_out)
if(overlap_out%pe .NE. overlap_in%pe) call mpp_error(FATAL, &
"mpp_domains_define.inc(add_check_overlap): mismatch of pe between overlap_in and overlap_out")
overlap%tileMe (1:count_out) = overlap_out%tileMe (1:count_out)
overlap%is (1:count_out) = overlap_out%is (1:count_out)
overlap%ie (1:count_out) = overlap_out%ie (1:count_out)
overlap%js (1:count_out) = overlap_out%js (1:count_out)
overlap%je (1:count_out) = overlap_out%je (1:count_out)
overlap%dir (1:count_out) = overlap_out%dir (1:count_out)
overlap%rotation (1:count_out) = overlap_out%rotation (1:count_out)
call deallocate_overlap_type(overlap_out)
call allocate_check_overlap(overlap_out, count)
overlap_out%tileMe (1:count_out) = overlap%tileMe (1:count_out)
overlap_out%is (1:count_out) = overlap%is (1:count_out)
overlap_out%ie (1:count_out) = overlap%ie (1:count_out)
overlap_out%js (1:count_out) = overlap%js (1:count_out)
overlap_out%je (1:count_out) = overlap%je (1:count_out)
overlap_out%dir (1:count_out) = overlap%dir (1:count_out)
overlap_out%rotation (1:count_out) = overlap%rotation (1:count_out)
call deallocate_overlap_type(overlap)
end if
overlap_out%count = count
overlap_out%tileMe (count_out+1:count) = overlap_in%tileMe (1:count_in)
overlap_out%is (count_out+1:count) = overlap_in%is (1:count_in)
overlap_out%ie (count_out+1:count) = overlap_in%ie (1:count_in)
overlap_out%js (count_out+1:count) = overlap_in%js (1:count_in)
overlap_out%je (count_out+1:count) = overlap_in%je (1:count_in)
overlap_out%dir (count_out+1:count) = overlap_in%dir (1:count_in)
overlap_out%rotation (count_out+1:count) = overlap_in%rotation (1:count_in)
end subroutine add_check_overlap
!####################################################################
subroutine init_overlap_type(overlap)
type(overlap_type), intent(inout) :: overlap
overlap%count = 0
overlap%pe = NULL_PE
end subroutine init_overlap_type
!####################################################################
subroutine allocate_update_overlap( overlap, count)
type(overlap_type), intent(inout) :: overlap
integer, intent(in ) :: count
overlap%count = 0
overlap%pe = NULL_PE
if(associated(overlap%tileMe)) call mpp_error(FATAL, &
"allocate_update_overlap(mpp_domains_define): overlap is already been allocated")
if(count < 1) call mpp_error(FATAL, &
"allocate_update_overlap(mpp_domains_define): count should be a positive integer")
allocate(overlap%tileMe (count), overlap%tileNbr (count) )
allocate(overlap%is (count), overlap%ie (count) )
allocate(overlap%js (count), overlap%je (count) )
allocate(overlap%dir (count), overlap%rotation(count) )
allocate(overlap%from_contact(count), overlap%msgsize (count) )
overlap%rotation = ZERO
overlap%from_contact = .FALSE.
end subroutine allocate_update_overlap
!#####################################################################################
subroutine insert_update_overlap(overlap, pe, is1, ie1, js1, je1, is2, ie2, js2, je2, dir, reverse, symmetry)
type(overlap_type), intent(inout) :: overlap
integer, intent(in ) :: pe
integer, intent(in ) :: is1, ie1, js1, je1, is2, ie2, js2, je2
integer, intent(in ) :: dir
logical, optional, intent(in ) :: reverse, symmetry
logical :: is_reverse, is_symmetry, is_overlapped
integer :: is, ie, js, je, count
is_reverse = .FALSE.
if(PRESENT(reverse)) is_reverse = reverse
is_symmetry = .FALSE.
if(PRESENT(symmetry)) is_symmetry = symmetry
is = max(is1,is2); ie = min(ie1,ie2)
js = max(js1,js2); je = min(je1,je2)
is_overlapped = .false.
!--- avoid unnecessary (duplicate) overlaps for a symmetric domain
if(is_symmetry .AND. (dir == 1 .OR. dir == 5)) then ! x-direction
if( ie .GE. is .AND. je .GT. js ) is_overlapped = .true.
else if(is_symmetry .AND. (dir == 3 .OR. dir == 7)) then ! y-direction
if( ie .GT. is .AND. je .GE. js ) is_overlapped = .true.
else if(ie.GE.is .AND. je.GE.js )then
is_overlapped = .true.
endif
if(is_overlapped) then
if( overlap%count == 0 ) then
overlap%pe = pe
else
if(overlap%pe .NE. pe) call mpp_error(FATAL, &
"mpp_domains_define.inc(insert_update_overlap): mismatch on pe")
endif
overlap%count = overlap%count+1
count = overlap%count
if(count > MAXOVERLAP) call mpp_error(FATAL, &
"mpp_domains_define.inc(insert_update_overlap): number of overlap is greater than MAXOVERLAP, increase MAXOVERLAP")
overlap%is(count) = is
overlap%ie(count) = ie
overlap%js(count) = js
overlap%je(count) = je
overlap%tileMe (count) = 1
overlap%tileNbr(count) = 1
overlap%dir(count) = dir
if(is_reverse) then
overlap%rotation(count) = ONE_HUNDRED_EIGHTY
else
overlap%rotation(count) = ZERO
end if
end if
end subroutine insert_update_overlap
!#####################################################################################
subroutine insert_overlap_type(overlap, pe, tileMe, tileNbr, is, ie, js, je, dir, &
rotation, from_contact)
type(overlap_type), intent(inout) :: overlap
integer, intent(in ) :: tileMe, tileNbr, pe
integer, intent(in ) :: is, ie, js, je
integer, intent(in ) :: dir, rotation
logical, intent(in ) :: from_contact
integer :: count
if( overlap%count == 0 ) then
overlap%pe = pe
else
if(overlap%pe .NE. pe) call mpp_error(FATAL, &
"mpp_domains_define.inc(insert_overlap_type): mismatch on pe")
endif
overlap%count = overlap%count+1
count = overlap%count
if(count > MAXOVERLAP) call mpp_error(FATAL, &
"mpp_domains_define.inc(insert_overlap_type): number of overlap is greater than MAXOVERLAP, increase MAXOVERLAP")
overlap%tileMe (count) = tileMe
overlap%tileNbr (count) = tileNbr
overlap%is (count) = is
overlap%ie (count) = ie
overlap%js (count) = js
overlap%je (count) = je
overlap%dir (count) = dir
overlap%rotation (count) = rotation
overlap%from_contact(count) = from_contact
overlap%msgsize (count) = (ie-is+1)*(je-js+1)
end subroutine insert_overlap_type
!#######################################################################
subroutine deallocate_overlap_type( overlap)
type(overlap_type), intent(inout) :: overlap
if(overlap%count == 0) then
if( .NOT. associated(overlap%tileMe)) return
else
if( .NOT. associated(overlap%tileMe)) call mpp_error(FATAL, &
"deallocate_overlap_type(mpp_domains_define): overlap is not been allocated")
endif
if(ASSOCIATED(overlap%tileMe)) deallocate(overlap%tileMe)
if(ASSOCIATED(overlap%tileNbr)) deallocate(overlap%tileNbr)
if(ASSOCIATED(overlap%is)) deallocate(overlap%is)
if(ASSOCIATED(overlap%ie)) deallocate(overlap%ie)
if(ASSOCIATED(overlap%js)) deallocate(overlap%js)
if(ASSOCIATED(overlap%je)) deallocate(overlap%je)
if(ASSOCIATED(overlap%dir)) deallocate(overlap%dir)
if(ASSOCIATED(overlap%rotation)) deallocate(overlap%rotation)
if(ASSOCIATED(overlap%from_contact)) deallocate(overlap%from_contact)
if(ASSOCIATED(overlap%msgsize)) deallocate(overlap%msgsize)
overlap%count = 0
end subroutine deallocate_overlap_type
!#######################################################################
subroutine deallocate_overlapSpec(overlap)
type(overlapSpec), intent(inout) :: overlap
integer :: n
if(ASSOCIATED(overlap%send)) then
do n = 1, size(overlap%send(:))
call deallocate_overlap_type(overlap%send(n))
enddo
deallocate(overlap%send)
endif
if(ASSOCIATED(overlap%recv)) then
do n = 1, size(overlap%recv(:))
call deallocate_overlap_type(overlap%recv(n))
enddo
deallocate(overlap%recv)
endif
end subroutine deallocate_overlapSpec
!#######################################################################
!--- this routine adds overlap_in to overlap_out
subroutine add_update_overlap( overlap_out, overlap_in)
type(overlap_type), intent(inout) :: overlap_out
type(overlap_type), intent(in ) :: overlap_in
type(overlap_type) :: overlap
integer :: count, count_in, count_out, n
! if overlap_out%count == 0, then just copy overlap_in to overlap_out
count_in = overlap_in %count
count_out = overlap_out%count
count = count_in+count_out
if(count_in == 0) call mpp_error(FATAL, &
"mpp_domains_define.inc(add_update_overlap): overlap_in%count is zero")
if(count_out == 0) then
if(associated(overlap_out%tileMe)) call mpp_error(FATAL, &
"mpp_domains_define.inc(add_update_overlap): overlap is already been allocated but count=0")
call allocate_update_overlap(overlap_out, count_in)
overlap_out%pe = overlap_in%pe
else ! need to expand the dimension size of overlap
if(overlap_in%pe .NE. overlap_out%pe) call mpp_error(FATAL, &
"mpp_domains_define.inc(add_update_overlap): mismatch of pe between overlap_in and overlap_out")
call allocate_update_overlap(overlap, count_out)
overlap%tileMe (1:count_out) = overlap_out%tileMe (1:count_out)
overlap%tileNbr (1:count_out) = overlap_out%tileNbr (1:count_out)
overlap%is (1:count_out) = overlap_out%is (1:count_out)
overlap%ie (1:count_out) = overlap_out%ie (1:count_out)
overlap%js (1:count_out) = overlap_out%js (1:count_out)
overlap%je (1:count_out) = overlap_out%je (1:count_out)
overlap%dir (1:count_out) = overlap_out%dir (1:count_out)
overlap%rotation (1:count_out) = overlap_out%rotation (1:count_out)
overlap%from_contact(1:count_out) = overlap_out%from_contact(1:count_out)
call deallocate_overlap_type(overlap_out)
call allocate_update_overlap(overlap_out, count)
overlap_out%tileMe (1:count_out) = overlap%tileMe (1:count_out)
overlap_out%tileNbr (1:count_out) = overlap%tileNbr (1:count_out)
overlap_out%is (1:count_out) = overlap%is (1:count_out)
overlap_out%ie (1:count_out) = overlap%ie (1:count_out)
overlap_out%js (1:count_out) = overlap%js (1:count_out)
overlap_out%je (1:count_out) = overlap%je (1:count_out)
overlap_out%dir (1:count_out) = overlap%dir (1:count_out)
overlap_out%rotation (1:count_out) = overlap%rotation (1:count_out)
overlap_out%index (1:count_out) = overlap%index (1:count_out)
overlap_out%from_contact(1:count_out) = overlap%from_contact(1:count_out)
overlap_out%msgsize (1:count_out) = overlap%msgsize (1:count_out)
call deallocate_overlap_type(overlap)
end if
overlap_out%count = count
overlap_out%tileMe (count_out+1:count) = overlap_in%tileMe (1:count_in)
overlap_out%tileNbr (count_out+1:count) = overlap_in%tileNbr (1:count_in)
overlap_out%is (count_out+1:count) = overlap_in%is (1:count_in)
overlap_out%ie (count_out+1:count) = overlap_in%ie (1:count_in)
overlap_out%js (count_out+1:count) = overlap_in%js (1:count_in)
overlap_out%je (count_out+1:count) = overlap_in%je (1:count_in)
overlap_out%dir (count_out+1:count) = overlap_in%dir (1:count_in)
overlap_out%rotation (count_out+1:count) = overlap_in%rotation (1:count_in)
overlap_out%from_contact(count_out+1:count) = overlap_in%from_contact(1:count_in)
do n = count_out+1, count
overlap_out%msgsize(n) = (overlap_out%ie(n)-overlap_out%is(n)+1)*(overlap_out%je(n)-overlap_out%js(n)+1)
enddo
end subroutine add_update_overlap
!##############################################################################
subroutine expand_update_overlap_list(overlapList, npes)
type(overlap_type), pointer :: overlapList(:)
integer, intent(in ) :: npes
type(overlap_type), pointer,save :: newlist(:) => NULL()
integer :: nlist_old, nlist, m
nlist_old = size(overlaplist(:))
if(nlist_old .GE. npes) call mpp_error(FATAL, &
'mpp_domains_define.inc(expand_update_overlap_list): size of overlaplist should be smaller than npes')
nlist = min(npes, 2*nlist_old)
allocate(newlist(nlist))
do m = 1, nlist_old
call add_update_overlap(newlist(m), overlaplist(m))
call deallocate_overlap_type(overlapList(m))
enddo
deallocate(overlapList)
overlaplist => newlist
newlist => NULL()
return
end subroutine expand_update_overlap_list
!##################################################################################
subroutine expand_check_overlap_list(overlaplist, npes)
type(overlap_type), pointer :: overlaplist(:)
integer, intent(in) :: npes
type(overlap_type), pointer,save :: newlist(:) => NULL()
integer :: nlist_old, nlist, m
nlist_old = size(overlaplist(:))
if(nlist_old .GE. npes) call mpp_error(FATAL, &
'mpp_domains_define.inc(expand_check_overlap_list): size of overlaplist should be smaller than npes')
nlist = min(npes, 2*nlist_old)
allocate(newlist(nlist))
do m = 1,size(overlaplist(:))
call add_check_overlap(newlist(m), overlaplist(m))
call deallocate_overlap_type(overlapList(m))
enddo
deallocate(overlapList)
overlaplist => newlist
return
end subroutine expand_check_overlap_list
!###############################################################################
subroutine check_overlap_pe_order(domain, overlap, name)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: overlap
character(len=*), intent(in) :: name
integer :: m
integer :: pe1, pe2
!---make sure overlap%nsend and overlap%nrecv are no larger than MAXLIST
if( overlap%nsend > MAXLIST) call mpp_error(FATAL, &
"mpp_domains_define.inc(check_overlap_pe_order): overlap%nsend > MAXLIST, increase MAXLIST")
if( overlap%nrecv > MAXLIST) call mpp_error(FATAL, &
"mpp_domains_define.inc(check_overlap_pe_order): overlap%nrecv > MAXLIST, increase MAXLIST")
do m = 2, overlap%nsend
pe1 = overlap%send(m-1)%pe
pe2 = overlap%send(m)%pe
!-- when pe1 == domain%pe, pe2 could be any value except domain%pe
if( pe2 == domain%pe ) then
print*, trim(name)//" at pe = ", domain%pe, ": send pe is ", pe1, pe2
call mpp_error(FATAL, &
"mpp_domains_define.inc(check_overlap_pe_order): send pe2 can not equal to domain%pe")
else if( (pe1 > domain%pe .AND. pe2 > domain%pe) .OR. (pe1 < domain%pe .AND. pe2 < domain%pe)) then
if( pe2 < pe1 ) then
print*, trim(name)//" at pe = ", domain%pe, ": send pe is ", pe1, pe2
call mpp_error(FATAL, &
"mpp_domains_define.inc(check_overlap_pe_order): pe is not in right order for send 1")
endif
else if ( pe2 > domain%pe .AND. pe1 < domain%pe ) then
print*, trim(name)//" at pe = ", domain%pe, ": send pe is ", pe1, pe2
call mpp_error(FATAL, &
"mpp_domains_define.inc(check_overlap_pe_order): pe is not in right order for send 2")
endif
enddo
do m = 2, overlap%nrecv
pe1 = overlap%recv(m-1)%pe
pe2 = overlap%recv(m)%pe
!-- when pe1 == domain%pe, pe2 could be any value except domain%pe
if( pe2 == domain%pe ) then
print*, trim(name)//" at pe = ", domain%pe, ": recv pe is ", pe1, pe2
call mpp_error(FATAL, &
"mpp_domains_define.inc(check_overlap_pe_order): recv pe2 can not equal to domain%pe")
else if( (pe1 > domain%pe .AND. pe2 > domain%pe) .OR. (pe1 < domain%pe .AND. pe2 < domain%pe)) then
if( pe2 > pe1 ) then
print*, trim(name)//" at pe = ", domain%pe, ": recv pe is ", pe1, pe2
call mpp_error(FATAL, &
"mpp_domains_define.inc(check_overlap_pe_order): pe is not in right order for recv 1")
endif
else if ( pe2 < domain%pe .AND. pe1 > domain%pe ) then
print*, trim(name)//" at pe = ", domain%pe, ": recv pe is ", pe1, pe2
call mpp_error(FATAL, &
"mpp_domains_define.inc(check_overlap_pe_order): pe is not in right order for recv 2")
endif
enddo
end subroutine check_overlap_pe_order
!###############################################################################
subroutine set_domain_comm_inf(update)
type(overlapSpec), intent(inout) :: update
integer :: m, totsize, n
! first set the send and recv size
update%sendsize = 0
update%recvsize = 0
do m = 1, update%nrecv
totsize = 0
do n = 1, update%recv(m)%count
totsize = totsize + update%recv(m)%msgsize(n)
enddo
update%recv(m)%totsize = totsize
if(m==1) then
update%recv(m)%start_pos = 0
else
update%recv(m)%start_pos = update%recv(m-1)%start_pos + update%recv(m-1)%totsize
endif
update%recvsize = update%recvsize + totsize
enddo
do m = 1, update%nsend
totsize = 0
do n = 1, update%send(m)%count
totsize = totsize + update%send(m)%msgsize(n)
enddo
update%send(m)%totsize = totsize
if(m==1) then
update%send(m)%start_pos = 0
else
update%send(m)%start_pos = update%send(m-1)%start_pos + update%send(m-1)%totsize
endif
update%sendsize = update%sendsize + totsize
enddo
return
end subroutine set_domain_comm_inf
# 2787 "../mpp/mpp_domains.F90" 2
# 1 "../mpp/include/mpp_domains_misc.inc" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_DOMAINS: initialization and termination !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
!
! Initialize domain decomp package.
!
!
! Called to initialize the mpp_domains_mod package.
!
! flags can be set to MPP_VERBOSE to have
! mpp_domains_mod keep you informed of what it's up
! to. MPP_DEBUG returns even more information for debugging.
!
! mpp_domains_init will call mpp_init, to make sure
! mpp_mod is initialized. (Repeated
! calls to mpp_init do no harm, so don't worry if you already
! called it).
!
!
! call mpp_domains_init(flags)
!
!
!
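! An illustrative input.nml entry for the mpp_domains_nml namelist read below
! (a sketch: only debug_update_domain is shown; other namelist variables may exist):
!   &mpp_domains_nml
!      debug_update_domain = 'none'   ! one of 'none', 'fatal', 'warning', 'note'
!   /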
subroutine mpp_domains_init(flags)
integer, intent(in), optional :: flags
integer :: n
# 53
integer :: unit_begin, unit_end, unit_nml, io_status, unit
logical :: opened
# 59
if( module_is_initialized )return
call mpp_init(flags) !this is a no-op if already initialized
call mpp_pset_init !this is a no-op if already initialized
module_is_initialized = .TRUE.
pe = mpp_root_pe()
unit = stdlog()
if( mpp_pe() .EQ.mpp_root_pe() ) write( unit,'(/a)' )'MPP_DOMAINS module '//trim(version)
if( PRESENT(flags) )then
debug = flags.EQ.MPP_DEBUG
verbose = flags.EQ.MPP_VERBOSE .OR. debug
domain_clocks_on = flags.EQ.MPP_DOMAIN_TIME
end if
!--- namelist
read (input_nml_file, mpp_domains_nml, iostat=io_status)
# 89
if (io_status > 0) then
call mpp_error(FATAL,'=>mpp_domains_init: Error reading input.nml')
endif
select case(lowercase(trim(debug_update_domain)))
case("none")
debug_update_level = NO_CHECK
case("fatal")
debug_update_level = FATAL
case("warning")
debug_update_level = WARNING
case("note")
debug_update_level = NOTE
case default
call mpp_error(FATAL, "mpp_domains_init: debug_update_level should be 'none', 'fatal', 'warning', or 'note'")
end select
allocate(nonblock_data(MAX_NONBLOCK_UPDATE))
do n = 1, MAX_NONBLOCK_UPDATE
call init_nonblock_type(nonblock_data(n))
enddo
call mpp_domains_set_stack_size(32768) !default, pretty arbitrary
# 118
!NULL_DOMAIN is a domain type that can be used to initialize to undefined
call mpp_define_null_domain(NULL_DOMAIN1d);
call mpp_define_null_domain(NULL_DOMAIN2d);
call mpp_define_null_UG_domain(NULL_DOMAINUG)
if( domain_clocks_on )then
pack_clock = mpp_clock_id( 'Halo pack' )
send_clock = mpp_clock_id( 'Halo send' )
recv_clock = mpp_clock_id( 'Halo recv' )
unpk_clock = mpp_clock_id( 'Halo unpk' )
wait_clock = mpp_clock_id( 'Halo wait' )
send_pack_clock_nonblock = mpp_clock_id( 'Halo pack and send nonblock' )
recv_clock_nonblock = mpp_clock_id( 'Halo recv nonblock' )
unpk_clock_nonblock = mpp_clock_id( 'Halo unpk nonblock' )
wait_clock_nonblock = mpp_clock_id( 'Halo wait nonblock' )
nest_pack_clock = mpp_clock_id( 'nest pack' )
nest_send_clock = mpp_clock_id( 'nest send' )
nest_recv_clock = mpp_clock_id( 'nest recv' )
nest_unpk_clock = mpp_clock_id( 'nest unpk' )
nest_wait_clock = mpp_clock_id( 'nest wait' )
group_pack_clock = mpp_clock_id( 'group pack' )
group_send_clock = mpp_clock_id( 'group send' )
group_recv_clock = mpp_clock_id( 'group recv' )
group_unpk_clock = mpp_clock_id( 'group unpk' )
group_wait_clock = mpp_clock_id( 'group wait' )
nonblock_group_pack_clock = mpp_clock_id( 'nonblock group pack' )
nonblock_group_send_clock = mpp_clock_id( 'nonblock group send' )
nonblock_group_recv_clock = mpp_clock_id( 'nonblock group recv' )
nonblock_group_unpk_clock = mpp_clock_id( 'nonblock group unpk' )
nonblock_group_wait_clock = mpp_clock_id( 'nonblock group wait' )
end if
return
end subroutine mpp_domains_init
!#####################################################################
subroutine init_nonblock_type( nonblock_obj )
type(nonblock_type), intent(inout) :: nonblock_obj
nonblock_obj%recv_pos = 0
nonblock_obj%send_pos = 0
nonblock_obj%recv_msgsize = 0
nonblock_obj%send_msgsize = 0
nonblock_obj%update_flags = 0
nonblock_obj%update_position = 0
nonblock_obj%update_gridtype = 0
nonblock_obj%update_whalo = 0
nonblock_obj%update_ehalo = 0
nonblock_obj%update_shalo = 0
nonblock_obj%update_nhalo = 0
nonblock_obj%request_send_count = 0
nonblock_obj%request_recv_count = 0
nonblock_obj%size_recv(:) = 0
nonblock_obj%type_recv(:) = 0
nonblock_obj%request_send(:) = MPI_REQUEST_NULL
nonblock_obj%request_recv(:) = MPI_REQUEST_NULL
# 180
nonblock_obj%buffer_pos_send(:) = 0
nonblock_obj%buffer_pos_recv(:) = 0
nonblock_obj%nfields = 0
nonblock_obj%field_addrs(:) = 0
nonblock_obj%field_addrs2(:) = 0
return
end subroutine init_nonblock_type
!#####################################################################
!
!
! Exit mpp_domains_mod.
!
!
! Serves no particular purpose, but is provided should you require to
! re-initialize mpp_domains_mod, for some odd reason.
!
!
! call mpp_domains_exit()
!
!
subroutine mpp_domains_exit()
integer :: unit
if( .NOT.module_is_initialized )return
call mpp_max(mpp_domains_stack_hwm)
unit = stdout()
if( mpp_pe().EQ.mpp_root_pe() )write( unit,* )'MPP_DOMAINS_STACK high water mark=', mpp_domains_stack_hwm
module_is_initialized = .FALSE.
return
end subroutine mpp_domains_exit
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_CHECK_FIELD: Check parallel !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
!
!
!
!
!
!
!
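! A minimal usage sketch for the parallel-check routines below, assuming the generic
! name mpp_check_field resolves to them and that field, pelist1, pelist2 and domain
! are already set up by the caller; the halo and force_abort arguments are optional:
!   call mpp_check_field(field, pelist1, pelist2, domain, 'u-field check', &
!                        w_halo=1, s_halo=1, e_halo=1, n_halo=1, force_abort=.true.)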
subroutine mpp_check_field_3D(field_in, pelist1, pelist2, domain, mesg, &
w_halo, s_halo, e_halo, n_halo, force_abort, position )
! This routine does parallel checking of 3d data between n PEs and m PEs. The comparison
! is done on pelist2. When the size of pelist2 is 1, the halo can be checked; otherwise,
! the halo can not be checked.
real, dimension(:,:,:), intent(in) :: field_in ! field to be checked
integer, dimension(:), intent(in) :: pelist1, pelist2 ! pe list for the two groups
type(domain2d), intent(in) :: domain ! domain for each pe
character(len=*), intent(in) :: mesg ! message to be printed out
! if differences found
integer, intent(in), optional :: w_halo, s_halo, e_halo, n_halo
! halo size for west, south, east and north
logical, intent(in), optional :: force_abort ! when true, call mpp_error if any difference
! is found. default value is false.
integer, intent(in), optional :: position ! when domain is symmetry, only value = CENTER is
! implemented.
integer :: k
character(len=256) :: temp_mesg
do k = 1, size(field_in,3)
write(temp_mesg, '(a, i3)') trim(mesg)//" at level " , k
call mpp_check_field_2d(field_in(:,:,k), pelist1, pelist2, domain, temp_mesg, &
w_halo, s_halo, e_halo, n_halo, force_abort, position )
enddo
end subroutine mpp_check_field_3D
!#####################################################################################
!
!
!
subroutine mpp_check_field_2d(field_in, pelist1, pelist2, domain, mesg, &
w_halo, s_halo, e_halo, n_halo,force_abort, position )
! This routine does parallel checking of 2d data between n PEs and m PEs. The comparison
! is done on pelist2. When the size of pelist2 is 1, the halo can be checked; otherwise,
! the halo can not be checked.
real, dimension(:,:), intent(in) :: field_in ! field to be checked
integer, dimension(:), intent(in) :: pelist1, pelist2 ! pe list for the two groups
type(domain2d), intent(in) :: domain ! domain for each pe
character(len=*), intent(in) :: mesg ! message to be printed out
! if differences found
integer, intent(in), optional :: w_halo, s_halo, e_halo, n_halo
! halo size for west, south, east and north
logical, intent(in), optional :: force_abort ! when true, call mpp_error if any difference
! is found. default value is false.
integer, intent(in), optional :: position ! when domain is symmetry, only value = CENTER is
! implemented.
if(present(position)) then
if(position .NE. CENTER .AND. domain%symmetry) call mpp_error(FATAL, &
'mpp_check_field: when domain is symmetry, only value CENTER is implemented, contact author')
endif
if(size(pelist2(:)) == 1) then
call mpp_check_field_2d_type1(field_in, pelist1, pelist2, domain, mesg, &
w_halo, s_halo, e_halo, n_halo, force_abort )
else if(size(pelist1(:)) == 1) then
call mpp_check_field_2d_type1(field_in, pelist2, pelist1, domain, mesg, &
w_halo, s_halo, e_halo, n_halo, force_abort )
else if(size(pelist1(:)) .gt. 1 .and. size(pelist2(:)) .gt. 1) then
call mpp_check_field_2d_type2(field_in, pelist1, pelist2, domain, mesg, force_abort )
else
call mpp_error(FATAL, 'mpp_check_field: size of both pelists should be greater than 0')
endif
end subroutine mpp_check_field_2D
!####################################################################################
subroutine mpp_check_field_2d_type1(field_in, pelist1, pelist2, domain, mesg, &
w_halo, s_halo, e_halo, n_halo,force_abort )
! This routine is used to check a field between a run on 1 PE (pelist2) and
! a run on n PEs (pelist1). The data to be checked is sent to pelist2 and all the
! comparison is done on pelist2.
real, dimension(:,:), intent(in) :: field_in ! field to be checked
integer, dimension(:), intent(in) :: pelist1, pelist2 ! pe list for the two groups
type(domain2d), intent(in) :: domain ! domain for each pe
character(len=*), intent(in) :: mesg ! message to be printed out
! if differences found
integer, intent(in), optional :: w_halo, s_halo, e_halo, n_halo
! halo size for west, south, east and north
logical, intent(in), optional :: force_abort ! when true, call mpp_error if any difference
! is found. default value is false.
! some local data
integer :: pe,npes, p
integer :: hwest, hsouth, heast, hnorth, isg, ieg, jsg, jeg, xhalo, yhalo
integer :: i,j,im,jm,l,is,ie,js,je,isc,iec,jsc,jec,isd,ied,jsd,jed
real,dimension(:,:), allocatable :: field1,field2
real,dimension(:), allocatable :: send_buffer
integer, dimension(4) :: ibounds
logical :: check_success, error_exit
check_success = .TRUE.
error_exit = .FALSE.
if(present(force_abort)) error_exit = force_abort
hwest = 0; if(present(w_halo)) hwest = w_halo
heast = 0; if(present(e_halo)) heast = e_halo
hsouth = 0; if(present(s_halo)) hsouth = s_halo
hnorth = 0; if(present(n_halo)) hnorth = n_halo
pe = mpp_pe ()
npes = mpp_npes()
call mpp_get_compute_domain(domain, isc, iec, jsc, jec)
call mpp_get_data_domain(domain, isd, ied, jsd, jed)
call mpp_get_global_domain(domain, isg, ieg, jsg, jeg)
xhalo = isc - isd
yhalo = jsc - jsd
!--- the halo size to be checked should not be bigger than xhalo or yhalo
if(hwest .gt. xhalo .or. heast .gt. xhalo .or. hsouth .gt. yhalo .or. hnorth .gt. yhalo) &
call mpp_error(FATAL,'mpp_check_field: '//trim(mesg)//': The halo size is not correct')
is = isc - hwest; ie = iec + heast; js = jsc - hsouth; je = jec + hnorth
allocate(field2(is:ie,js:je))
! check if the field_in is on compute domain or data domain
if((size(field_in,1) .eq. iec-isc+1) .and. (size(field_in,2) .eq. jec-jsc+1)) then
!if field_in is on the compute domain, halo points can not be checked
if( hwest .ne. 0 .or. heast .ne. 0 .or. hsouth .ne. 0 .or. hnorth .ne. 0 ) &
call mpp_error(FATAL,'mpp_check_field: '//trim(mesg)//': field is on compute domain, can not check halo')
field2(:,:) = field_in(:,:)
else if((size(field_in,1) .eq. ied-isd+1) .and. (size(field_in,2) .eq. jed-jsd+1)) then
field2(is:ie,js:je) = field_in(is-isd+1:ie-isd+1,js-jsd+1:je-jsd+1)
else if((size(field_in,1) .eq. ieg-isg+1) .and. (size(field_in,2) .eq. jeg-jsg+1)) then
if( hwest .ne. 0 .or. heast .ne. 0 .or. hsouth .ne. 0 .or. hnorth .ne. 0 ) &
call mpp_error(FATAL,'mpp_check_field: '//trim(mesg)//': field is on global domain, can not check halo')
field2(is:ie,js:je) = field_in(1:ie-is+1,1:je-js+1)
else if((size(field_in,1) .eq. ieg-isg+1+2*xhalo) .and. (size(field_in,2) .eq. jeg-jsg+1+2*yhalo)) then
field2(is:ie,js:je) = field_in(is-isd+1:ie-isd+1,js-jsd+1:je-jsd+1)
else
print*, 'on pe ', pe, 'domain: ', isc, iec, jsc, jec, isd, ied, jsd, jed, 'size of field: ', size(field_in,1), size(field_in,2)
call mpp_error(FATAL,'mpp_check_field: '//trim(mesg)//':field is not on compute, data or global domain')
endif
call mpp_sync_self()
if(any(pelist1 == pe)) then ! send data to root pe
im = ie-is+1; jm=je-js+1
allocate(send_buffer(im*jm))
ibounds(1) = is; ibounds(2) = ie; ibounds(3) = js; ibounds(4) = je
l = 0
do i = is,ie
do j = js,je
l = l+1
send_buffer(l) = field2(i,j)
enddo
enddo
! send the check bounds and data to the root pe
! Force use of "scalar", integer pointer mpp interface
call mpp_send(ibounds(1), plen=4, to_pe=pelist2(1), tag=COMM_TAG_1)
call mpp_send(send_buffer(1),plen=im*jm, to_pe=pelist2(1), tag=COMM_TAG_2)
deallocate(send_buffer)
else if(pelist2(1) == pe) then ! receive data and compare
do p = pelist1(1), pelist1(size(pelist1(:)))
! Force use of "scalar", integer pointer mpp interface
call mpp_recv(ibounds(1), glen=4,from_pe=p, tag=COMM_TAG_1)
is = ibounds(1); ie = ibounds(2); js=ibounds(3); je=ibounds(4)
im = ie-is+1; jm=je-js+1
if(allocated(field1)) deallocate(field1)
if(allocated(send_buffer)) deallocate(send_buffer)
allocate(field1(is:ie,js:je),send_buffer(im*jm))
! Force use of "scalar", integer pointer mpp interface
call mpp_recv(send_buffer(1),glen=im*jm,from_pe=p, tag=COMM_TAG_2)
l = 0
! compare here, the comparison criteria can be changed according to need
do i = is,ie
do j = js,je
l = l+1
field1(i,j) = send_buffer(l)
if(field1(i,j) .ne. field2(i,j)) then
! write to standard output
print*,trim(mesg)//": ", i, j, field1(i,j), field2(i,j), field1(i,j) - field2(i,j)
! write(stdout(),'(a,2i,2f)') trim(mesg), i, j, pass_field(i,j), field_check(i,j)
check_success = .FALSE.
if(error_exit) call mpp_error(FATAL,"mpp_check_field: can not reproduce at this point")
endif
enddo
enddo
enddo
if(check_success) then
print*, trim(mesg)//": ", 'comparison between 1 pe and ', npes-1, ' pes is ok'
endif
! release memory
deallocate(field1, send_buffer)
endif
deallocate(field2)
call mpp_sync()
end subroutine mpp_check_field_2d_type1
!####################################################################
subroutine mpp_check_field_2d_type2(field_in, pelist1, pelist2, domain, mesg,force_abort)
! This routine is used to check a field between a run on m PEs (root pe) and a run on
! n PEs. This routine can not check the halo.
real, dimension(:,:), intent(in) :: field_in
type(domain2d), intent(in) :: domain
integer, dimension(:), intent(in) :: pelist1
integer, dimension(:), intent(in) :: pelist2
character(len=*), intent(in) :: mesg
logical, intent(in), optional :: force_abort ! when, call mpp_error if any difference
! found. default value is false.
! some local variables
logical :: check_success, error_exit
real, dimension(:,:), allocatable :: field1, field2
integer :: i, j, pe, npes, isd,ied,jsd,jed, is, ie, js, je
type(domain2d) :: domain1, domain2
check_success = .TRUE.
error_exit = .FALSE.
if(present(force_abort)) error_exit = force_abort
pe = mpp_pe()
npes = mpp_npes()
call mpp_sync_self()
if(any(pelist1 == pe)) domain1 = domain
if(any(pelist2 == pe)) domain2 = domain
! Comparison is made on pelist2.
if(any(pelist2 == pe)) then
call mpp_get_data_domain(domain2, isd, ied, jsd, jed)
call mpp_get_compute_domain(domain2, is, ie, js, je)
allocate(field1(isd:ied, jsd:jed),field2(isd:ied, jsd:jed))
if((size(field_in,1) .ne. ied-isd+1) .or. (size(field_in,2) .ne. jed-jsd+1)) &
call mpp_error(FATAL,'mpp_check_field: input field is not on the data domain')
field2(isd:ied, jsd:jed) = field_in(:,:)
endif
! broadcast domain
call mpp_broadcast_domain(domain1)
call mpp_broadcast_domain(domain2)
call mpp_redistribute(domain1,field_in,domain2,field1)
if(any(pelist2 == pe)) then
do i =is,ie
do j =js,je
if(field1(i,j) .ne. field2(i,j)) then
print*, trim(mesg)//": ", i, j, field1(i,j), field2(i,j), field1(i,j) - field2(i,j)
! write(stdout(),'(a,2i,2f)') trim(mesg), i, j, field_check(i,j), field_out(i,j)
check_success = .FALSE.
if(error_exit) call mpp_error(FATAL,"mpp_check_field: can not reproduce at this point")
endif
enddo
enddo
if(check_success) &
print*, trim(mesg)//": ", 'comparison between ', size(pelist1(:)), ' pes and ', &
size(pelist2(:)), ' pe on', pe, ' pes is ok'
endif
if(any(pelist2 == pe)) deallocate(field1, field2)
call mpp_sync()
return
end subroutine mpp_check_field_2d_type2
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_BROADCAST_DOMAIN !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
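! A minimal usage sketch, assuming the generic name mpp_broadcast_domain resolves to
! the routines below (it is called that way elsewhere in this file); useful when a PE
! outside a domain's own pelist needs a copy of that domain:
!   call mpp_broadcast_domain(domain)                   ! single-domain form
!   call mpp_broadcast_domain(domain_in, domain_out)    ! two-domain form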
subroutine mpp_broadcast_domain_1( domain )
!broadcast domain (useful only outside the context of its own pelist)
type(domain2D), intent(inout) :: domain
integer, allocatable :: pes(:)
logical :: native !true if I'm on the pelist of this domain
integer :: listsize, listpos
integer :: n
integer, dimension(11) :: msg, info !pe and compute domain of each item in list
integer :: errunit
errunit = stderr()
if( .NOT.module_is_initialized ) &
call mpp_error( FATAL, 'MPP_BROADCAST_DOMAIN_1: You must first call mpp_domains_init.' )
!get the current pelist
allocate( pes(0:mpp_npes()-1) )
call mpp_get_current_pelist(pes)
!am I part of this domain?
native = ASSOCIATED(domain%list)
!set local list size
if( native )then
listsize = size(domain%list(:))
else
listsize = 0
end if
call mpp_max(listsize)
if( .NOT.native )then
!initialize domain%list and set null values in message
allocate( domain%list(0:listsize-1) )
domain%pe = NULL_PE
domain%pos = -1
allocate(domain%x(1), domain%y(1), domain%tile_id(1))
do n = 0, listsize-1
allocate(domain%list(n)%x(1), domain%list(n)%y(1), domain%list(n)%tile_id(1) )
end do
domain%x%compute%begin = 1
domain%x%compute%end = -1
domain%y%compute%begin = 1
domain%y%compute%end = -1
domain%x%global %begin = -1
domain%x%global %end = -1
domain%y%global %begin = -1
domain%y%global %end = -1
domain%tile_id = -1
domain%whalo = -1
domain%ehalo = -1
domain%shalo = -1
domain%nhalo = -1
domain%symmetry = .false.
end if
!initialize values in info
info(1) = domain%pe
call mpp_get_compute_domain( domain, info(2), info(3), info(4), info(5) )
info(6) = domain%tile_id(1)
info(7) = domain%whalo
info(8) = domain%ehalo
info(9) = domain%shalo
info(10)= domain%nhalo
if(domain%symmetry) then
info(11) = 1
else
info(11) = 0
endif
!broadcast your info across current pelist and unpack if needed
listpos = 0
do n = 0,mpp_npes()-1
msg = info
if( mpp_pe().EQ.pes(n) .AND. debug )write( errunit,* )'PE ', mpp_pe(), 'broadcasting msg ', msg
call mpp_broadcast( msg, 11, pes(n) )
!no need to unpack message if native
!no need to unpack a message whose pe is NULL_PE
if( .NOT.native .AND. msg(1).NE.NULL_PE )then
domain%list(listpos)%pe = msg(1)
domain%list(listpos)%x%compute%begin = msg(2)
domain%list(listpos)%x%compute%end = msg(3)
domain%list(listpos)%y%compute%begin = msg(4)
domain%list(listpos)%y%compute%end = msg(5)
domain%list(listpos)%tile_id(1) = msg(6)
if(domain%x(1)%global%begin < 0) then
domain%x(1)%global%begin = msg(2)
domain%x(1)%global%end = msg(3)
domain%y(1)%global%begin = msg(4)
domain%y(1)%global%end = msg(5)
domain%whalo = msg(7)
domain%ehalo = msg(8)
domain%shalo = msg(9)
domain%nhalo = msg(10)
if(msg(11) == 1) then
domain%symmetry = .true.
else
domain%symmetry = .false.
endif
else
domain%x(1)%global%begin = min(domain%x(1)%global%begin, msg(2))
domain%x(1)%global%end = max(domain%x(1)%global%end, msg(3))
domain%y(1)%global%begin = min(domain%y(1)%global%begin, msg(4))
domain%y(1)%global%end = max(domain%y(1)%global%end, msg(5))
endif
listpos = listpos + 1
if( debug )write( errunit,* )'PE ', mpp_pe(), 'received domain from PE ', msg(1), 'is,ie,js,je=', msg(2:5)
end if
end do
end subroutine mpp_broadcast_domain_1
!##############################################################################
subroutine mpp_broadcast_domain_2( domain_in, domain_out )
!broadcast domain (useful only outside the context of its own pelist)
type(domain2D), intent(in) :: domain_in
type(domain2D), intent(inout) :: domain_out
integer, allocatable :: pes(:)
logical :: native !true if I'm on the pelist of this domain
integer :: listsize, listpos
integer :: n
integer, dimension(12) :: msg, info !pe and compute domain of each item in list
integer :: errunit, npes_in, npes_out, pstart, pend
errunit = stderr()
if( .NOT.module_is_initialized ) &
call mpp_error( FATAL, 'MPP_BROADCAST_DOMAIN_2: You must first call mpp_domains_init.' )
!get the current pelist
allocate( pes(0:mpp_npes()-1) )
call mpp_get_current_pelist(pes)
! domain_in must be initialized
if( .not. ASSOCIATED(domain_in%list) ) then
call mpp_error( FATAL, 'MPP_BROADCAST_DOMAIN_2: domain_in is not initialized')
endif
if( ASSOCIATED(domain_out%list) ) then
call mpp_error( FATAL, 'MPP_BROADCAST_DOMAIN_2: domain_out is already initialized')
endif
npes_in = size(domain_in%list(:))
if( npes_in == mpp_npes() ) then
call mpp_error( FATAL, 'MPP_BROADCAST_DOMAIN_2: size(domain_in%list(:)) == mpp_npes()')
endif
npes_out = mpp_npes() - npes_in
!initialize domain_out%list and set null values in message
allocate( domain_out%list(0:npes_out-1) )
domain_out%pe = NULL_PE
domain_out%pos = -1
allocate(domain_out%x(1), domain_out%y(1), domain_out%tile_id(1))
do n = 0, npes_out-1
allocate(domain_out%list(n)%x(1), domain_out%list(n)%y(1), domain_out%list(n)%tile_id(1) )
end do
domain_out%x%compute%begin = 1
domain_out%x%compute%end = -1
domain_out%y%compute%begin = 1
domain_out%y%compute%end = -1
domain_out%x%global %begin = -1
domain_out%x%global %end = -1
domain_out%y%global %begin = -1
domain_out%y%global %end = -1
domain_out%tile_id = -1
domain_out%whalo = -1
domain_out%ehalo = -1
domain_out%shalo = -1
domain_out%nhalo = -1
domain_out%symmetry = .false.
!initialize values in info
info(1) = domain_in%pe
call mpp_get_compute_domain( domain_in, info(2), info(3), info(4), info(5) )
info(6) = domain_in%tile_id(1)
info(7) = domain_in%whalo
info(8) = domain_in%ehalo
info(9) = domain_in%shalo
info(10)= domain_in%nhalo
if(domain_in%symmetry) then
info(11) = 1
else
info(11) = 0
endif
info(12) = domain_in%ntiles
!broadcast your info across current pelist and unpack if needed
if( domain_in%list(0)%pe == mpp_root_pe() ) then
pstart = npes_in
pend = mpp_npes()-1
else
pstart = 0
pend = npes_out-1
endif
do n = 0,mpp_npes()-1
msg = info
if( mpp_pe().EQ.pes(n) .AND. debug )write( errunit,* )'PE ', mpp_pe(), 'broadcasting msg ', msg
call mpp_broadcast( msg, 12, pes(n) )
!--- pack if from other domain
if( n .GE. pstart .AND. n .LE. pend )then
listpos = n - pstart
domain_out%list(listpos)%pe = msg(1)
domain_out%list(listpos)%x%compute%begin = msg(2)
domain_out%list(listpos)%x%compute%end = msg(3)
domain_out%list(listpos)%y%compute%begin = msg(4)
domain_out%list(listpos)%y%compute%end = msg(5)
domain_out%list(listpos)%tile_id(1) = msg(6)
if(domain_out%x(1)%global%begin < 0) then
domain_out%x(1)%global%begin = msg(2)
domain_out%x(1)%global%end = msg(3)
domain_out%y(1)%global%begin = msg(4)
domain_out%y(1)%global%end = msg(5)
domain_out%whalo = msg(7)
domain_out%ehalo = msg(8)
domain_out%shalo = msg(9)
domain_out%nhalo = msg(10)
if(msg(11) == 1) then
domain_out%symmetry = .true.
else
domain_out%symmetry = .false.
endif
domain_out%ntiles = msg(12)
else
domain_out%x(1)%global%begin = min(domain_out%x(1)%global%begin, msg(2))
domain_out%x(1)%global%end = max(domain_out%x(1)%global%end, msg(3))
domain_out%y(1)%global%begin = min(domain_out%y(1)%global%begin, msg(4))
domain_out%y(1)%global%end = max(domain_out%y(1)%global%end, msg(5))
endif
if( debug )write( errunit,* )'PE ', mpp_pe(), 'received domain from PE ', msg(1), 'is,ie,js,je=', msg(2:5)
end if
end do
end subroutine mpp_broadcast_domain_2
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_UPDATE_DOMAINS: fill halos for 2D decomposition !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
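! A minimal usage sketch for the halo-update routines included below, assuming the
! generic name mpp_update_domains resolves to the type/kind/rank-specific versions
! (field and domain are set up by the caller; the halo arguments are optional):
!   call mpp_update_domains(field, domain)                    ! update all halos
!   call mpp_update_domains(field, domain, whalo=1, ehalo=1)  ! partial-width update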
# 1 "../mpp/include/mpp_update_domains2D.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_update_domain2D_r8_2D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 2D field whose computational domains have been computed
real(8), intent(inout) :: field(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domain2D_r8_2D
subroutine mpp_update_domain2D_r8_3D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 3D field whose computational domains have been computed
real(8), intent(inout) :: field(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer :: update_position, update_whalo, update_ehalo, update_shalo, update_nhalo, ntile
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES),save :: f_addrs=-9999
integer :: tile, max_ntile
character(len=3) :: text
logical :: set_mismatch, is_complete
logical :: do_update
integer, save :: isize=0, jsize=0, ke=0, l_size=0, list=0
integer, save :: pos, whalosz, ehalosz, shalosz, nhalosz
real(8) :: d_type
type(overlapSpec), pointer :: update => NULL()
type(overlapSpec), pointer :: check => NULL()
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
!--- when there is NINETY or MINUS_NINETY rotation for some contact, the scalar data can not be on the E or N-cell.
if(present(position)) then
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
call mpp_error(FATAL, 'MPP_UPDATE_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'can not use scalar version update_domain for data on E or N-cell' )
end if
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_UPDATE_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
update_position = CENTER
if(present(position)) update_position = position
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ke = size(field,3); pos = update_position
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize /= size(field,1))
set_mismatch = set_mismatch .OR. (jsize /= size(field,2))
set_mismatch = set_mismatch .OR. (ke /= size(field,3))
set_mismatch = set_mismatch .OR. (update_position /= pos)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
if(debug_update_level .NE. NO_CHECK) then
check => search_check_overlap(domain, update_position)
if(ASSOCIATED(check) ) then
call mpp_do_check(f_addrs(1:l_size,1:ntile), domain, check, d_type, ke, flags, name )
endif
endif
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
!call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, &
! b_addrs(1:l_size,1:ntile), bsize, flags)
if ( PRESENT ( flags ) ) then
call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, flags )
else
call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke )
endif
end if
l_size=0; f_addrs=-9999; isize=0; jsize=0; ke=0
endif
return
end subroutine mpp_update_domain2D_r8_3D
subroutine mpp_update_domain2D_r8_4D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 4D field whose computational domains have been computed
real(8), intent(inout) :: field(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domain2D_r8_4D
subroutine mpp_update_domain2D_r8_5D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 5D field whose computational domains have been computed
real(8), intent(inout) :: field(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domain2D_r8_5D
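! A minimal usage sketch for the redistribution routines below, assuming the generic
! name mpp_redistribute resolves to them (it is called that way earlier in this file);
! field_in lives on domain_in's decomposition and field_out on domain_out's:
!   call mpp_redistribute(domain_in, field_in, domain_out, field_out)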
subroutine mpp_redistribute_r8_2D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
real(8), intent(in) :: field_in (:,:)
real(8), intent(out) :: field_out(:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
real(8) :: field3D_in (size(field_in, 1),size(field_in, 2),1)
real(8) :: field3D_out(size(field_out,1),size(field_out,2),1)
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_r8_2D
subroutine mpp_redistribute_r8_3D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
real(8), intent(in) :: field_in (:,:,:)
real(8), intent(out) :: field_out(:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
type(DomainCommunicator2D),pointer,optional :: dc_handle
type(DomainCommunicator2D),pointer,save :: d_comm =>NULL()
logical :: do_redist,free_comm
integer :: lsize
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: l_addrs_in=-9999, l_addrs_out=-9999
integer, save :: isize_in=0,jsize_in=0,ke_in=0,l_size=0
integer, save :: isize_out=0,jsize_out=0,ke_out=0
logical :: set_mismatch
integer :: ke
character(len=2) :: text
real(8) :: d_type
integer(8) :: floc_in, floc_out
floc_in = 0
floc_out = 0
if(domain_in%initialized) floc_in = LOC(field_in)
if(domain_out%initialized) floc_out = LOC(field_out)
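! On a PE where only one of the two domains is defined, the address of the other
! side stays zero; the communicator setup below uses this to decide whether this
! PE sends, receives, or does both for the redistribute.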
if(present(position)) then
if(position .NE. CENTER) call mpp_error( FATAL, &
'MPP_REDISTRIBUTE_3Dold_: only position = CENTER is implemented, contact author')
endif
do_redist=.true.; if(PRESENT(complete))do_redist=complete
free_comm=.false.; if(PRESENT(free))free_comm=free
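! complete=.false. only queues this field for a later aggregated transfer;
! free=.true. releases the cached communicator for the field group instead of
! moving any data.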
if(free_comm)then
l_addrs_in(1) = floc_in; l_addrs_out(1) = floc_out
if(l_addrs_out(1)>0)then
ke = size(field_out,3)
else
ke = size(field_in,3)
end if
lsize=1; if(PRESENT(list_size))lsize=list_size
call mpp_redistribute_free_comm(domain_in,l_addrs_in(1),domain_out,l_addrs_out(1),ke,lsize)
else
l_size = l_size+1
if(l_size > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_REDISTRIBUTE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group redistribute.' )
end if
l_addrs_in(l_size) = floc_in; l_addrs_out(l_size) = floc_out
if(l_size == 1)then
if(l_addrs_in(l_size) > 0)then
isize_in=size(field_in,1); jsize_in=size(field_in,2); ke_in = size(field_in,3)
end if
if(l_addrs_out(l_size) > 0)then
isize_out=size(field_out,1); jsize_out=size(field_out,2); ke_out = size(field_out,3)
endif
else
set_mismatch = .false.
set_mismatch = l_addrs_in(l_size) == 0 .AND. l_addrs_in(l_size-1) /= 0
set_mismatch = set_mismatch .OR. (l_addrs_in(l_size) > 0 .AND. l_addrs_in(l_size-1) == 0)
set_mismatch = set_mismatch .OR. (l_addrs_out(l_size) == 0 .AND. l_addrs_out(l_size-1) /= 0)
set_mismatch = set_mismatch .OR. (l_addrs_out(l_size) > 0 .AND. l_addrs_out(l_size-1) == 0)
if(l_addrs_in(l_size) > 0)then
set_mismatch = set_mismatch .OR. (isize_in /= size(field_in,1))
set_mismatch = set_mismatch .OR. (jsize_in /= size(field_in,2))
set_mismatch = set_mismatch .OR. (ke_in /= size(field_in,3))
endif
if(l_addrs_out(l_size) > 0)then
set_mismatch = set_mismatch .OR. (isize_out /= size(field_out,1))
set_mismatch = set_mismatch .OR. (jsize_out /= size(field_out,2))
set_mismatch = set_mismatch .OR. (ke_out /= size(field_out,3))
endif
if(set_mismatch)then
write( text,'(i2)' ) l_size
call mpp_error(FATAL,'MPP_REDISTRIBUTE_3D: Incompatible field at count '//text//' for group redistribute.' )
endif
endif
if(do_redist)then
if(PRESENT(dc_handle))d_comm =>dc_handle ! User has kept pointer to d_comm
if(.not.ASSOCIATED(d_comm))then ! d_comm needs initialization or lookup
d_comm =>mpp_redistribute_init_comm(domain_in,l_addrs_in(1:l_size),domain_out,l_addrs_out(1:l_size), &
isize_in,jsize_in,ke_in,isize_out,jsize_out,ke_out)
if(PRESENT(dc_handle))dc_handle =>d_comm ! User wants to keep pointer to d_comm
endif
call mpp_do_redistribute( l_addrs_in(1:l_size), l_addrs_out(1:l_size), d_comm, d_type )
l_size=0; l_addrs_in=-9999; l_addrs_out=-9999
isize_in=0; jsize_in=0; ke_in=0
isize_out=0; jsize_out=0; ke_out=0
d_comm =>NULL()
endif
endif
end subroutine mpp_redistribute_r8_3D
subroutine mpp_redistribute_r8_4D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
real(8), intent(in) :: field_in (:,:,:,:)
real(8), intent(out) :: field_out(:,:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
real(8) :: field3D_in (size(field_in, 1),size(field_in, 2),size(field_in ,3)*size(field_in ,4))
real(8) :: field3D_out(size(field_out,1),size(field_out,2),size(field_out,3)*size(field_out,4))
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_r8_4D
subroutine mpp_redistribute_r8_5D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
real(8), intent(in) :: field_in (:,:,:,:,:)
real(8), intent(out) :: field_out(:,:,:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
real(8) :: field3D_in (size(field_in, 1),size(field_in, 2),size(field_in ,3)*size(field_in ,4)*size(field_in ,5))
real(8) :: field3D_out(size(field_out,1),size(field_out,2),size(field_out,3)*size(field_out,4)*size(field_out,5))
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_r8_5D
! Vector-field routines are generated only for real kinds (the flag is set to false for integer types).
!vector fields
subroutine mpp_update_domain2D_r8_2Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 2D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:), fieldy(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),1)
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),1)
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domain2D_r8_2Dv
subroutine mpp_update_domain2D_r8_3Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 3D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo, ntile
integer :: grid_offset_type
logical :: exchange_uv
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES),save :: f_addrsx=-9999, f_addrsy=-9999
logical :: do_update, is_complete
integer, save :: isize(2)=0,jsize(2)=0,ke=0,l_size=0, offset_type=0, list=0
integer, save :: whalosz, ehalosz, shalosz, nhalosz
integer :: tile, max_ntile
integer :: position_x, position_y
logical :: set_mismatch
character(len=3) :: text
real(8) :: d_type
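! d_type is never assigned; it is passed to mpp_do_check/mpp_do_update only so the
! generic interfaces resolve to their real(8) implementations.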
type(overlapSpec), pointer :: updatex => NULL()
type(overlapSpec), pointer :: updatey => NULL()
type(overlapSpec), pointer :: checkx => NULL()
type(overlapSpec), pointer :: checky => NULL()
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
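! The halo widths to refresh default to the full halos of the domain and may not
! exceed them in magnitude.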
grid_offset_type = AGRID
if( PRESENT(gridtype) ) grid_offset_type = gridtype
exchange_uv = .false.
if(grid_offset_type == DGRID_NE) then
exchange_uv = .true.
grid_offset_type = CGRID_NE
else if( grid_offset_type == DGRID_SW ) then
exchange_uv = .true.
grid_offset_type = CGRID_SW
end if
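! D-grid components sit on the faces complementary to their C-grid counterparts,
! so they are updated with the C-grid overlap lists with the roles of the two
! components swapped (exchange_uv).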
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_UPDATE_3D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument tile_count should be present when number of tiles on some pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_3D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(list == 1 .AND. tile == 1)then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); ke = size(fieldx,3)
isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
offset_type = grid_offset_type
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) /= size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) /= size(fieldx,2))
set_mismatch = set_mismatch .OR. (ke /= size(fieldx,3))
set_mismatch = set_mismatch .OR. (isize(2) /= size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) /= size(fieldy,2))
set_mismatch = set_mismatch .OR. (ke /= size(fieldy,3))
set_mismatch = set_mismatch .OR. (grid_offset_type /= offset_type)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_3D_V: Incompatible field at count '//text//' for group vector update.' )
end if
end if
if(is_complete) then
l_size = list
list = 0
end if
if(do_update)then
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case default
call mpp_error(FATAL, "mpp_update_domains2D.h: invalid value of grid_offset_type")
end select
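! position_x/position_y select the overlap lists matching the staggering of each
! component: cell centres for A-grid, the shared NE corner for B-grid, and the
! east/north faces for C-grid.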
if(debug_update_level .NE. NO_CHECK) then
checkx => search_check_overlap(domain, position_x)
checky => search_check_overlap(domain, position_y)
if(ASSOCIATED(checkx)) then
if(exchange_uv) then
call mpp_do_check(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, &
checky, checkx, d_type, ke, flags, name)
else
call mpp_do_check(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, &
checkx, checky, d_type, ke, flags, name)
end if
endif
endif
updatex => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_x)
updatey => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_y)
if(exchange_uv) then
call mpp_do_update(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, updatey, updatex, &
d_type,ke, grid_offset_type, flags)
else
call mpp_do_update(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, updatex, updatey, &
d_type,ke,grid_offset_type, flags)
end if
end if
l_size=0; f_addrsx=-9999; f_addrsy=-9999; isize=0; jsize=0; ke=0
end if
return
end subroutine mpp_update_domain2D_r8_3Dv
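! A minimal vector-update sketch (hypothetical array names u and v): the two
! components are passed as a pair so that rotations and sign changes at tile
! boundaries are applied consistently,
!
!   call mpp_update_domains( u, v, domain, gridtype=CGRID_NE )
!
! BGRID_NE, DGRID_NE, AGRID, etc. select the other supported staggerings.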
subroutine mpp_update_domain2D_r8_4Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 4D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4))
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domain2D_r8_4Dv
subroutine mpp_update_domain2D_r8_5Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 5D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:,:,:,:), fieldy(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4)*size(fieldx,5))
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4)*size(fieldy,5))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domain2D_r8_5Dv
# 775 "../mpp/include/mpp_domains_misc.inc" 2
# 797
# 1 "../mpp/include/mpp_update_domains2D.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_update_domain2D_i8_2D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 2D field whose computational domains have been computed
integer(8), intent(inout) :: field(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(8) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domain2D_i8_2D
subroutine mpp_update_domain2D_i8_3D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 3D field whose computational domains have been computed
integer(8), intent(inout) :: field(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer :: update_position, update_whalo, update_ehalo, update_shalo, update_nhalo, ntile
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES),save :: f_addrs=-9999
integer :: tile, max_ntile
character(len=3) :: text
logical :: set_mismatch, is_complete
logical :: do_update
integer, save :: isize=0, jsize=0, ke=0, l_size=0, list=0
integer, save :: pos, whalosz, ehalosz, shalosz, nhalosz
integer(8) :: d_type
type(overlapSpec), pointer :: update => NULL()
type(overlapSpec), pointer :: check => NULL()
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
!--- when there is a NINETY or MINUS_NINETY rotation for some contact, scalar data cannot be on an E- or N-cell,
if(present(position)) then
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
call mpp_error(FATAL, 'MPP_UPDATE_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'cannot use the scalar version of update_domains for data on an E- or N-cell' )
end if
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_UPDATE_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
update_position = CENTER
if(present(position)) update_position = position
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ke = size(field,3); pos = update_position
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize /= size(field,1))
set_mismatch = set_mismatch .OR. (jsize /= size(field,2))
set_mismatch = set_mismatch .OR. (ke /= size(field,3))
set_mismatch = set_mismatch .OR. (update_position /= pos)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
if(debug_update_level .NE. NO_CHECK) then
check => search_check_overlap(domain, update_position)
if(ASSOCIATED(check) ) then
call mpp_do_check(f_addrs(1:l_size,1:ntile), domain, check, d_type, ke, flags, name )
endif
endif
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
!call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, &
! b_addrs(1:l_size,1:ntile), bsize, flags)
if ( PRESENT ( flags ) ) then
call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, flags )
else
call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke )
endif
end if
l_size=0; f_addrs=-9999; isize=0; jsize=0; ke=0
endif
return
end subroutine mpp_update_domain2D_i8_3D
subroutine mpp_update_domain2D_i8_4D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 4D field whose computational domains have been computed
integer(8), intent(inout) :: field(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domain2D_i8_4D
subroutine mpp_update_domain2D_i8_5D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 5D field whose computational domains have been computed
integer(8), intent(inout) :: field(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domain2D_i8_5D
subroutine mpp_redistribute_i8_2D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
integer(8), intent(in) :: field_in (:,:)
integer(8), intent(out) :: field_out(:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
integer(8) :: field3D_in (size(field_in, 1),size(field_in, 2),1)
integer(8) :: field3D_out(size(field_out,1),size(field_out,2),1)
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_i8_2D
subroutine mpp_redistribute_i8_3D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
integer(8), intent(in) :: field_in (:,:,:)
integer(8), intent(out) :: field_out(:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
type(DomainCommunicator2D),pointer,optional :: dc_handle
type(DomainCommunicator2D),pointer,save :: d_comm =>NULL()
logical :: do_redist,free_comm
integer :: lsize
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: l_addrs_in=-9999, l_addrs_out=-9999
integer, save :: isize_in=0,jsize_in=0,ke_in=0,l_size=0
integer, save :: isize_out=0,jsize_out=0,ke_out=0
logical :: set_mismatch
integer :: ke
character(len=2) :: text
integer(8) :: d_type
integer(8) :: floc_in, floc_out
floc_in = 0
floc_out = 0
if(domain_in%initialized) floc_in = LOC(field_in)
if(domain_out%initialized) floc_out = LOC(field_out)
if(present(position)) then
if(position .NE. CENTER) call mpp_error( FATAL, &
'MPP_REDISTRIBUTE_3Dold_: only position = CENTER is implemented, contact author')
endif
do_redist=.true.; if(PRESENT(complete))do_redist=complete
free_comm=.false.; if(PRESENT(free))free_comm=free
if(free_comm)then
l_addrs_in(1) = floc_in; l_addrs_out(1) = floc_out
if(l_addrs_out(1)>0)then
ke = size(field_out,3)
else
ke = size(field_in,3)
end if
lsize=1; if(PRESENT(list_size))lsize=list_size
call mpp_redistribute_free_comm(domain_in,l_addrs_in(1),domain_out,l_addrs_out(1),ke,lsize)
else
l_size = l_size+1
if(l_size > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_REDISTRIBUTE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group redistribute.' )
end if
l_addrs_in(l_size) = floc_in; l_addrs_out(l_size) = floc_out
if(l_size == 1)then
if(l_addrs_in(l_size) > 0)then
isize_in=size(field_in,1); jsize_in=size(field_in,2); ke_in = size(field_in,3)
end if
if(l_addrs_out(l_size) > 0)then
isize_out=size(field_out,1); jsize_out=size(field_out,2); ke_out = size(field_out,3)
endif
else
set_mismatch = .false.
set_mismatch = l_addrs_in(l_size) == 0 .AND. l_addrs_in(l_size-1) /= 0
set_mismatch = set_mismatch .OR. (l_addrs_in(l_size) > 0 .AND. l_addrs_in(l_size-1) == 0)
set_mismatch = set_mismatch .OR. (l_addrs_out(l_size) == 0 .AND. l_addrs_out(l_size-1) /= 0)
set_mismatch = set_mismatch .OR. (l_addrs_out(l_size) > 0 .AND. l_addrs_out(l_size-1) == 0)
if(l_addrs_in(l_size) > 0)then
set_mismatch = set_mismatch .OR. (isize_in /= size(field_in,1))
set_mismatch = set_mismatch .OR. (jsize_in /= size(field_in,2))
set_mismatch = set_mismatch .OR. (ke_in /= size(field_in,3))
endif
if(l_addrs_out(l_size) > 0)then
set_mismatch = set_mismatch .OR. (isize_out /= size(field_out,1))
set_mismatch = set_mismatch .OR. (jsize_out /= size(field_out,2))
set_mismatch = set_mismatch .OR. (ke_out /= size(field_out,3))
endif
if(set_mismatch)then
write( text,'(i2)' ) l_size
call mpp_error(FATAL,'MPP_REDISTRIBUTE_3D: Incompatible field at count '//text//' for group redistribute.' )
endif
endif
if(do_redist)then
if(PRESENT(dc_handle))d_comm =>dc_handle ! User has kept pointer to d_comm
if(.not.ASSOCIATED(d_comm))then ! d_comm needs initialization or lookup
d_comm =>mpp_redistribute_init_comm(domain_in,l_addrs_in(1:l_size),domain_out,l_addrs_out(1:l_size), &
isize_in,jsize_in,ke_in,isize_out,jsize_out,ke_out)
if(PRESENT(dc_handle))dc_handle =>d_comm ! User wants to keep pointer to d_comm
endif
call mpp_do_redistribute( l_addrs_in(1:l_size), l_addrs_out(1:l_size), d_comm, d_type )
l_size=0; l_addrs_in=-9999; l_addrs_out=-9999
isize_in=0; jsize_in=0; ke_in=0
isize_out=0; jsize_out=0; ke_out=0
d_comm =>NULL()
endif
endif
end subroutine mpp_redistribute_i8_3D
subroutine mpp_redistribute_i8_4D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
integer(8), intent(in) :: field_in (:,:,:,:)
integer(8), intent(out) :: field_out(:,:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
integer(8) :: field3D_in (size(field_in, 1),size(field_in, 2),size(field_in ,3)*size(field_in ,4))
integer(8) :: field3D_out(size(field_out,1),size(field_out,2),size(field_out,3)*size(field_out,4))
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_i8_4D
subroutine mpp_redistribute_i8_5D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
integer(8), intent(in) :: field_in (:,:,:,:,:)
integer(8), intent(out) :: field_out(:,:,:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
integer(8) :: field3D_in (size(field_in, 1),size(field_in, 2),size(field_in ,3)*size(field_in ,4)*size(field_in ,5))
integer(8) :: field3D_out(size(field_out,1),size(field_out,2),size(field_out,3)*size(field_out,4)*size(field_out,5))
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_i8_5D
# 623
# 819 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_update_domains2D.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_update_domain2D_r4_2D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 2D field whose computational domains have been computed
real(4), intent(inout) :: field(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domain2D_r4_2D
subroutine mpp_update_domain2D_r4_3D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 3D field whose computational domains have been computed
real(4), intent(inout) :: field(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer :: update_position, update_whalo, update_ehalo, update_shalo, update_nhalo, ntile
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES),save :: f_addrs=-9999
integer :: tile, max_ntile
character(len=3) :: text
logical :: set_mismatch, is_complete
logical :: do_update
integer, save :: isize=0, jsize=0, ke=0, l_size=0, list=0
integer, save :: pos, whalosz, ehalosz, shalosz, nhalosz
real(4) :: d_type
type(overlapSpec), pointer :: update => NULL()
type(overlapSpec), pointer :: check => NULL()
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
!--- when there is a NINETY or MINUS_NINETY rotation for some contact, scalar data cannot be on an E- or N-cell,
if(present(position)) then
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
call mpp_error(FATAL, 'MPP_UPDATE_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'cannot use the scalar version of update_domains for data on an E- or N-cell' )
end if
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_UPDATE_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
update_position = CENTER
if(present(position)) update_position = position
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ke = size(field,3); pos = update_position
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize /= size(field,1))
set_mismatch = set_mismatch .OR. (jsize /= size(field,2))
set_mismatch = set_mismatch .OR. (ke /= size(field,3))
set_mismatch = set_mismatch .OR. (update_position /= pos)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
if(debug_update_level .NE. NO_CHECK) then
check => search_check_overlap(domain, update_position)
if(ASSOCIATED(check) ) then
call mpp_do_check(f_addrs(1:l_size,1:ntile), domain, check, d_type, ke, flags, name )
endif
endif
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
!call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, &
! b_addrs(1:l_size,1:ntile), bsize, flags)
if ( PRESENT ( flags ) ) then
call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, flags )
else
call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke )
endif
end if
l_size=0; f_addrs=-9999; isize=0; jsize=0; ke=0
endif
return
end subroutine mpp_update_domain2D_r4_3D
subroutine mpp_update_domain2D_r4_4D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 4D field whose computational domains have been computed
real(4), intent(inout) :: field(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domain2D_r4_4D
subroutine mpp_update_domain2D_r4_5D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 5D field whose computational domains have been computed
real(4), intent(inout) :: field(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domain2D_r4_5D
subroutine mpp_redistribute_r4_2D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
real(4), intent(in) :: field_in (:,:)
real(4), intent(out) :: field_out(:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
real(4) :: field3D_in (size(field_in, 1),size(field_in, 2),1)
real(4) :: field3D_out(size(field_out,1),size(field_out,2),1)
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_r4_2D
subroutine mpp_redistribute_r4_3D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
real(4), intent(in) :: field_in (:,:,:)
real(4), intent(out) :: field_out(:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
type(DomainCommunicator2D),pointer,optional :: dc_handle
type(DomainCommunicator2D),pointer,save :: d_comm =>NULL()
logical :: do_redist,free_comm
integer :: lsize
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: l_addrs_in=-9999, l_addrs_out=-9999
integer, save :: isize_in=0,jsize_in=0,ke_in=0,l_size=0
integer, save :: isize_out=0,jsize_out=0,ke_out=0
logical :: set_mismatch
integer :: ke
character(len=2) :: text
real(4) :: d_type
integer(8) :: floc_in, floc_out
floc_in = 0
floc_out = 0
if(domain_in%initialized) floc_in = LOC(field_in)
if(domain_out%initialized) floc_out = LOC(field_out)
if(present(position)) then
if(position .NE. CENTER) call mpp_error( FATAL, &
'MPP_REDISTRIBUTE_3Dold_: only position = CENTER is implemented, contact author')
endif
do_redist=.true.; if(PRESENT(complete))do_redist=complete
free_comm=.false.; if(PRESENT(free))free_comm=free
if(free_comm)then
l_addrs_in(1) = floc_in; l_addrs_out(1) = floc_out
if(l_addrs_out(1)>0)then
ke = size(field_out,3)
else
ke = size(field_in,3)
end if
lsize=1; if(PRESENT(list_size))lsize=list_size
call mpp_redistribute_free_comm(domain_in,l_addrs_in(1),domain_out,l_addrs_out(1),ke,lsize)
else
l_size = l_size+1
if(l_size > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_REDISTRIBUTE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group redistribute.' )
end if
l_addrs_in(l_size) = floc_in; l_addrs_out(l_size) = floc_out
if(l_size == 1)then
if(l_addrs_in(l_size) > 0)then
isize_in=size(field_in,1); jsize_in=size(field_in,2); ke_in = size(field_in,3)
end if
if(l_addrs_out(l_size) > 0)then
isize_out=size(field_out,1); jsize_out=size(field_out,2); ke_out = size(field_out,3)
endif
else
set_mismatch = .false.
set_mismatch = l_addrs_in(l_size) == 0 .AND. l_addrs_in(l_size-1) /= 0
set_mismatch = set_mismatch .OR. (l_addrs_in(l_size) > 0 .AND. l_addrs_in(l_size-1) == 0)
set_mismatch = set_mismatch .OR. (l_addrs_out(l_size) == 0 .AND. l_addrs_out(l_size-1) /= 0)
set_mismatch = set_mismatch .OR. (l_addrs_out(l_size) > 0 .AND. l_addrs_out(l_size-1) == 0)
if(l_addrs_in(l_size) > 0)then
set_mismatch = set_mismatch .OR. (isize_in /= size(field_in,1))
set_mismatch = set_mismatch .OR. (jsize_in /= size(field_in,2))
set_mismatch = set_mismatch .OR. (ke_in /= size(field_in,3))
endif
if(l_addrs_out(l_size) > 0)then
set_mismatch = set_mismatch .OR. (isize_out /= size(field_out,1))
set_mismatch = set_mismatch .OR. (jsize_out /= size(field_out,2))
set_mismatch = set_mismatch .OR. (ke_out /= size(field_out,3))
endif
if(set_mismatch)then
write( text,'(i2)' ) l_size
call mpp_error(FATAL,'MPP_REDISTRIBUTE_3D: Incompatible field at count '//text//' for group redistribute.' )
endif
endif
if(do_redist)then
if(PRESENT(dc_handle))d_comm =>dc_handle ! User has kept pointer to d_comm
if(.not.ASSOCIATED(d_comm))then ! d_comm needs initialization or lookup
d_comm =>mpp_redistribute_init_comm(domain_in,l_addrs_in(1:l_size),domain_out,l_addrs_out(1:l_size), &
isize_in,jsize_in,ke_in,isize_out,jsize_out,ke_out)
if(PRESENT(dc_handle))dc_handle =>d_comm ! User wants to keep pointer to d_comm
endif
call mpp_do_redistribute( l_addrs_in(1:l_size), l_addrs_out(1:l_size), d_comm, d_type )
l_size=0; l_addrs_in=-9999; l_addrs_out=-9999
isize_in=0; jsize_in=0; ke_in=0
isize_out=0; jsize_out=0; ke_out=0
d_comm =>NULL()
endif
endif
end subroutine mpp_redistribute_r4_3D
subroutine mpp_redistribute_r4_4D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
real(4), intent(in) :: field_in (:,:,:,:)
real(4), intent(out) :: field_out(:,:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
real(4) :: field3D_in (size(field_in, 1),size(field_in, 2),size(field_in ,3)*size(field_in ,4))
real(4) :: field3D_out(size(field_out,1),size(field_out,2),size(field_out,3)*size(field_out,4))
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_r4_4D
subroutine mpp_redistribute_r4_5D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
real(4), intent(in) :: field_in (:,:,:,:,:)
real(4), intent(out) :: field_out(:,:,:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
real(4) :: field3D_in (size(field_in, 1),size(field_in, 2),size(field_in ,3)*size(field_in ,4)*size(field_in ,5))
real(4) :: field3D_out(size(field_out,1),size(field_out,2),size(field_out,3)*size(field_out,4)*size(field_out,5))
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_r4_5D
! Vector-field routines are generated only for real kinds (the flag is set to false for integer types).
!vector fields
subroutine mpp_update_domain2D_r4_2Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 2D field whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:), fieldy(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),1)
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),1)
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domain2D_r4_2Dv
subroutine mpp_update_domain2D_r4_3Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 3D field whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo, ntile
integer :: grid_offset_type
logical :: exchange_uv
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES),save :: f_addrsx=-9999, f_addrsy=-9999
logical :: do_update, is_complete
integer, save :: isize(2)=0,jsize(2)=0,ke=0,l_size=0, offset_type=0, list=0
integer, save :: whalosz, ehalosz, shalosz, nhalosz
integer :: tile, max_ntile
integer :: position_x, position_y
logical :: set_mismatch
character(len=3) :: text
real(4) :: d_type
type(overlapSpec), pointer :: updatex => NULL()
type(overlapSpec), pointer :: updatey => NULL()
type(overlapSpec), pointer :: checkx => NULL()
type(overlapSpec), pointer :: checky => NULL()
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
grid_offset_type = AGRID
if( PRESENT(gridtype) ) grid_offset_type = gridtype
exchange_uv = .false.
if(grid_offset_type == DGRID_NE) then
exchange_uv = .true.
grid_offset_type = CGRID_NE
else if( grid_offset_type == DGRID_SW ) then
exchange_uv = .true.
grid_offset_type = CGRID_SW
end if
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_UPDATE_3D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument tile_count should be present when number of tiles on some pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_3D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(list == 1 .AND. tile == 1)then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); ke = size(fieldx,3)
isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
offset_type = grid_offset_type
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) /= size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) /= size(fieldx,2))
set_mismatch = set_mismatch .OR. (ke /= size(fieldx,3))
set_mismatch = set_mismatch .OR. (isize(2) /= size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) /= size(fieldy,2))
set_mismatch = set_mismatch .OR. (ke /= size(fieldy,3))
set_mismatch = set_mismatch .OR. (grid_offset_type /= offset_type)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_3D_V: Incompatible field at count '//text//' for group vector update.' )
end if
end if
if(is_complete) then
l_size = list
list = 0
end if
if(do_update)then
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case default
call mpp_error(FATAL, "mpp_update_domains2D.h: invalid value of grid_offset_type")
end select
if(debug_update_level .NE. NO_CHECK) then
checkx => search_check_overlap(domain, position_x)
checky => search_check_overlap(domain, position_y)
if(ASSOCIATED(checkx)) then
if(exchange_uv) then
call mpp_do_check(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, &
checky, checkx, d_type, ke, flags, name)
else
call mpp_do_check(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, &
checkx, checky, d_type, ke, flags, name)
end if
endif
endif
updatex => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_x)
updatey => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_y)
if(exchange_uv) then
call mpp_do_update(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, updatey, updatex, &
d_type,ke, grid_offset_type, flags)
else
call mpp_do_update(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, updatex, updatey, &
d_type,ke,grid_offset_type, flags)
end if
end if
l_size=0; f_addrsx=-9999; f_addrsy=-9999; isize=0; jsize=0; ke=0
end if
return
end subroutine mpp_update_domain2D_r4_3Dv
subroutine mpp_update_domain2D_r4_4Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 4D field whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4))
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domain2D_r4_4Dv
subroutine mpp_update_domain2D_r4_5Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 5D field whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:,:,:,:), fieldy(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4)*size(fieldx,5))
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4)*size(fieldy,5))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domain2D_r4_5Dv
# 853 "../mpp/include/mpp_domains_misc.inc" 2
# 876
# 1 "../mpp/include/mpp_update_domains2D.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_update_domain2D_i4_2D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 2D field whose computational domains have been computed
integer(4), intent(inout) :: field(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(4) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domain2D_i4_2D
subroutine mpp_update_domain2D_i4_3D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 3D field whose computational domains have been computed
integer(4), intent(inout) :: field(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer :: update_position, update_whalo, update_ehalo, update_shalo, update_nhalo, ntile
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES),save :: f_addrs=-9999
integer :: tile, max_ntile
character(len=3) :: text
logical :: set_mismatch, is_complete
logical :: do_update
integer, save :: isize=0, jsize=0, ke=0, l_size=0, list=0
integer, save :: pos, whalosz, ehalosz, shalosz, nhalosz
integer(4) :: d_type
type(overlapSpec), pointer :: update => NULL()
type(overlapSpec), pointer :: check => NULL()
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
  !--- when there is NINETY or MINUS_NINETY rotation for some contact, scalar data can not be on E or N-cell
if(present(position)) then
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
          call mpp_error(FATAL, 'MPP_UPDATE_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'can not use scalar version update_domain for data on E or N-cell' )
end if
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_UPDATE_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
update_position = CENTER
if(present(position)) update_position = position
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ke = size(field,3); pos = update_position
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize /= size(field,1))
set_mismatch = set_mismatch .OR. (jsize /= size(field,2))
set_mismatch = set_mismatch .OR. (ke /= size(field,3))
set_mismatch = set_mismatch .OR. (update_position /= pos)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
if(debug_update_level .NE. NO_CHECK) then
check => search_check_overlap(domain, update_position)
if(ASSOCIATED(check) ) then
call mpp_do_check(f_addrs(1:l_size,1:ntile), domain, check, d_type, ke, flags, name )
endif
endif
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
!call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, &
! b_addrs(1:l_size,1:ntile), bsize, flags)
if ( PRESENT ( flags ) ) then
call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, flags )
else
call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke )
endif
end if
l_size=0; f_addrs=-9999; isize=0; jsize=0; ke=0
endif
return
end subroutine mpp_update_domain2D_i4_3D
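!--- Usage note: the list/l_size bookkeeping above batches several fields into
!--- one halo exchange. A minimal sketch of a grouped update (the field names
!--- a, b, c are illustrative; grouped fields must agree in size, halo widths
!--- and position, as enforced by the set_mismatch checks):
!
!     call mpp_update_domains( a, domain, complete=.false. )
!     call mpp_update_domains( b, domain, complete=.false. )
!     call mpp_update_domains( c, domain, complete=.true. )  ! exchanges a, b, c together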
subroutine mpp_update_domain2D_i4_4D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 4D field whose computational domains have been computed
integer(4), intent(inout) :: field(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domain2D_i4_4D
subroutine mpp_update_domain2D_i4_5D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 5D field whose computational domains have been computed
integer(4), intent(inout) :: field(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domain2D_i4_5D
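!--- Usage note: the optional whalo/ehalo/shalo/nhalo arguments restrict the
!--- update to part of the halo; their magnitude may not exceed the halo widths
!--- the domain was defined with. A minimal sketch (u is an illustrative field):
!
!     call mpp_update_domains( u, domain, whalo=1, ehalo=1, shalo=1, nhalo=1 )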
subroutine mpp_redistribute_i4_2D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
integer(4), intent(in) :: field_in (:,:)
integer(4), intent(out) :: field_out(:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
integer(4) :: field3D_in (size(field_in, 1),size(field_in, 2),1)
integer(4) :: field3D_out(size(field_out,1),size(field_out,2),1)
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_i4_2D
subroutine mpp_redistribute_i4_3D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
integer(4), intent(in) :: field_in (:,:,:)
integer(4), intent(out) :: field_out(:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
type(DomainCommunicator2D),pointer,optional :: dc_handle
type(DomainCommunicator2D),pointer,save :: d_comm =>NULL()
logical :: do_redist,free_comm
integer :: lsize
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: l_addrs_in=-9999, l_addrs_out=-9999
integer, save :: isize_in=0,jsize_in=0,ke_in=0,l_size=0
integer, save :: isize_out=0,jsize_out=0,ke_out=0
logical :: set_mismatch
integer :: ke
character(len=2) :: text
integer(4) :: d_type
integer(8) :: floc_in, floc_out
floc_in = 0
floc_out = 0
if(domain_in%initialized) floc_in = LOC(field_in)
if(domain_out%initialized) floc_out = LOC(field_out)
if(present(position)) then
if(position .NE. CENTER) call mpp_error( FATAL, &
         'MPP_REDISTRIBUTE_3D: only position = CENTER is implemented, contact author')
endif
do_redist=.true.; if(PRESENT(complete))do_redist=complete
free_comm=.false.; if(PRESENT(free))free_comm=free
if(free_comm)then
l_addrs_in(1) = floc_in; l_addrs_out(1) = floc_out
if(l_addrs_out(1)>0)then
ke = size(field_out,3)
else
ke = size(field_in,3)
end if
lsize=1; if(PRESENT(list_size))lsize=list_size
call mpp_redistribute_free_comm(domain_in,l_addrs_in(1),domain_out,l_addrs_out(1),ke,lsize)
else
l_size = l_size+1
if(l_size > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_REDISTRIBUTE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group redistribute.' )
end if
l_addrs_in(l_size) = floc_in; l_addrs_out(l_size) = floc_out
if(l_size == 1)then
if(l_addrs_in(l_size) > 0)then
isize_in=size(field_in,1); jsize_in=size(field_in,2); ke_in = size(field_in,3)
end if
if(l_addrs_out(l_size) > 0)then
isize_out=size(field_out,1); jsize_out=size(field_out,2); ke_out = size(field_out,3)
endif
else
set_mismatch = .false.
set_mismatch = l_addrs_in(l_size) == 0 .AND. l_addrs_in(l_size-1) /= 0
set_mismatch = set_mismatch .OR. (l_addrs_in(l_size) > 0 .AND. l_addrs_in(l_size-1) == 0)
set_mismatch = set_mismatch .OR. (l_addrs_out(l_size) == 0 .AND. l_addrs_out(l_size-1) /= 0)
set_mismatch = set_mismatch .OR. (l_addrs_out(l_size) > 0 .AND. l_addrs_out(l_size-1) == 0)
if(l_addrs_in(l_size) > 0)then
set_mismatch = set_mismatch .OR. (isize_in /= size(field_in,1))
set_mismatch = set_mismatch .OR. (jsize_in /= size(field_in,2))
set_mismatch = set_mismatch .OR. (ke_in /= size(field_in,3))
endif
if(l_addrs_out(l_size) > 0)then
set_mismatch = set_mismatch .OR. (isize_out /= size(field_out,1))
set_mismatch = set_mismatch .OR. (jsize_out /= size(field_out,2))
set_mismatch = set_mismatch .OR. (ke_out /= size(field_out,3))
endif
if(set_mismatch)then
write( text,'(i2)' ) l_size
call mpp_error(FATAL,'MPP_REDISTRIBUTE_3D: Incompatible field at count '//text//' for group redistribute.' )
endif
endif
if(do_redist)then
if(PRESENT(dc_handle))d_comm =>dc_handle ! User has kept pointer to d_comm
if(.not.ASSOCIATED(d_comm))then ! d_comm needs initialization or lookup
d_comm =>mpp_redistribute_init_comm(domain_in,l_addrs_in(1:l_size),domain_out,l_addrs_out(1:l_size), &
isize_in,jsize_in,ke_in,isize_out,jsize_out,ke_out)
if(PRESENT(dc_handle))dc_handle =>d_comm ! User wants to keep pointer to d_comm
endif
call mpp_do_redistribute( l_addrs_in(1:l_size), l_addrs_out(1:l_size), d_comm, d_type )
l_size=0; l_addrs_in=-9999; l_addrs_out=-9999
isize_in=0; jsize_in=0; ke_in=0
isize_out=0; jsize_out=0; ke_out=0
d_comm =>NULL()
endif
endif
end subroutine mpp_redistribute_i4_3D
subroutine mpp_redistribute_i4_4D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
integer(4), intent(in) :: field_in (:,:,:,:)
integer(4), intent(out) :: field_out(:,:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
integer(4) :: field3D_in (size(field_in, 1),size(field_in, 2),size(field_in ,3)*size(field_in ,4))
integer(4) :: field3D_out(size(field_out,1),size(field_out,2),size(field_out,3)*size(field_out,4))
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_i4_4D
subroutine mpp_redistribute_i4_5D( domain_in, field_in, domain_out, field_out, complete, free, list_size, dc_handle, position )
type(domain2D), intent(in) :: domain_in, domain_out
integer(4), intent(in) :: field_in (:,:,:,:,:)
integer(4), intent(out) :: field_out(:,:,:,:,:)
logical, intent(in), optional :: complete, free
integer, intent(in), optional :: list_size
integer, intent(in), optional :: position
integer(4) :: field3D_in (size(field_in, 1),size(field_in, 2),size(field_in ,3)*size(field_in ,4)*size(field_in ,5))
integer(4) :: field3D_out(size(field_out,1),size(field_out,2),size(field_out,3)*size(field_out,4)*size(field_out,5))
type(DomainCommunicator2D),pointer,optional :: dc_handle
pointer( ptr_in, field3D_in )
pointer( ptr_out, field3D_out )
ptr_in = 0
ptr_out = 0
if(domain_in%initialized) ptr_in = LOC(field_in )
if(domain_out%initialized) ptr_out = LOC(field_out)
call mpp_redistribute( domain_in, field3D_in, domain_out, field3D_out, complete, free, list_size, dc_handle, position )
return
end subroutine mpp_redistribute_i4_5D
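!--- Usage note: mpp_redistribute moves a field between two decompositions of
!--- the same global grid; the wrappers above only take LOC() of a field when
!--- the corresponding domain is initialized on the calling PE. A minimal
!--- sketch (domain_a, domain_b, u_a, u_b are illustrative names; u_a is
!--- dimensioned on domain_a's data domain and u_b on domain_b's):
!
!     call mpp_redistribute( domain_a, u_a, domain_b, u_b )
!     ! optionally release the cached communicator once it is no longer needed:
!     call mpp_redistribute( domain_a, u_a, domain_b, u_b, free=.true. )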
# 623
# 897 "../mpp/include/mpp_domains_misc.inc" 2
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_START_UPDATE_DOMAINS and MPP_COMPLETE_UPDATE_DOMAINS: !
! fill halos for 2D decomposition --- non-blocking !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
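!
! A minimal usage sketch of the non-blocking interface defined below (the
! names id and u are illustrative; the same field, halo widths, flags and
! position must be passed to the matching start and complete calls):
!
!     integer :: id
!     id = mpp_start_update_domains( u, domain )         ! post sends/receives
!     ! ... computation that does not touch u's halo points ...
!     call mpp_complete_update_domains( id, u, domain )  ! wait and unpack halos
!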
# 1 "../mpp/include/mpp_update_domains2D_nonblock.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_start_update_domain2D_r8_2D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
type(domain2D), intent(inout) :: domain
real(8), intent(inout) :: field(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r8_2D
real(8) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_r8_2D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
return
end function mpp_start_update_domain2D_r8_2D
function mpp_start_update_domain2D_r8_3D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
type(domain2D), intent(inout) :: domain
real(8), intent(inout) :: field(domain%x(1)%data%begin:,domain%y(1)%data%begin:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r8_3D
!--- local variables
integer :: current_id, ke_max
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo, update_flags, update_position
integer :: tile, max_ntile, ntile, n, l
logical :: set_mismatch, is_complete
logical :: do_update, reuse_id_update
integer, save :: isize=0, jsize=0, l_size=0, list=0
integer, save :: pos, whalosz, ehalosz, shalosz, nhalosz, update_flags_saved
character(len=128) :: text, field_name
integer, save :: ke_list(MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrs(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
type(overlapSpec), pointer :: update => NULL()
real(8) :: d_type
field_name = "unknown"
if(present(name)) field_name = name
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_position = CENTER
if(present(position)) then
     !--- when there is NINETY or MINUS_NINETY rotation for some contact, scalar data can not be on E or N-cell
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
          call mpp_error(FATAL, 'MPP_START_UPDATE_DOMAINS_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'can not use scalar version update_domain for data on E or N-cell' )
update_position = position
endif
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list,tile) = LOC(field)
ke_list(list,tile) = size(field,3)
  !make sure mpp_start_update_domains has not already been called for this field. Currently we only check the address at tile = 1.
if( tile == 1 ) then
do n = 1, current_id_update
do l = 1, nonblock_data(n)%nfields
if( f_addrs(list,tile) == nonblock_data(n)%field_addrs(l)) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_3D is called again before calling ' //&
               'mpp_complete_UPDATE_DOMAINS_3D for field '//trim(field_name))
endif
enddo
enddo
endif
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); pos = update_position
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
update_flags_saved = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize /= size(field,1))
set_mismatch = set_mismatch .OR. (jsize /= size(field,2))
set_mismatch = set_mismatch .OR. (update_position /= pos)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
set_mismatch = set_mismatch .OR. (update_flags_saved /= update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update) then
if(num_nonblock_group_update>0) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS: "// &
" can not be called in the middle of mpp_start_group_update/mpp_complete_group_update call")
num_update = num_update + 1
if( PRESENT(update_id) ) then
if( update_id < 1 .OR. update_id > MAX_NONBLOCK_UPDATE ) then
write( text,'(a,i8,a,i8)' ) 'optional argument update_id =', update_id, &
             ' is less than 1 or greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: '//trim(text))
endif
current_id = update_id
reuse_id_update = .true.
        !--- when reusing the update_id, make sure update_flags, halo size and update_position are still the same
if( nonblock_data(current_id)%update_flags .NE. update_flags .OR. &
nonblock_data(current_id)%update_whalo .NE. update_whalo .OR. &
nonblock_data(current_id)%update_ehalo .NE. update_ehalo .OR. &
nonblock_data(current_id)%update_shalo .NE. update_shalo .OR. &
nonblock_data(current_id)%update_nhalo .NE. update_nhalo .OR. &
nonblock_data(current_id)%update_position .NE. update_position ) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: mismatch for optional argument for field '//trim(field_name) )
endif
else
reuse_id_update = .false.
current_id_update = current_id_update + 1
if( current_id_update > MAX_NONBLOCK_UPDATE ) then
write( text,'(a,i8,a,i8)' ) 'num_fields =', current_id_update, &
              ' is greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: '//trim(text))
endif
current_id = current_id_update
nonblock_data(current_id)%update_flags = update_flags
nonblock_data(current_id)%update_whalo = update_whalo
nonblock_data(current_id)%update_ehalo = update_ehalo
nonblock_data(current_id)%update_shalo = update_shalo
nonblock_data(current_id)%update_nhalo = update_nhalo
nonblock_data(current_id)%update_position = update_position
nonblock_data(current_id)%recv_pos = nonblock_buffer_pos
endif
nonblock_data(current_id)%nfields = l_size
nonblock_data(current_id)%field_addrs(1:l_size) = f_addrs(1:l_size,1)
mpp_start_update_domain2D_r8_3D = current_id
ke_max = maxval(ke_list(1:l_size,1:ntile))
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
call mpp_start_do_update(current_id, f_addrs(1:l_size,1:ntile), domain, update, d_type, &
ke_max, ke_list(1:l_size,1:ntile), update_flags, reuse_id_update, field_name )
endif
l_size=0; f_addrs=-9999; isize=0; jsize=0; ke_list=0
else
if(present(update_id)) then
mpp_start_update_domain2D_r8_3D = update_id
else
mpp_start_update_domain2D_r8_3D = 0
endif
endif
end function mpp_start_update_domain2D_r8_3D
!##########################################################################################
function mpp_start_update_domain2D_r8_4D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
type(domain2D), intent(inout) :: domain
real(8), intent(inout) :: field(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r8_4D
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_r8_4D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
return
end function mpp_start_update_domain2D_r8_4D
!##########################################################################################
function mpp_start_update_domain2D_r8_5D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
type(domain2D), intent(inout) :: domain
real(8), intent(inout) :: field(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r8_5D
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_r8_5D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
return
end function mpp_start_update_domain2D_r8_5D
!##################################################################################
subroutine mpp_complete_update_domain2D_r8_2D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
real(8), intent(inout) :: field(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(8) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_r8_2D
!##################################################################################
subroutine mpp_complete_update_domain2D_r8_3D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
real(8), intent(inout) :: field(domain%x(1)%data%begin:,domain%y(1)%data%begin:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: update_position, update_flags
type(overlapSpec), pointer :: update => NULL()
integer :: tile, max_ntile, ntile, n
logical :: is_complete
logical :: do_update
integer :: ke_max
integer, save :: list=0, l_size=0
integer, save :: ke_list(MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrs(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
character(len=128) :: text
real(8) :: d_type
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
update_position = CENTER
if(present(position)) update_position = position
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_COMPLETE_UPDATE_DOMAINS_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_COMPLETE_UPDATE_DOMAINS_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
  !-- make sure the f_addrs match the ones registered by mpp_start_update_domains
if( tile == 1 ) then
if( nonblock_data(id_update)%field_addrs(list) .NE. f_addrs(list, tile)) then
call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of address between mpp_start_update_domains and mpp_complete_update_domains")
endif
endif
ke_list(list,tile) = size(field,3)
  !check the consistency of halo size, position and flags.
if( nonblock_data(id_update)%update_flags .NE. update_flags ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument flag between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_whalo .NE. update_whalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument whalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_ehalo .NE. update_ehalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument ehalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_shalo .NE. update_shalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument shalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_nhalo .NE. update_nhalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument nhalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_position .NE. update_position ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument position between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if(is_complete) then
l_size = list
list = 0
end if
if(do_update) then
if(l_size .NE. nonblock_data(id_update)%nfields) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of number of fields between mpp_start_update_domains and mpp_complete_update_domains")
num_update = num_update - 1
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) ) then
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
ke_max = maxval(ke_list(1:l_size,1:ntile))
call mpp_complete_do_update(id_update, f_addrs(1:l_size,1:ntile), domain, update, d_type, &
ke_max, ke_list(1:l_size,1:ntile), update_flags)
endif
nonblock_data(id_update)%nfields = 0
nonblock_data(id_update)%field_addrs(1:l_size) = 0
l_size=0; f_addrs=-9999; ke_list=0
!--- For the last call of mpp_complete_update_domains
!--- reset everything to init state
if( num_update == 0) then
do n = 1, current_id_update
call init_nonblock_type(nonblock_data(n))
enddo
current_id_update = 0
nonblock_buffer_pos = 0
endif
endif
end subroutine mpp_complete_update_domain2D_r8_3D
!##################################################################################
subroutine mpp_complete_update_domain2D_r8_4D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
real(8), intent(inout) :: field(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_r8_4D
!##################################################################################
subroutine mpp_complete_update_domain2D_r8_5D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
real(8), intent(inout) :: field(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_r8_5D
function mpp_start_update_domain2D_r8_2Dv( fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
!updates data domain of 2D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:), fieldy(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r8_2Dv
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),1)
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),1)
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
mpp_start_update_domain2D_r8_2Dv = mpp_start_update_domains(field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
return
end function mpp_start_update_domain2D_r8_2Dv
!###################################################################################
function mpp_start_update_domain2D_r8_3Dv( fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
!updates data domain of 3D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
!--- local variables
integer :: mpp_start_update_domain2D_r8_3Dv
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: grid_offset_type, position_x, position_y, update_flags, current_id
logical :: do_update, is_complete, set_mismatch
integer :: ntile, max_ntile, tile, ke_max, n, l
logical :: exchange_uv, reuse_id_update
character(len=128) :: text, field_name
integer, save :: whalosz, ehalosz, shalosz, nhalosz
integer, save :: isize(2)=0,jsize(2)=0,l_size=0, offset_type=0, list=0
integer, save :: ke_list (MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrsx(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
integer(8), save :: f_addrsy(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
type(overlapSpec), pointer :: updatex => NULL()
type(overlapSpec), pointer :: updatey => NULL()
real(8) :: d_type
field_name = "unknown"
if(present(name)) field_name = name
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
grid_offset_type = AGRID
if( PRESENT(gridtype) ) grid_offset_type = gridtype
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) ) then
update_flags = flags
! The following test is so that SCALAR_PAIR can be used alone with the
! same default update pattern as without.
if (BTEST(update_flags,SCALAR_BIT)) then
if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
.OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
end if
end if
if( BTEST(update_flags,NORTH) .AND. BTEST(domain%fold,NORTH) .AND. BTEST(grid_offset_type,SOUTH) ) &
call mpp_error( FATAL, 'MPP_START_UPDATE_DOMAINS_V: Incompatible grid offset and fold.' )
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument tile_count should be present when number of tiles on some pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if( tile == 1 ) then
do n = 1, current_id_update
do l = 1, nonblock_data(n)%nfields
if( f_addrsx(list,tile) == nonblock_data(n)%field_addrs(l) .OR. &
f_addrsy(list,tile) == nonblock_data(n)%field_addrs2(l)) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V is called again before calling ' //&
                 'mpp_complete_UPDATE_DOMAINS_V for field '//trim(field_name))
endif
enddo
enddo
endif
ke_list(list, tile) = size(fieldx,3)
if(list == 1 .AND. tile == 1)then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2)
isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
offset_type = grid_offset_type
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) /= size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) /= size(fieldx,2))
set_mismatch = set_mismatch .OR. (isize(2) /= size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) /= size(fieldy,2))
set_mismatch = set_mismatch .OR. (grid_offset_type /= offset_type)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: Incompatible field at count '//text//' for group vector update.' )
end if
end if
if(is_complete) then
l_size = list
list = 0
end if
if(do_update)then
if(num_nonblock_group_update>0) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_V: "// &
" can not be called in the middle of mpp_start_group_update/mpp_complete_group_update call")
num_update = num_update + 1
if( PRESENT(update_id) ) then
reuse_id_update = .true.
if( update_id < 1 .OR. update_id > MAX_NONBLOCK_UPDATE ) then
write( text,'(a,i8,a,i8)' ) 'optional argument update_id =', update_id, &
             ' is less than 1 or greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: '//trim(text))
endif
current_id = update_id
        !--- when reusing the update_id, make sure update_flags, halo size and update_position are still the same
if( nonblock_data(current_id)%update_flags .NE. update_flags .OR. &
nonblock_data(current_id)%update_whalo .NE. update_whalo .OR. &
nonblock_data(current_id)%update_ehalo .NE. update_ehalo .OR. &
nonblock_data(current_id)%update_shalo .NE. update_shalo .OR. &
nonblock_data(current_id)%update_nhalo .NE. update_nhalo .OR. &
nonblock_data(current_id)%update_gridtype .NE. grid_offset_type ) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: mismatch for optional argument for field '//trim(field_name) )
endif
else
reuse_id_update = .false.
current_id_update = current_id_update + 1
current_id = current_id_update
if( current_id_update > MAX_NONBLOCK_UPDATE ) then
            write( text,'(a,i8,a,i8)' ) 'num_fields =', current_id_update, ' is greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: '//trim(text))
endif
nonblock_data(current_id)%update_flags = update_flags
nonblock_data(current_id)%update_whalo = update_whalo
nonblock_data(current_id)%update_ehalo = update_ehalo
nonblock_data(current_id)%update_shalo = update_shalo
nonblock_data(current_id)%update_nhalo = update_nhalo
nonblock_data(current_id)%update_gridtype = grid_offset_type
nonblock_data(current_id)%recv_pos = nonblock_buffer_pos
endif
nonblock_data(current_id)%nfields = l_size
nonblock_data(current_id)%field_addrs(1:l_size) = f_addrsx(1:l_size,1)
nonblock_data(current_id)%field_addrs2(1:l_size) = f_addrsy(1:l_size,1)
mpp_start_update_domain2D_r8_3Dv = current_id
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
exchange_uv = .false.
if(grid_offset_type == DGRID_NE) then
exchange_uv = .true.
grid_offset_type = CGRID_NE
else if( grid_offset_type == DGRID_SW ) then
exchange_uv = .true.
grid_offset_type = CGRID_SW
end if
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case default
call mpp_error(FATAL, "mpp_update_domains2D_nonblock.h: invalid value of grid_offset_type")
end select
updatex => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_x)
updatey => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_y)
ke_max = maxval(ke_list(1:l_size,1:ntile))
if(exchange_uv) then
call mpp_start_do_update(current_id, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
updatey, updatex, d_type, ke_max, ke_list(1:l_size,1:ntile), grid_offset_type, &
update_flags, reuse_id_update, field_name)
else
call mpp_start_do_update(current_id, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
updatex, updatey, d_type, ke_max, ke_list(1:l_size,1:ntile), grid_offset_type, &
update_flags, reuse_id_update, field_name)
endif
endif
l_size=0; f_addrsx=-9999; f_addrsy=-9999; isize=0; jsize=0; ke_list=0
else
if(present(update_id)) then
mpp_start_update_domain2D_r8_3Dv = update_id
else
mpp_start_update_domain2D_r8_3Dv = 0
endif
end if
return
end function mpp_start_update_domain2D_r8_3Dv
function mpp_start_update_domain2D_r8_4Dv( fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
!updates data domain of 4D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r8_4Dv
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4))
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
mpp_start_update_domain2D_r8_4Dv = mpp_start_update_domains(field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
return
end function mpp_start_update_domain2D_r8_4Dv
function mpp_start_update_domain2D_r8_5Dv( fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
!updates data domain of 5D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:,:,:,:), fieldy(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r8_5Dv
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4)*size(fieldx,5))
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4)*size(fieldy,5))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
mpp_start_update_domain2D_r8_5Dv = mpp_start_update_domains(field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
return
end function mpp_start_update_domain2D_r8_5Dv
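!--- Usage note: a minimal sketch of the vector (u,v) form of the non-blocking
!--- update (the names id, u, v are illustrative; gridtype must match the
!--- staggering of the fields, e.g. CGRID_NE as handled in the select case above):
!
!     integer :: id
!     id = mpp_start_update_domains( u, v, domain, gridtype=CGRID_NE )
!     ! ... work that does not use the u/v halo points ...
!     call mpp_complete_update_domains( id, u, v, domain, gridtype=CGRID_NE )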
!####################################################################################
subroutine mpp_complete_update_domain2D_r8_2Dv( id_update, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
!updates data domain of 2D field whose computational domains have been computed
integer, intent(in) :: id_update
real(8), intent(inout) :: fieldx(:,:), fieldy(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),1)
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),1)
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_complete_update_domains(id_update, field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
return
end subroutine mpp_complete_update_domain2D_r8_2Dv
!####################################################################################
subroutine mpp_complete_update_domain2D_r8_3Dv( id_update, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
!updates data domain of 3D field whose computational domains have been computed
integer, intent(in) :: id_update
real(8), intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: grid_offset_type, position_x, position_y, update_flags
logical :: do_update, is_complete
integer :: ntile, max_ntile, tile, ke_max, n
logical :: exchange_uv
character(len=128) :: text
integer, save :: l_size=0, list=0
integer, save :: ke_list (MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrsx(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
integer(8), save :: f_addrsy(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
type(overlapSpec), pointer :: updatex => NULL()
type(overlapSpec), pointer :: updatey => NULL()
real(8) :: d_type
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
grid_offset_type = AGRID
if( PRESENT(gridtype) ) grid_offset_type = gridtype
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) ) then
update_flags = flags
! The following test is so that SCALAR_PAIR can be used alone with the
! same default update pattern as without.
if (BTEST(update_flags,SCALAR_BIT)) then
if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
.OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
end if
end if
  !check the consistency of halo size, position and flags.
if( nonblock_data(id_update)%update_flags .NE. update_flags ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument flag between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_whalo .NE. update_whalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument whalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_ehalo .NE. update_ehalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument ehalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_shalo .NE. update_shalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument shalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_nhalo .NE. update_nhalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument nhalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_gridtype .NE. grid_offset_type ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument gridtype between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_UPDATE_3D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument tile_count should be present when number of tiles on some pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_3D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
  !-- make sure the f_addrs match the ones registered by mpp_start_update_domains
if( tile == 1 ) then
if( nonblock_data(id_update)%field_addrs(list) .NE. f_addrsx(list, tile) .OR. &
nonblock_data(id_update)%field_addrs2(list) .NE. f_addrsy(list, tile)) then
call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_V: "// &
"mismatch of address between mpp_start_update_domains and mpp_complete_update_domains")
endif
endif
ke_list(list, tile) = size(fieldx,3)
if(is_complete) then
l_size = list
list = 0
end if
if(do_update)then
if(l_size .NE. nonblock_data(id_update)%nfields) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_V: "// &
"mismatch of number of fields between mpp_start_update_domains and mpp_complete_update_domains")
num_update = num_update - 1
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
exchange_uv = .false.
if(grid_offset_type == DGRID_NE) then
exchange_uv = .true.
grid_offset_type = CGRID_NE
else if( grid_offset_type == DGRID_SW ) then
exchange_uv = .true.
grid_offset_type = CGRID_SW
end if
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case default
call mpp_error(FATAL, "mpp_update_domains2D.h: invalid value of grid_offset_type")
end select
updatex => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_x)
updatey => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_y)
ke_max = maxval(ke_list(1:l_size,1:ntile))
if(exchange_uv) then
call mpp_complete_do_update(id_update, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
updatey, updatex, d_type, ke_max, ke_list(1:l_size,1:ntile), &
grid_offset_type, update_flags)
else
call mpp_complete_do_update(id_update, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
updatex, updatey, d_type, ke_max, ke_list(1:l_size,1:ntile), &
grid_offset_type, update_flags)
endif
endif
nonblock_data(id_update)%nfields = 0
nonblock_data(id_update)%field_addrs(1:l_size) = 0
nonblock_data(id_update)%field_addrs2(1:l_size) = 0
l_size=0; f_addrsx=-9999; f_addrsy=-9999; ke_list=0
!--- For the last call of mpp_complete_update_domains
!--- reset everything to init state
if( num_update == 0) then
do n = 1, current_id_update
call init_nonblock_type(nonblock_data(n))
enddo
current_id_update = 0
nonblock_buffer_pos = 0
endif
end if
end subroutine mpp_complete_update_domain2D_r8_3Dv
!####################################################################################
subroutine mpp_complete_update_domain2D_r8_4Dv( id_update, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
!updates data domain of 4D fields whose computational domains have been computed
integer, intent(in) :: id_update
real(8), intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4))
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_complete_update_domains(id_update, field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
return
end subroutine mpp_complete_update_domain2D_r8_4Dv
!####################################################################################
subroutine mpp_complete_update_domain2D_r8_5Dv( id_update, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
!updates data domain of 5D fields whose computational domains have been computed
integer, intent(in) :: id_update
real(8), intent(inout) :: fieldx(:,:,:,:,:), fieldy(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4)*size(fieldx,5))
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4)*size(fieldy,5))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_complete_update_domains(id_update, field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
return
end subroutine mpp_complete_update_domain2D_r8_5Dv
# 945 "../mpp/include/mpp_domains_misc.inc" 2
# 967
# 1 "../mpp/include/mpp_update_domains2D_nonblock.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_start_update_domain2D_i8_2D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
type(domain2D), intent(inout) :: domain
integer(8), intent(inout) :: field(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_i8_2D
integer(8) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_i8_2D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
return
end function mpp_start_update_domain2D_i8_2D
function mpp_start_update_domain2D_i8_3D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
type(domain2D), intent(inout) :: domain
integer(8), intent(inout) :: field(domain%x(1)%data%begin:,domain%y(1)%data%begin:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_i8_3D
!--- local variables
integer :: current_id, ke_max
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo, update_flags, update_position
integer :: tile, max_ntile, ntile, n, l
logical :: set_mismatch, is_complete
logical :: do_update, reuse_id_update
integer, save :: isize=0, jsize=0, l_size=0, list=0
integer, save :: pos, whalosz, ehalosz, shalosz, nhalosz, update_flags_saved
character(len=128) :: text, field_name
integer, save :: ke_list(MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrs(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
type(overlapSpec), pointer :: update => NULL()
integer(8) :: d_type
field_name = "unknown"
if(present(name)) field_name = name
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_position = CENTER
if(present(position)) then
!--- when there is NINETY or MINUS_NINETY rotation for some contact, the scalar data cannot be on the E or N-cell,
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
call mpp_error(FATAL, 'MPP_START_UPDATE_DOMAINS_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'cannot use the scalar version of update_domains for data on the E or N-cell' )
update_position = position
endif
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list,tile) = LOC(field)
ke_list(list,tile) = size(field,3)
!--- make sure mpp_start_update_domains has not already been called for this field. Currently we only check the address at tile = 1.
if( tile == 1 ) then
do n = 1, current_id_update
do l = 1, nonblock_data(n)%nfields
if( f_addrs(list,tile) == nonblock_data(n)%field_addrs(l)) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_3D is called again before calling ' //&
'MPP_COMPLETE_UPDATE_DOMAINS_3D for field '//trim(field_name))
endif
enddo
enddo
endif
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); pos = update_position
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
update_flags_saved = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize /= size(field,1))
set_mismatch = set_mismatch .OR. (jsize /= size(field,2))
set_mismatch = set_mismatch .OR. (update_position /= pos)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
set_mismatch = set_mismatch .OR. (update_flags_saved /= update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update) then
if(num_nonblock_group_update>0) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS: "// &
" can not be called in the middle of mpp_start_group_update/mpp_complete_group_update call")
num_update = num_update + 1
if( PRESENT(update_id) ) then
if( update_id < 1 .OR. update_id > MAX_NONBLOCK_UPDATE ) then
write( text,'(a,i8,a,i8)' ) 'optional argument update_id =', update_id, &
' is less than 1 or greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: '//trim(text))
endif
current_id = update_id
reuse_id_update = .true.
!--- when reusing the update_id, make sure update_flags, halo sizes and update_position are still the same
if( nonblock_data(current_id)%update_flags .NE. update_flags .OR. &
nonblock_data(current_id)%update_whalo .NE. update_whalo .OR. &
nonblock_data(current_id)%update_ehalo .NE. update_ehalo .OR. &
nonblock_data(current_id)%update_shalo .NE. update_shalo .OR. &
nonblock_data(current_id)%update_nhalo .NE. update_nhalo .OR. &
nonblock_data(current_id)%update_position .NE. update_position ) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: mismatch for optional argument for field '//trim(field_name) )
endif
else
reuse_id_update = .false.
current_id_update = current_id_update + 1
if( current_id_update > MAX_NONBLOCK_UPDATE ) then
write( text,'(a,i8,a,i8)' ) 'num_fields =', current_id_update, &
' greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: '//trim(text))
endif
current_id = current_id_update
nonblock_data(current_id)%update_flags = update_flags
nonblock_data(current_id)%update_whalo = update_whalo
nonblock_data(current_id)%update_ehalo = update_ehalo
nonblock_data(current_id)%update_shalo = update_shalo
nonblock_data(current_id)%update_nhalo = update_nhalo
nonblock_data(current_id)%update_position = update_position
nonblock_data(current_id)%recv_pos = nonblock_buffer_pos
endif
nonblock_data(current_id)%nfields = l_size
nonblock_data(current_id)%field_addrs(1:l_size) = f_addrs(1:l_size,1)
mpp_start_update_domain2D_i8_3D = current_id
ke_max = maxval(ke_list(1:l_size,1:ntile))
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
call mpp_start_do_update(current_id, f_addrs(1:l_size,1:ntile), domain, update, d_type, &
ke_max, ke_list(1:l_size,1:ntile), update_flags, reuse_id_update, field_name )
endif
l_size=0; f_addrs=-9999; isize=0; jsize=0; ke_list=0
else
if(present(update_id)) then
mpp_start_update_domain2D_i8_3D = update_id
else
mpp_start_update_domain2D_i8_3D = 0
endif
endif
end function mpp_start_update_domain2D_i8_3D
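!--- Illustrative usage sketch (comment only, not library code): the integer returned above
!--- identifies the pending nonblocking halo exchange and must be handed back to
!--- mpp_complete_update_domains for the same field and domain, e.g. (assuming "a" is a
!--- field dimensioned on the data domain):
!---     id = mpp_start_update_domains(a, domain)
!---     ! ... computation that does not touch the halo region of a ...
!---     call mpp_complete_update_domains(id, a, domain)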
!##########################################################################################
function mpp_start_update_domain2D_i8_4D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
type(domain2D), intent(inout) :: domain
integer(8), intent(inout) :: field(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_i8_4D
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_i8_4D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
return
end function mpp_start_update_domain2D_i8_4D
!##########################################################################################
function mpp_start_update_domain2D_i8_5D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
type(domain2D), intent(inout) :: domain
integer(8), intent(inout) :: field(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_i8_5D
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_i8_5D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
return
end function mpp_start_update_domain2D_i8_5D
!##################################################################################
subroutine mpp_complete_update_domain2D_i8_2D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
integer(8), intent(inout) :: field(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer(8) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_i8_2D
!##################################################################################
subroutine mpp_complete_update_domain2D_i8_3D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
integer(8), intent(inout) :: field(domain%x(1)%data%begin:,domain%y(1)%data%begin:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: update_position, update_flags
type(overlapSpec), pointer :: update => NULL()
integer :: tile, max_ntile, ntile, n
logical :: is_complete
logical :: do_update
integer :: ke_max
integer, save :: list=0, l_size=0
integer, save :: ke_list(MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrs(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
character(len=128) :: text
integer(8) :: d_type
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
update_position = CENTER
if(present(position)) update_position = position
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_COMPLETE_UPDATE_DOMAINS_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_COMPLETE_UPDATE_DOMAINS_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
!--- make sure the f_addrs match those registered by mpp_start_update_domains
if( tile == 1 ) then
if( nonblock_data(id_update)%field_addrs(list) .NE. f_addrs(list, tile)) then
call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of address between mpp_start_update_domains and mpp_complete_update_domains")
endif
endif
ke_list(list,tile) = size(field,3)
!--- check consistency of halo sizes, position and flags with those passed to mpp_start_update_domains
if( nonblock_data(id_update)%update_flags .NE. update_flags ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument flag between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_whalo .NE. update_whalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument whalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_ehalo .NE. update_ehalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument ehalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_shalo .NE. update_shalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument shalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_nhalo .NE. update_nhalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument nhalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_position .NE. update_position ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument position between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if(is_complete) then
l_size = list
list = 0
end if
if(do_update) then
if(l_size .NE. nonblock_data(id_update)%nfields) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of number of fields between mpp_start_update_domains and mpp_complete_update_domains")
num_update = num_update - 1
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) ) then
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
ke_max = maxval(ke_list(1:l_size,1:ntile))
call mpp_complete_do_update(id_update, f_addrs(1:l_size,1:ntile), domain, update, d_type, &
ke_max, ke_list(1:l_size,1:ntile), update_flags)
endif
nonblock_data(id_update)%nfields = 0
nonblock_data(id_update)%field_addrs(1:l_size) = 0
l_size=0; f_addrs=-9999; ke_list=0
!--- For the last call of mpp_complete_update_domains
!--- reset everything to init state
if( num_update == 0) then
do n = 1, current_id_update
call init_nonblock_type(nonblock_data(n))
enddo
current_id_update = 0
nonblock_buffer_pos = 0
endif
endif
end subroutine mpp_complete_update_domain2D_i8_3D
!##################################################################################
subroutine mpp_complete_update_domain2D_i8_4D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
integer(8), intent(inout) :: field(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_i8_4D
!##################################################################################
subroutine mpp_complete_update_domain2D_i8_5D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
integer(8), intent(inout) :: field(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_i8_5D
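!--- Illustrative usage sketch (comment only, not library code): fields with the same i/j
!--- extents that share the same halo widths, position and flags can be aggregated into a
!--- single exchange by passing complete=.false. for all but the last field of the group;
!--- the completion calls must list the fields in the same order, e.g.:
!---     id = mpp_start_update_domains(a, domain, complete=.false.)
!---     id = mpp_start_update_domains(b, domain, complete=.true.)
!---     ! ... interior computation ...
!---     call mpp_complete_update_domains(id, a, domain, complete=.false.)
!---     call mpp_complete_update_domains(id, b, domain, complete=.true.)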
# 1076
# 990 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_update_domains2D_nonblock.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_start_update_domain2D_r4_2D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
type(domain2D), intent(inout) :: domain
real(4), intent(inout) :: field(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r4_2D
real(4) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_r4_2D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
return
end function mpp_start_update_domain2D_r4_2D
function mpp_start_update_domain2D_r4_3D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
type(domain2D), intent(inout) :: domain
real(4), intent(inout) :: field(domain%x(1)%data%begin:,domain%y(1)%data%begin:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r4_3D
!--- local variables
integer :: current_id, ke_max
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo, update_flags, update_position
integer :: tile, max_ntile, ntile, n, l
logical :: set_mismatch, is_complete
logical :: do_update, reuse_id_update
integer, save :: isize=0, jsize=0, l_size=0, list=0
integer, save :: pos, whalosz, ehalosz, shalosz, nhalosz, update_flags_saved
character(len=128) :: text, field_name
integer, save :: ke_list(MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrs(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
type(overlapSpec), pointer :: update => NULL()
real(4) :: d_type
field_name = "unknown"
if(present(name)) field_name = name
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_position = CENTER
if(present(position)) then
!--- when there is NINETY or MINUS_NINETY rotation for some contact, the scalar data cannot be on the E or N-cell,
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
call mpp_error(FATAL, 'MPP_START_UPDATE_DOMAINS_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'cannot use the scalar version of update_domains for data on the E or N-cell' )
update_position = position
endif
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list,tile) = LOC(field)
ke_list(list,tile) = size(field,3)
!--- make sure mpp_start_update_domains has not already been called for this field. Currently we only check the address at tile = 1.
if( tile == 1 ) then
do n = 1, current_id_update
do l = 1, nonblock_data(n)%nfields
if( f_addrs(list,tile) == nonblock_data(n)%field_addrs(l)) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_3D is called again before calling ' //&
'MPP_COMPLETE_UPDATE_DOMAINS_3D for field '//trim(field_name))
endif
enddo
enddo
endif
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); pos = update_position
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
update_flags_saved = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize /= size(field,1))
set_mismatch = set_mismatch .OR. (jsize /= size(field,2))
set_mismatch = set_mismatch .OR. (update_position /= pos)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
set_mismatch = set_mismatch .OR. (update_flags_saved /= update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update) then
if(num_nonblock_group_update>0) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS: "// &
" can not be called in the middle of mpp_start_group_update/mpp_complete_group_update call")
num_update = num_update + 1
if( PRESENT(update_id) ) then
if( update_id < 1 .OR. update_id > MAX_NONBLOCK_UPDATE ) then
write( text,'(a,i8,a,i8)' ) 'optional argument update_id =', update_id, &
' is less than 1 or greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: '//trim(text))
endif
current_id = update_id
reuse_id_update = .true.
!--- when reusing the update_id, make sure update_flags, halo sizes and update_position are still the same
if( nonblock_data(current_id)%update_flags .NE. update_flags .OR. &
nonblock_data(current_id)%update_whalo .NE. update_whalo .OR. &
nonblock_data(current_id)%update_ehalo .NE. update_ehalo .OR. &
nonblock_data(current_id)%update_shalo .NE. update_shalo .OR. &
nonblock_data(current_id)%update_nhalo .NE. update_nhalo .OR. &
nonblock_data(current_id)%update_position .NE. update_position ) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: mismatch for optional argument for field '//trim(field_name) )
endif
else
reuse_id_update = .false.
current_id_update = current_id_update + 1
if( current_id_update > MAX_NONBLOCK_UPDATE ) then
write( text,'(a,i8,a,i8)' ) 'num_fields =', current_id_update, &
' greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: '//trim(text))
endif
current_id = current_id_update
nonblock_data(current_id)%update_flags = update_flags
nonblock_data(current_id)%update_whalo = update_whalo
nonblock_data(current_id)%update_ehalo = update_ehalo
nonblock_data(current_id)%update_shalo = update_shalo
nonblock_data(current_id)%update_nhalo = update_nhalo
nonblock_data(current_id)%update_position = update_position
nonblock_data(current_id)%recv_pos = nonblock_buffer_pos
endif
nonblock_data(current_id)%nfields = l_size
nonblock_data(current_id)%field_addrs(1:l_size) = f_addrs(1:l_size,1)
mpp_start_update_domain2D_r4_3D = current_id
ke_max = maxval(ke_list(1:l_size,1:ntile))
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
call mpp_start_do_update(current_id, f_addrs(1:l_size,1:ntile), domain, update, d_type, &
ke_max, ke_list(1:l_size,1:ntile), update_flags, reuse_id_update, field_name )
endif
l_size=0; f_addrs=-9999; isize=0; jsize=0; ke_list=0
else
if(present(update_id)) then
mpp_start_update_domain2D_r4_3D = update_id
else
mpp_start_update_domain2D_r4_3D = 0
endif
endif
end function mpp_start_update_domain2D_r4_3D
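!--- Illustrative note (comment only, not library code): when the optional update_id argument
!--- is supplied it is taken as the id of a previously issued request, and the stored flags,
!--- halo widths and position must match the original request exactly (checked above), e.g.:
!---     id = mpp_start_update_domains(a, domain)                 ! first request
!---     id = mpp_start_update_domains(a, domain, update_id=id)   ! later reuse of the same id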
!##########################################################################################
function mpp_start_update_domain2D_r4_4D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
type(domain2D), intent(inout) :: domain
real(4), intent(inout) :: field(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r4_4D
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_r4_4D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
return
end function mpp_start_update_domain2D_r4_4D
!##########################################################################################
function mpp_start_update_domain2D_r4_5D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
type(domain2D), intent(inout) :: domain
real(4), intent(inout) :: field(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r4_5D
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_r4_5D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
return
end function mpp_start_update_domain2D_r4_5D
!##################################################################################
subroutine mpp_complete_update_domain2D_r4_2D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
real(4), intent(inout) :: field(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(4) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_r4_2D
!##################################################################################
subroutine mpp_complete_update_domain2D_r4_3D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
real(4), intent(inout) :: field(domain%x(1)%data%begin:,domain%y(1)%data%begin:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: update_position, update_flags
type(overlapSpec), pointer :: update => NULL()
integer :: tile, max_ntile, ntile, n
logical :: is_complete
logical :: do_update
integer :: ke_max
integer, save :: list=0, l_size=0
integer, save :: ke_list(MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrs(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
character(len=128) :: text
real(4) :: d_type
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
update_position = CENTER
if(present(position)) update_position = position
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_COMPLETE_UPDATE_DOMAINS_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_COMPLETE_UPDATE_DOMAINS_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
!--- make sure the f_addrs match those registered by mpp_start_update_domains
if( tile == 1 ) then
if( nonblock_data(id_update)%field_addrs(list) .NE. f_addrs(list, tile)) then
call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of address between mpp_start_update_domains and mpp_complete_update_domains")
endif
endif
ke_list(list,tile) = size(field,3)
!--- check consistency of halo sizes, position and flags with those passed to mpp_start_update_domains
if( nonblock_data(id_update)%update_flags .NE. update_flags ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument flag between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_whalo .NE. update_whalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument whalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_ehalo .NE. update_ehalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument ehalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_shalo .NE. update_shalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument shalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_nhalo .NE. update_nhalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument nhalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_position .NE. update_position ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument position between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if(is_complete) then
l_size = list
list = 0
end if
if(do_update) then
if(l_size .NE. nonblock_data(id_update)%nfields) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of number of fields between mpp_start_update_domains and mpp_complete_update_domains")
num_update = num_update - 1
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) ) then
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
ke_max = maxval(ke_list(1:l_size,1:ntile))
call mpp_complete_do_update(id_update, f_addrs(1:l_size,1:ntile), domain, update, d_type, &
ke_max, ke_list(1:l_size,1:ntile), update_flags)
endif
nonblock_data(id_update)%nfields = 0
nonblock_data(id_update)%field_addrs(1:l_size) = 0
l_size=0; f_addrs=-9999; ke_list=0
!--- For the last call of mpp_complete_update_domains
!--- reset everything to init state
if( num_update == 0) then
do n = 1, current_id_update
call init_nonblock_type(nonblock_data(n))
enddo
current_id_update = 0
nonblock_buffer_pos = 0
endif
endif
end subroutine mpp_complete_update_domain2D_r4_3D
!##################################################################################
subroutine mpp_complete_update_domain2D_r4_4D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
real(4), intent(inout) :: field(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_r4_4D
!##################################################################################
subroutine mpp_complete_update_domain2D_r4_5D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
real(4), intent(inout) :: field(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_r4_5D
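!--- Illustrative usage sketch (comment only, not library code): the optional
!--- whalo/ehalo/shalo/nhalo arguments update only part of the halo (their magnitude may not
!--- exceed the halo width the domain was defined with), and flags can restrict the direction;
!--- the same optional arguments must be repeated on the completion call, e.g. a one-point,
!--- x-direction-only update:
!---     id = mpp_start_update_domains(a, domain, flags=XUPDATE, whalo=1, ehalo=1)
!---     call mpp_complete_update_domains(id, a, domain, flags=XUPDATE, whalo=1, ehalo=1)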
function mpp_start_update_domain2D_r4_2Dv( fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
!updates data domain of 2D fields whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:), fieldy(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r4_2Dv
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),1)
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),1)
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
mpp_start_update_domain2D_r4_2Dv = mpp_start_update_domains(field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
return
end function mpp_start_update_domain2D_r4_2Dv
!###################################################################################
function mpp_start_update_domain2D_r4_3Dv( fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
!updates data domain of 3D field whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
!--- local variables
integer :: mpp_start_update_domain2D_r4_3Dv
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: grid_offset_type, position_x, position_y, update_flags, current_id
logical :: do_update, is_complete, set_mismatch
integer :: ntile, max_ntile, tile, ke_max, n, l
logical :: exchange_uv, reuse_id_update
character(len=128) :: text, field_name
integer, save :: whalosz, ehalosz, shalosz, nhalosz
integer, save :: isize(2)=0,jsize(2)=0,l_size=0, offset_type=0, list=0
integer, save :: ke_list (MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrsx(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
integer(8), save :: f_addrsy(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
type(overlapSpec), pointer :: updatex => NULL()
type(overlapSpec), pointer :: updatey => NULL()
real(4) :: d_type
field_name = "unknown"
if(present(name)) field_name = name
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D_V: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
grid_offset_type = AGRID
if( PRESENT(gridtype) ) grid_offset_type = gridtype
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) ) then
update_flags = flags
! The following test is so that SCALAR_PAIR can be used alone with the
! same default update pattern as without.
if (BTEST(update_flags,SCALAR_BIT)) then
if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
.OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
end if
end if
if( BTEST(update_flags,NORTH) .AND. BTEST(domain%fold,NORTH) .AND. BTEST(grid_offset_type,SOUTH) ) &
call mpp_error( FATAL, 'MPP_START_UPDATE_DOMAINS_V: Incompatible grid offset and fold.' )
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_3D_V: "// &
"optional argument tile_count should be present when number of tiles on some pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if( tile == 1 ) then
do n = 1, current_id_update
do l = 1, nonblock_data(n)%nfields
if( f_addrsx(list,tile) == nonblock_data(n)%field_addrs(l) .OR. &
f_addrsy(list,tile) == nonblock_data(n)%field_addrs2(l)) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V is called again before calling ' //&
'MPP_COMPLETE_UPDATE_DOMAINS_V for field '//trim(field_name))
endif
enddo
enddo
endif
ke_list(list, tile) = size(fieldx,3)
if(list == 1 .AND. tile == 1)then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2)
isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
offset_type = grid_offset_type
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) /= size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) /= size(fieldx,2))
set_mismatch = set_mismatch .OR. (isize(2) /= size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) /= size(fieldy,2))
set_mismatch = set_mismatch .OR. (grid_offset_type /= offset_type)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: Incompatible field at count '//text//' for group vector update.' )
end if
end if
if(is_complete) then
l_size = list
list = 0
end if
if(do_update)then
if(num_nonblock_group_update>0) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_V: "// &
" can not be called in the middle of mpp_start_group_update/mpp_complete_group_update call")
num_update = num_update + 1
if( PRESENT(update_id) ) then
reuse_id_update = .true.
if( update_id < 1 .OR. update_id > MAX_NONBLOCK_UPDATE ) then
write( text,'(a,i8,a,i8)' ) 'optional argument update_id =', update_id, &
' is less than 1 or greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: '//trim(text))
endif
current_id = update_id
!--- when reusing the update_id, make sure update_flags, halo sizes and gridtype are still the same
if( nonblock_data(current_id)%update_flags .NE. update_flags .OR. &
nonblock_data(current_id)%update_whalo .NE. update_whalo .OR. &
nonblock_data(current_id)%update_ehalo .NE. update_ehalo .OR. &
nonblock_data(current_id)%update_shalo .NE. update_shalo .OR. &
nonblock_data(current_id)%update_nhalo .NE. update_nhalo .OR. &
nonblock_data(current_id)%update_gridtype .NE. grid_offset_type ) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: mismatch for optional argument for field '//trim(field_name) )
endif
else
reuse_id_update = .false.
current_id_update = current_id_update + 1
current_id = current_id_update
if( current_id_update > MAX_NONBLOCK_UPDATE ) then
write( text,'(a,i8,a,i8)' ) 'num_fields =', current_id_update, ' greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: '//trim(text))
endif
nonblock_data(current_id)%update_flags = update_flags
nonblock_data(current_id)%update_whalo = update_whalo
nonblock_data(current_id)%update_ehalo = update_ehalo
nonblock_data(current_id)%update_shalo = update_shalo
nonblock_data(current_id)%update_nhalo = update_nhalo
nonblock_data(current_id)%update_gridtype = grid_offset_type
nonblock_data(current_id)%recv_pos = nonblock_buffer_pos
endif
nonblock_data(current_id)%nfields = l_size
nonblock_data(current_id)%field_addrs(1:l_size) = f_addrsx(1:l_size,1)
nonblock_data(current_id)%field_addrs2(1:l_size) = f_addrsy(1:l_size,1)
mpp_start_update_domain2D_r4_3Dv = current_id
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
exchange_uv = .false.
if(grid_offset_type == DGRID_NE) then
exchange_uv = .true.
grid_offset_type = CGRID_NE
else if( grid_offset_type == DGRID_SW ) then
exchange_uv = .true.
grid_offset_type = CGRID_SW
end if
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case default
call mpp_error(FATAL, "mpp_update_domains2D_nonblock.h: invalid value of grid_offset_type")
end select
updatex => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_x)
updatey => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_y)
ke_max = maxval(ke_list(1:l_size,1:ntile))
if(exchange_uv) then
call mpp_start_do_update(current_id, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
updatey, updatex, d_type, ke_max, ke_list(1:l_size,1:ntile), grid_offset_type, &
update_flags, reuse_id_update, field_name)
else
call mpp_start_do_update(current_id, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
updatex, updatey, d_type, ke_max, ke_list(1:l_size,1:ntile), grid_offset_type, &
update_flags, reuse_id_update, field_name)
endif
endif
l_size=0; f_addrsx=-9999; f_addrsy=-9999; isize=0; jsize=0; ke_list=0
else
if(present(update_id)) then
mpp_start_update_domain2D_r4_3Dv = update_id
else
mpp_start_update_domain2D_r4_3Dv = 0
endif
end if
return
end function mpp_start_update_domain2D_r4_3Dv
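!--- Illustrative usage sketch (comment only, not library code): velocity components are
!--- updated as a pair so that rotations and folds at tile boundaries can be handled
!--- consistently; gridtype selects the staggering (AGRID, BGRID_NE, CGRID_NE, DGRID_NE, ...),
!--- and for DGRID the x/y overlap specs are swapped internally (exchange_uv above).
!--- E.g. for C-grid winds u and v:
!---     id = mpp_start_update_domains(u, v, domain, gridtype=CGRID_NE)
!---     ! ... interior computation ...
!---     call mpp_complete_update_domains(id, u, v, domain, gridtype=CGRID_NE)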
function mpp_start_update_domain2D_r4_4Dv( fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
!updates data domain of 4D fields whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r4_4Dv
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4))
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
mpp_start_update_domain2D_r4_4Dv = mpp_start_update_domains(field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
return
end function mpp_start_update_domain2D_r4_4Dv
function mpp_start_update_domain2D_r4_5Dv( fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
!updates data domain of 5D fields whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:,:,:,:), fieldy(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_r4_5Dv
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4)*size(fieldx,5))
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4)*size(fieldy,5))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
mpp_start_update_domain2D_r4_5Dv = mpp_start_update_domains(field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
return
end function mpp_start_update_domain2D_r4_5Dv
!####################################################################################
subroutine mpp_complete_update_domain2D_r4_2Dv( id_update, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
!updates data domain of 2D field whose computational domains have been computed
integer, intent(in) :: id_update
real(4), intent(inout) :: fieldx(:,:), fieldy(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),1)
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),1)
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_complete_update_domains(id_update, field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
return
end subroutine mpp_complete_update_domain2D_r4_2Dv
!####################################################################################
subroutine mpp_complete_update_domain2D_r4_3Dv( id_update, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
!updates data domain of 3D field whose computational domains have been computed
integer, intent(in) :: id_update
real(4), intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: grid_offset_type, position_x, position_y, update_flags
logical :: do_update, is_complete
integer :: ntile, max_ntile, tile, ke_max, n
logical :: exchange_uv
character(len=128) :: text
integer, save :: l_size=0, list=0
integer, save :: ke_list (MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrsx(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
integer(8), save :: f_addrsy(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
type(overlapSpec), pointer :: updatex => NULL()
type(overlapSpec), pointer :: updatey => NULL()
real(4) :: d_type
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"optional argument whalo should not be larger than the whalo used when defining the domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"optional argument ehalo should not be larger than the ehalo used when defining the domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"optional argument shalo should not be larger than the shalo used when defining the domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"optional argument nhalo should not be larger than the nhalo used when defining the domain.")
else
update_nhalo = domain%nhalo
end if
grid_offset_type = AGRID
if( PRESENT(gridtype) ) grid_offset_type = gridtype
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) ) then
update_flags = flags
! The following test is so that SCALAR_PAIR can be used alone with the
! same default update pattern as without.
if (BTEST(update_flags,SCALAR_BIT)) then
if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
.OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
end if
end if
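!--- Note: the test above means that passing flags=SCALAR_PAIR alone behaves like
!--- flags=SCALAR_PAIR+XUPDATE+YUPDATE, i.e. halos in both x and y are still updated
!--- unless an explicit directional flag (WEST/EAST/SOUTH/NORTH) is supplied as well.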
!check to make sure the consistency of halo size, position and flags.
if( nonblock_data(id_update)%update_flags .NE. update_flags ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument flag between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_whalo .NE. update_whalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument whalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_ehalo .NE. update_ehalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument ehalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_shalo .NE. update_shalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument shalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_nhalo .NE. update_nhalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument nhalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_gridtype .NE. grid_offset_type ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"mismatch of optional argument gridtype between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_COMPLETE_UPDATE_DOMAINS_3D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D_V: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_COMPLETE_UPDATE_DOMAINS_3D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
!-- make sure the f_addrs match the one at mpp_start_update_domains
if( tile == 1 ) then
if( nonblock_data(id_update)%field_addrs(list) .NE. f_addrsx(list, tile) .OR. &
nonblock_data(id_update)%field_addrs2(list) .NE. f_addrsy(list, tile)) then
call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_V: "// &
"mismatch of address between mpp_start_update_domains and mpp_complete_update_domains")
endif
endif
ke_list(list, tile) = size(fieldx,3)
if(is_complete) then
l_size = list
list = 0
end if
if(do_update)then
if(l_size .NE. nonblock_data(id_update)%nfields) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_V: "// &
"mismatch of number of fields between mpp_start_update_domains and mpp_complete_update_domains")
num_update = num_update - 1
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
exchange_uv = .false.
if(grid_offset_type == DGRID_NE) then
exchange_uv = .true.
grid_offset_type = CGRID_NE
else if( grid_offset_type == DGRID_SW ) then
exchange_uv = .true.
grid_offset_type = CGRID_SW
end if
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case default
call mpp_error(FATAL, "mpp_update_domains2D.h: invalid value of grid_offset_type")
end select
updatex => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_x)
updatey => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_y)
ke_max = maxval(ke_list(1:l_size,1:ntile))
if(exchange_uv) then
call mpp_complete_do_update(id_update, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
updatey, updatex, d_type, ke_max, ke_list(1:l_size,1:ntile), &
grid_offset_type, update_flags)
else
call mpp_complete_do_update(id_update, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
updatex, updatey, d_type, ke_max, ke_list(1:l_size,1:ntile), &
grid_offset_type, update_flags)
endif
endif
nonblock_data(id_update)%nfields = 0
nonblock_data(id_update)%field_addrs(1:l_size) = 0
nonblock_data(id_update)%field_addrs2(1:l_size) = 0
l_size=0; f_addrsx=-9999; f_addrsy=-9999; ke_list=0
!--- For the last call of mpp_complete_update_domains
!--- reset everything to init state
if( num_update == 0) then
do n = 1, current_id_update
call init_nonblock_type(nonblock_data(n))
enddo
current_id_update = 0
nonblock_buffer_pos = 0
endif
end if
end subroutine mpp_complete_update_domain2D_r4_3Dv
!####################################################################################
subroutine mpp_complete_update_domain2D_r4_4Dv( id_update, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
!updates data domain of 4D field whose computational domains have been computed
integer, intent(in) :: id_update
real(4), intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4))
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_complete_update_domains(id_update, field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
return
end subroutine mpp_complete_update_domain2D_r4_4Dv
!####################################################################################
subroutine mpp_complete_update_domain2D_r4_5Dv( id_update, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
!updates data domain of 5D field whose computational domains have been computed
integer, intent(in) :: id_update
real(4), intent(inout) :: fieldx(:,:,:,:,:), fieldy(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4)*size(fieldx,5))
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4)*size(fieldy,5))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_complete_update_domains(id_update, field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
return
end subroutine mpp_complete_update_domain2D_r4_5Dv
# 1032 "../mpp/include/mpp_domains_misc.inc" 2
# 1055
# 1 "../mpp/include/mpp_update_domains2D_nonblock.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_start_update_domain2D_i4_2D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
type(domain2D), intent(inout) :: domain
integer(4), intent(inout) :: field(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_i4_2D
integer(4) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_i4_2D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
return
end function mpp_start_update_domain2D_i4_2D
function mpp_start_update_domain2D_i4_3D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
type(domain2D), intent(inout) :: domain
integer(4), intent(inout) :: field(domain%x(1)%data%begin:,domain%y(1)%data%begin:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_i4_3D
!--- local variables
integer :: current_id, ke_max
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo, update_flags, update_position
integer :: tile, max_ntile, ntile, n, l
logical :: set_mismatch, is_complete
logical :: do_update, reuse_id_update
integer, save :: isize=0, jsize=0, l_size=0, list=0
integer, save :: pos, whalosz, ehalosz, shalosz, nhalosz, update_flags_saved
character(len=128) :: text, field_name
integer, save :: ke_list(MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrs(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
type(overlapSpec), pointer :: update => NULL()
integer(4) :: d_type
field_name = "unknown"
if(present(name)) field_name = name
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument whalo should not be larger than the whalo used when defining the domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument ehalo should not be larger than the ehalo used when defining the domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument shalo should not be larger than the shalo used when defining the domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument nhalo should not be larger than the nhalo used when defining the domain.")
else
update_nhalo = domain%nhalo
end if
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_position = CENTER
if(present(position)) then
!--- when there is NINETY or MINUS_NINETY rotation for some contact, the scalar data can not be on E or N-cell
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
call mpp_error(FATAL, 'MPP_START_UPDATE_DOMAINS_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'the scalar version of update_domains can not be used for data on E or N-cell' )
update_position = position
endif
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list,tile) = LOC(field)
ke_list(list,tile) = size(field,3)
!make sure mpp_start_update_domains has not already been called for this field without a matching complete call. Currently we only check the address at tile = 1.
if( tile == 1 ) then
do n = 1, current_id_update
do l = 1, nonblock_data(n)%nfields
if( f_addrs(list,tile) == nonblock_data(n)%field_addrs(l)) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_3D is called again before calling ' //&
'MPP_COMPLETE_UPDATE_DOMAINS_3D for field '//trim(field_name))
endif
enddo
enddo
endif
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); pos = update_position
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
update_flags_saved = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize /= size(field,1))
set_mismatch = set_mismatch .OR. (jsize /= size(field,2))
set_mismatch = set_mismatch .OR. (update_position /= pos)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
set_mismatch = set_mismatch .OR. (update_flags_saved /= update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update) then
if(num_nonblock_group_update>0) call mpp_error(FATAL, "MPP_START_UPDATE_DOMAINS: "// &
" can not be called in the middle of mpp_start_group_update/mpp_complete_group_update call")
num_update = num_update + 1
if( PRESENT(update_id) ) then
if( update_id < 1 .OR. update_id > MAX_NONBLOCK_UPDATE ) then
write( text,'(a,i8,a,i8)' ) 'optional argument update_id =', update_id, &
' is less than 1 or greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: '//trim(text))
endif
current_id = update_id
reuse_id_update = .true.
!--- when reusing the update_id, make sure update_flags, the halo sizes and update_position are still the same
if( nonblock_data(current_id)%update_flags .NE. update_flags .OR. &
nonblock_data(current_id)%update_whalo .NE. update_whalo .OR. &
nonblock_data(current_id)%update_ehalo .NE. update_ehalo .OR. &
nonblock_data(current_id)%update_shalo .NE. update_shalo .OR. &
nonblock_data(current_id)%update_nhalo .NE. update_nhalo .OR. &
nonblock_data(current_id)%update_position .NE. update_position ) then
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: mismatch for optional argument for field '//trim(field_name) )
endif
else
reuse_id_update = .false.
current_id_update = current_id_update + 1
if( current_id_update > MAX_NONBLOCK_UPDATE ) then
write( text,'(a,i8,a,i8)' ) 'current_id_update =', current_id_update, &
' greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
call mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: '//trim(text))
endif
current_id = current_id_update
nonblock_data(current_id)%update_flags = update_flags
nonblock_data(current_id)%update_whalo = update_whalo
nonblock_data(current_id)%update_ehalo = update_ehalo
nonblock_data(current_id)%update_shalo = update_shalo
nonblock_data(current_id)%update_nhalo = update_nhalo
nonblock_data(current_id)%update_position = update_position
nonblock_data(current_id)%recv_pos = nonblock_buffer_pos
endif
nonblock_data(current_id)%nfields = l_size
nonblock_data(current_id)%field_addrs(1:l_size) = f_addrs(1:l_size,1)
mpp_start_update_domain2D_i4_3D = current_id
ke_max = maxval(ke_list(1:l_size,1:ntile))
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
call mpp_start_do_update(current_id, f_addrs(1:l_size,1:ntile), domain, update, d_type, &
ke_max, ke_list(1:l_size,1:ntile), update_flags, reuse_id_update, field_name )
endif
l_size=0; f_addrs=-9999; isize=0; jsize=0; ke_list=0
else
if(present(update_id)) then
mpp_start_update_domain2D_i4_3D = update_id
else
mpp_start_update_domain2D_i4_3D = 0
endif
endif
end function mpp_start_update_domain2D_i4_3D
!##########################################################################################
function mpp_start_update_domain2D_i4_4D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
type(domain2D), intent(inout) :: domain
integer(4), intent(inout) :: field(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_i4_4D
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_i4_4D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
return
end function mpp_start_update_domain2D_i4_4D
!##########################################################################################
function mpp_start_update_domain2D_i4_5D( field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete)
type(domain2D), intent(inout) :: domain
integer(4), intent(inout) :: field(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer, intent(in), optional :: update_id
logical, intent(in), optional :: complete
integer :: mpp_start_update_domain2D_i4_5D
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
mpp_start_update_domain2D_i4_5D = mpp_start_update_domains(field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, update_id, complete )
return
end function mpp_start_update_domain2D_i4_5D
!##################################################################################
subroutine mpp_complete_update_domain2D_i4_2D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
integer(4), intent(inout) :: field(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer(4) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_i4_2D
!##################################################################################
subroutine mpp_complete_update_domain2D_i4_3D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
integer(4), intent(inout) :: field(domain%x(1)%data%begin:,domain%y(1)%data%begin:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: update_position, update_flags
type(overlapSpec), pointer :: update => NULL()
integer :: tile, max_ntile, ntile, n
logical :: is_complete
logical :: do_update
integer :: ke_max
integer, save :: list=0, l_size=0
integer, save :: ke_list(MAX_DOMAIN_FIELDS, MAX_TILES)=0
integer(8), save :: f_addrs(MAX_DOMAIN_FIELDS, MAX_TILES)=-9999
character(len=128) :: text
integer(4) :: d_type
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument whalo should not be larger than the whalo used when defining the domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument ehalo should not be larger than the ehalo used when defining the domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument shalo should not be larger than the shalo used when defining the domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument nhalo should not be larger than the nhalo used when defining the domain.")
else
update_nhalo = domain%nhalo
end if
update_position = CENTER
if(present(position)) update_position = position
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_COMPLETE_UPDATE_DOMAINS_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_COMPLETE_UPDATE_DOMAINS_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
!-- make sure the f_addrs match the one at mpp_start_update_domains
if( tile == 1 ) then
if( nonblock_data(id_update)%field_addrs(list) .NE. f_addrs(list, tile)) then
call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of address between mpp_start_update_domains and mpp_complete_update_domains")
endif
endif
ke_list(list,tile) = size(field,3)
!check to make sure the consistency of halo size, position and flags.
if( nonblock_data(id_update)%update_flags .NE. update_flags ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument flag between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_whalo .NE. update_whalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument whalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_ehalo .NE. update_ehalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument ehalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_shalo .NE. update_shalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument shalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_nhalo .NE. update_nhalo ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument nhalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if( nonblock_data(id_update)%update_position .NE. update_position ) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of optional argument position between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
if(is_complete) then
l_size = list
list = 0
end if
if(do_update) then
if(l_size .NE. nonblock_data(id_update)%nfields) call mpp_error(FATAL, "MPP_COMPLETE_UPDATE_DOMAINS_3D: "// &
"mismatch of number of fields between mpp_start_update_domains and mpp_complete_update_domains")
num_update = num_update - 1
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) ) then
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
ke_max = maxval(ke_list(1:l_size,1:ntile))
call mpp_complete_do_update(id_update, f_addrs(1:l_size,1:ntile), domain, update, d_type, &
ke_max, ke_list(1:l_size,1:ntile), update_flags)
endif
nonblock_data(id_update)%nfields = 0
nonblock_data(id_update)%field_addrs(1:l_size) = 0
l_size=0; f_addrs=-9999; ke_list=0
!--- For the last call of mpp_complete_update_domains
!--- reset everything to init state
if( num_update == 0) then
do n = 1, current_id_update
call init_nonblock_type(nonblock_data(n))
enddo
current_id_update = 0
nonblock_buffer_pos = 0
endif
endif
end subroutine mpp_complete_update_domain2D_i4_3D
!##################################################################################
subroutine mpp_complete_update_domain2D_i4_4D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
integer(4), intent(inout) :: field(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_i4_4D
!##################################################################################
subroutine mpp_complete_update_domain2D_i4_5D( id_update, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
integer, intent(in) :: id_update
type(domain2D), intent(inout) :: domain
integer(4), intent(inout) :: field(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: complete
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
whalo, ehalo, shalo, nhalo, name, tile_count, complete )
end subroutine mpp_complete_update_domain2D_i4_5D
# 1076
# 1077 "../mpp/include/mpp_domains_misc.inc" 2
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! mpp_start_do_update and mpp_complete_do_update !
! private routine. To be called in mpp_start_update_domains !
! and mpp_complete_update_domains !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
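!
! A minimal usage sketch of the public interfaces that wrap these private routines
! (illustrative only; field and domain names are placeholders and optional
! arguments are abbreviated):
!
!   id = mpp_start_update_domains(u, v, domain, gridtype=CGRID_NE)
!   ! ... halo-independent computation can proceed here ...
!   call mpp_complete_update_domains(id, u, v, domain, gridtype=CGRID_NE)
!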
# 1 "../mpp/include/mpp_do_update_nonblock.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_start_do_update_r8_3D(id_update, f_addrs, domain, update, d_type, ke_max, ke_list, flags, reuse_id_update, name)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
logical, intent(in) :: reuse_id_update
character(len=*), intent(in) :: name
integer, intent(in) :: flags
!--- local variables
integer :: i, j, k, m, n, l, dir, tMe
integer :: buffer_pos, msgsize, from_pe, to_pe, pos
integer :: is, ie, js, je, sendsize, recvsize
logical :: send(8), recv(8), update_edge_only
integer :: l_size, ke_sum, my_id_update
integer :: request
integer :: send_msgsize(MAXLIST)
character(len=128) :: text
real(8) :: buffer(size(mpp_domains_stack_nonblock(:)))
real(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke_max)
pointer( ptr, buffer )
pointer(ptr_field, field)
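!--- Direction-index convention for the send/recv masks set up below: the odd entries
!--- are the edges taken straight from the flag bits (1=EAST, 3=SOUTH, 5=WEST, 7=NORTH),
!--- and each even entry is switched on only when both neighbouring edges are requested,
!--- i.e. it represents the corner shared by those two edges (for example recv(2)
!--- requires both EAST and SOUTH).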
update_edge_only = BTEST(flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
l_size = size(f_addrs,1)
ke_sum = sum(ke_list)
ptr = LOC(mpp_domains_stack_nonblock)
buffer_pos = nonblock_data(id_update)%recv_pos
if( update%nrecv > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'update%nrecv =', update%nrecv, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE: '//trim(text))
endif
if( update%nsend > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'update%nsend =', update%nsend, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE: '//trim(text))
endif
! pre-postrecv
!--- make sure the domain stack size is big enough.
recvsize = 0
do m = 1, update%nrecv
nonblock_data(id_update)%size_recv(m) = 0
if( update%recv(m)%count == 0 )cycle
msgsize = 0
do n = 1, update%recv(m)%count
dir = update%recv(m)%dir(n)
if(recv(dir)) then
msgsize = msgsize + update%recv(m)%msgsize(n)
end if
end do
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
recvsize = recvsize + msgsize
nonblock_data(id_update)%size_recv(m) = msgsize
nonblock_data(id_update)%buffer_pos_recv(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
sendsize = 0
do m = 1, update%nsend
if( update%send(m)%count == 0 )cycle
! make sure the stacksize is big enough
msgsize = 0
do n = 1, update%send(m)%count
dir = update%send(m)%dir(n)
if( send(dir) ) msgsize = msgsize + update%send(m)%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
sendsize = sendsize + msgsize
nonblock_data(id_update)%buffer_pos_send(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, &
nonblock_data(id_update)%recv_pos+recvsize+sendsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_START_DO_UPDATE: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
if( reuse_id_update ) then
if(recvsize .NE. nonblock_data(id_update)%recv_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of recv msgsize for field '//trim(name) )
endif
if(sendsize .NE. nonblock_data(id_update)%send_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of send msgsize for field '//trim(name) )
endif
else
nonblock_data(id_update)%recv_msgsize = recvsize
nonblock_data(id_update)%send_msgsize = sendsize
nonblock_data(id_update)%send_pos = nonblock_data(id_update)%recv_pos + recvsize
nonblock_buffer_pos = nonblock_buffer_pos + recvsize + sendsize
endif
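!--- Buffer layout note: within mpp_domains_stack_nonblock the receive region for this
!--- update starts at recv_pos and the send region follows immediately at
!--- send_pos = recv_pos + recvsize; nonblock_buffer_pos is advanced past both so a
!--- subsequent update id gets a disjoint slice of the stack.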
! pre-postrecv
call mpp_clock_begin(recv_clock_nonblock)
do m = 1, update%nrecv
msgsize = nonblock_data(id_update)%size_recv(m)
if( msgsize.GT.0 )then
from_pe = update%recv(m)%pe
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m)
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., &
tag=id_update, request=request)
nonblock_data(id_update)%request_recv(m) = request
nonblock_data(id_update)%type_recv(m) = MPI_REAL8
end if
end do ! end do m = 1, update%nrecv
call mpp_clock_end(recv_clock_nonblock)
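!--- Note: the receives above are posted non-blocking (block=.FALSE.) and are completed
!--- later in the matching mpp_complete_do_update call via mpp_sync_self(check=EVENT_RECV),
!--- using the request handles saved in nonblock_data(id_update)%request_recv.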
! send
call mpp_clock_begin(send_pack_clock_nonblock)
!$OMP parallel do schedule(dynamic) default(shared) private(buffer_pos,pos,dir,tMe,is,ie,js,je,ptr_field,to_pe, &
!$OMP msgsize,request)
do m = 1, update%nsend
send_msgsize(m) = 0
if( update%send(m)%count == 0 )cycle
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
pos = buffer_pos
do n = 1, update%send(m)%count
dir = update%send(m)%dir(n)
if( send(dir) ) then
tMe = update%send(m)%tileMe(n)
is = update%send(m)%is(n); ie = update%send(m)%ie(n)
js = update%send(m)%js(n); je = update%send(m)%je(n)
select case( update%send(m)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
enddo
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, update%send(m)%count
send_msgsize(m) = pos - buffer_pos
enddo
!$OMP end parallel do
do m = 1, update%nsend
msgsize = send_msgsize(m)
if( msgsize .GT.0 )then
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
to_pe = update%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize , to_pe=to_pe, &
tag=id_update, request=request)
nonblock_data(id_update)%request_send(m) = request
end if
end do ! end do m = 1, update%nsend
call mpp_clock_end(send_pack_clock_nonblock)
return
end subroutine mpp_start_do_update_r8_3D
!###############################################################################
subroutine mpp_complete_do_update_r8_3D(id_update, f_addrs, domain, update, d_type, ke_max, ke_list, flags)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrs(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: flags
!--- local variables
integer :: i, j, k, m, n, l, dir, count, tMe, tNbr
integer :: buffer_pos, msgsize, from_pe, pos
integer :: is, ie, js, je
logical :: send(8), recv(8), update_edge_only
integer :: l_size, ke_sum, sendsize, recvsize
character(len=128) :: text
real(8) :: recv_buffer(size(mpp_domains_stack_nonblock(:)))
real(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke_max)
pointer( ptr, recv_buffer )
pointer(ptr_field, field)
update_edge_only = BTEST(flags, EDGEONLY)
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
ke_sum = sum(ke_list)
l_size = size(f_addrs,1)
ptr = LOC(mpp_domains_stack_nonblock)
count = update%nrecv
if(count > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_RECV, request=nonblock_data(id_update)%request_recv(1:count), &
msg_size=nonblock_data(id_update)%size_recv(1:count), &
msg_type=nonblock_data(id_update)%type_recv(1:count) )
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_recv(:) = MPI_REQUEST_NULL
# 314
nonblock_data(id_update)%type_recv(:) = 0
endif
!--unpack the data
call mpp_clock_begin(unpk_clock_nonblock)
!$OMP parallel do schedule(dynamic) default(shared) private(dir,buffer_pos,pos,tMe,is,ie,js,je,msgsize, &
!$OMP ptr_field)
do m = update%nrecv, 1, -1
if( update%recv(m)%count == 0 )cycle
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m) + nonblock_data(id_update)%size_recv(m)
pos = buffer_pos
do n = update%recv(m)%count, 1, -1
dir = update%recv(m)%dir(n)
if( recv(dir) ) then
tMe = update%recv(m)%tileMe(n)
is = update%recv(m)%is(n); ie = update%recv(m)%ie(n)
js = update%recv(m)%js(n); je = update%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke_sum
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = recv_buffer(pos)
end do
end do
end do
end do
end if
end do ! end do n = update%recv(m)%count, 1, -1
end do
!$OMP end parallel do
call mpp_clock_end(unpk_clock_nonblock)
count = update%nrecv
if(count > 0) then
nonblock_data(id_update)%size_recv(:) = 0
endif
count = update%nsend
if(count > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_SEND, request=nonblock_data(id_update)%request_send(1:count))
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_send_count = 0
nonblock_data(id_update)%request_send(:) = MPI_REQUEST_NULL
# 368
endif
! call init_nonblock_type(nonblock_data(id_update))
return
end subroutine mpp_complete_do_update_r8_3D
# 1098 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_updateV_nonblock.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_start_do_update_r8_3Dv(id_update, f_addrsx, f_addrsy, domain, update_x, update_y, &
d_type, ke_max, ke_list, gridtype, flags, reuse_id_update, name)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update_x, update_y
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: gridtype
logical, intent(in) :: reuse_id_update
character(len=*), intent(in) :: name
integer, intent(in) :: flags
!---local variable ------------------------------------------
integer :: i, j, k, l, is, ie, js, je, n, m
integer :: pos, nlist, msgsize, tile, l_size
integer :: to_pe, from_pe, buffer_pos
integer :: tMe, dir, ke_sum
logical :: send(8), recv(8), update_edge_only
character(len=128) :: text
integer :: ind_x, ind_y
integer :: nsend, nrecv, sendsize, recvsize
integer :: request
integer :: send_msgsize(update_x%nsend+update_y%nsend)
integer :: ind_send_x(update_x%nsend+update_y%nsend), ind_send_y(update_x%nsend+update_y%nsend)
integer :: ind_recv_x(update_x%nrecv+update_y%nrecv), ind_recv_y(update_x%nrecv+update_y%nrecv)
integer :: from_pe_list(update_x%nrecv+update_y%nrecv), to_pe_list(update_x%nsend+update_y%nsend)
integer :: start_pos_recv(update_x%nrecv+update_y%nrecv), start_pos_send(update_x%nsend+update_y%nsend)
real(8) :: fieldx(update_x%xbegin:update_x%xend, update_x%ybegin:update_x%yend,ke_max)
real(8) :: fieldy(update_y%xbegin:update_y%xend, update_y%ybegin:update_y%yend,ke_max)
real(8) :: buffer(size(mpp_domains_stack_nonblock(:)))
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
pointer( ptr, buffer )
update_edge_only = BTEST(flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
ke_sum = sum(ke_list)
l_size = size(f_addrsx,1)
nlist = size(domain%list(:))
ptr = LOC(mpp_domains_stack_nonblock)
nrecv = get_vector_recv(domain, update_x, update_y, ind_recv_x, ind_recv_y, start_pos_recv, from_pe_list)
nsend = get_vector_send(domain, update_x, update_y, ind_send_x, ind_send_y, start_pos_send, to_pe_list)
if( nrecv > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'nrecv =', nrecv, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE_V: '//trim(text))
endif
if( nsend > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'nsend =', nsend, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE_V: '//trim(text))
endif
!--- make sure the domain stack size is big enough.
buffer_pos = nonblock_data(id_update)%recv_pos
recvsize = 0
do m = 1, nrecv
msgsize = 0
nonblock_data(id_update)%size_recv(m) = 0
ind_x = ind_recv_x(m)
ind_y = ind_recv_y(m)
if(ind_x >= 0) then
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
msgsize = msgsize + update_x%recv(ind_x)%msgsize(n)
end if
end do
endif
if(ind_y >= 0) then
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
msgsize = msgsize + update_y%recv(ind_y)%msgsize(n)
end if
end do
endif
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
recvsize = recvsize + msgsize
nonblock_data(id_update)%size_recv(m) = msgsize
nonblock_data(id_update)%buffer_pos_recv(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
sendsize = 0
do m = 1, nsend
msgsize = 0
ind_x = ind_send_x(m)
ind_y = ind_send_y(m)
if(ind_x >= 0) then
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if(send(dir)) then
msgsize = msgsize + update_x%send(ind_x)%msgsize(n)
end if
end do
endif
if(ind_y >= 0) then
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if(send(dir)) then
msgsize = msgsize + update_y%send(ind_y)%msgsize(n)
end if
end do
endif
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
sendsize = sendsize + msgsize
nonblock_data(id_update)%buffer_pos_send(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, &
nonblock_data(id_update)%recv_pos+recvsize+sendsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_START_DO_UPDATE_V: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
if( reuse_id_update ) then
if(recvsize .NE. nonblock_data(id_update)%recv_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of recv msgsize for field '//trim(name) )
endif
if(sendsize .NE. nonblock_data(id_update)%send_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of send msgsize for field '//trim(name) )
endif
else
nonblock_data(id_update)%recv_msgsize = recvsize
nonblock_data(id_update)%send_msgsize = sendsize
nonblock_data(id_update)%send_pos = nonblock_data(id_update)%recv_pos + recvsize
nonblock_buffer_pos = nonblock_buffer_pos + recvsize + sendsize
endif
!--- recv
call mpp_clock_begin(recv_clock_nonblock)
do m = 1, nrecv
msgsize = nonblock_data(id_update)%size_recv(m)
from_pe = from_pe_list(m)
if( msgsize .GT. 0 )then
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m)
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., &
tag=id_update, request=request)
nonblock_data(id_update)%request_recv(m) = request
nonblock_data(id_update)%type_recv(m) = MPI_REAL8
end if
end do
call mpp_clock_end(recv_clock_nonblock)
!--- send
call mpp_clock_begin(send_pack_clock_nonblock)
!$OMP parallel do schedule(dynamic) default(shared) private(ind_x,ind_y,buffer_pos,pos,dir,tMe, &
!$OMP is,ie,js,je,ptr_fieldx,ptr_fieldy)
do m = 1, nsend
send_msgsize(m) = 0
ind_x = ind_send_x(m)
ind_y = ind_send_y(m)
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
pos = buffer_pos
select case( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(ind_x >= 0) then
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 2
buffer(pos-1) = fieldx(i,j,k)
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 2
buffer(pos-1) = -fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
end if
case( NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
buffer(pos-1) = fieldx(i,j,k)
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
buffer(pos-1) = -fieldx(i,j,k)
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
end if ! if( send(dir) )
end do ! do n = 1, update_x%send(ind_x)%count
endif
case(CGRID_NE, CGRID_SW)
if(ind_x>=0) then
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case(NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1, ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select
end if
end do
endif
if(ind_y>=0) then
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) then
tMe = update_y%send(ind_y)%tileMe(n)
is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
select case( update_y%send(ind_y)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(NINETY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case(ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select
endif
enddo
endif
end select
send_msgsize(m) = pos - buffer_pos
enddo
!$OMP end parallel do
do m = 1, nsend
msgsize = send_msgsize(m)
to_pe = to_pe_list(m)
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
if( msgsize .GT.0 )then
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, &
tag=id_update, request=request )
nonblock_data(id_update)%request_send(m) = request
end if
end do
call mpp_clock_end(send_pack_clock_nonblock)
end subroutine mpp_start_do_update_r8_3Dv
!###############################################################################
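! mpp_complete_do_update_r8_3Dv waits on the nonblocking receives posted by the matching
! mpp_start_do_update_r8_3Dv call, unpacks the received halo data into the x- and y-component
! fields, applies the folded-boundary (north/south/west/east) vector corrections, and finally
! waits on the outstanding nonblocking sends.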
subroutine mpp_complete_do_update_r8_3Dv(id_update, f_addrsx, f_addrsy, domain, update_x, update_y, &
d_type, ke_max, ke_list, gridtype, flags)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update_x, update_y
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: gridtype
integer, intent(in) :: flags
!--- local variables
real(8) :: fieldx(update_x%xbegin:update_x%xend, update_x%ybegin:update_x%yend,ke_max)
real(8) :: fieldy(update_y%xbegin:update_y%xend, update_y%ybegin:update_y%yend,ke_max)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
real(8) :: recv_buffer(size(mpp_domains_stack_nonblock(:)))
pointer( ptr, recv_buffer )
integer :: i, j, k, l, is, ie, js, je, n, ke_sum, l_size, m
integer :: pos, nlist, msgsize, tile, buffer_pos
integer :: ind_x, ind_y, nrecv, nsend
integer :: ind_recv_x(update_x%nrecv+update_y%nrecv), ind_recv_y(update_x%nrecv+update_y%nrecv)
integer :: start_pos_recv(update_x%nrecv+update_y%nrecv)
integer :: from_pe_list(update_x%nrecv+update_y%nrecv)
logical :: recv(8), send(8), update_edge_only
integer :: shift, midpoint
integer :: tMe, dir
update_edge_only = BTEST(flags, EDGEONLY)
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
ke_sum = sum(ke_list)
l_size = size(f_addrsx,1)
nlist = size(domain%list(:))
ptr = LOC(mpp_domains_stack_nonblock)
nrecv = get_vector_recv(domain, update_x, update_y, ind_recv_x, ind_recv_y, start_pos_recv, from_pe_list)
if(nrecv > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_RECV, request=nonblock_data(id_update)%request_recv(1:nrecv), &
msg_size=nonblock_data(id_update)%size_recv(1:nrecv), &
msg_type=nonblock_data(id_update)%type_recv(1:nrecv) )
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_recv(:) = MPI_REQUEST_NULL
# 607
nonblock_data(id_update)%type_recv(:) = 0
endif
call mpp_clock_begin(unpk_clock_nonblock)
!$OMP parallel do schedule(dynamic) default(shared) private(ind_x,ind_y,buffer_pos,pos,dir,tMe,is,ie,js,je, &
!$OMP msgsize,ptr_fieldx,ptr_fieldy)
do m = nrecv,1,-1
ind_x = ind_recv_x(m)
ind_y = ind_recv_y(m)
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m)+nonblock_data(id_update)%size_recv(m)
pos = buffer_pos
select case ( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(ind_x>=0) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke_sum*2
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 2
fieldx(i,j,k) = recv_buffer(pos-1)
fieldy(i,j,k) = recv_buffer(pos)
end do
end do
enddo
end do
end if ! end if( recv(dir) )
end do ! do n = update_x%recv(ind_x)%count, 1, -1
endif
case(CGRID_NE, CGRID_SW)
if(ind_y>=0) then
do n = update_y%recv(ind_y)%count, 1, -1
dir = update_y%recv(ind_y)%dir(n)
if( recv(dir) ) then
tMe = update_y%recv(ind_y)%tileMe(n)
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke_sum
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = recv_buffer(pos)
end do
end do
end do
end do
end if
end do
endif
if(ind_x>=0) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke_sum
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = recv_buffer(pos)
end do
end do
end do
end do
end if
end do
endif
end select
end do
!$OMP end parallel do
call mpp_clock_end(unpk_clock_nonblock)
! ---northern boundary fold
shift = 0
tMe = 1
if(domain%symmetry) shift = 1
if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
if( .NOT. domain%symmetry ) is = is - 1
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
enddo
end if
end do
endif
! the following code block corrects an error where the data in your halo coming from
! the other half may have the wrong sign
!off west edge, when updating north or west direction
j = domain%y(1)%global%end+shift
if ( recv(7) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
if(domain%symmetry) then
is = domain%x(1)%global%begin
else
is = domain%x(1)%global%begin - 1
end if
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-north BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = domain%x(1)%data%begin,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-north CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = domain%x(1)%data%begin,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%data%end
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,SOUTH) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then ! ---southern boundary fold
! NOTE: symmetry is assumed for fold-south boundary
j = domain%y(1)%global%begin
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
j = domain%y(1)%global%begin
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the data in your halo coming from
! the other half may have the wrong sign
!off west edge, when updating south or west direction
j = domain%y(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-south BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = domain%x(1)%data%begin,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-south CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = domain%x(1)%data%begin,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%data%end
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,WEST) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then ! ---western boundary fold
! NOTE: symmetry is assumed for fold-west boundary
i = domain%x(1)%global%begin
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%begin
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the data in your halo coming from
! the other half may have the wrong sign
!off south edge, when updating south or west direction
i = domain%x(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-west BGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i,2*js-j,k)
fieldy(i,j,k) = fieldy(i,2*js-j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-west CGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke_list(l,tMe)
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke_list(l,tMe)
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,EAST) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then ! ---eastern boundary fold
! NOTE: symmetry is assumed for fold-east boundary
i = domain%x(1)%global%end+shift
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%end+shift
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the data in your halo coming from
! the other half may have the wrong sign
!off south edge, when updating south or east direction
i = domain%x(1)%global%end+shift
if ( recv(3) .OR. recv(1) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-east BGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i,2*js-j,k)
fieldy(i,j,k) = fieldy(i,2*js-j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-east CGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke_list(l,tMe)
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke_list(l,tMe)
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
end if
if(nrecv>0) then
nonblock_data(id_update)%size_recv(:) = 0
endif
nsend = update_x%nsend+update_y%nsend
if(nsend > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_SEND, request=nonblock_data(id_update)%request_send(1:nsend))
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_send_count = 0
nonblock_data(id_update)%request_send(:) = MPI_REQUEST_NULL
# 1102
endif
return
end subroutine mpp_complete_do_update_r8_3Dv
# 1099 "../mpp/include/mpp_domains_misc.inc" 2
# 1110
# 1 "../mpp/include/mpp_do_update_nonblock.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
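! mpp_start_do_update_i8_3D posts nonblocking receives for the requested halo directions,
! packs the send buffers (applying any rotation between tiles), and posts the nonblocking
! sends; the matching mpp_complete_do_update_i8_3D call waits on these requests and unpacks.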
subroutine mpp_start_do_update_i8_3D(id_update, f_addrs, domain, update, d_type, ke_max, ke_list, flags, reuse_id_update, name)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
integer(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
logical, intent(in) :: reuse_id_update
character(len=*), intent(in) :: name
integer, intent(in) :: flags
!--- local variables
integer :: i, j, k, m, n, l, dir, tMe
integer :: buffer_pos, msgsize, from_pe, to_pe, pos
integer :: is, ie, js, je, sendsize, recvsize
logical :: send(8), recv(8), update_edge_only
integer :: l_size, ke_sum, my_id_update
integer :: request
integer :: send_msgsize(MAXLIST)
character(len=128) :: text
integer(8) :: buffer(size(mpp_domains_stack_nonblock(:)))
integer(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke_max)
pointer( ptr, buffer )
pointer(ptr_field, field)
update_edge_only = BTEST(flags, EDGEONLY)
recv = .false.
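! halo directions: 1=E, 3=S, 5=W, 7=N are the edges; 2,4,6,8 are the intervening corners
! (SE, SW, NW, NE), each enabled only when both adjacent edges are requested.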
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
l_size = size(f_addrs,1)
ke_sum = sum(ke_list)
ptr = LOC(mpp_domains_stack_nonblock)
buffer_pos = nonblock_data(id_update)%recv_pos
if( update%nrecv > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'update%nrecv =', update%nrecv, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE: '//trim(text))
endif
if( update%nsend > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'update%nsend =', update%nsend, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE: '//trim(text))
endif
! --- compute buffer positions and message sizes for the pre-posted receives and the sends
!--- make sure the domain stack size is big enough.
recvsize = 0
do m = 1, update%nrecv
nonblock_data(id_update)%size_recv(m) = 0
if( update%recv(m)%count == 0 )cycle
msgsize = 0
do n = 1, update%recv(m)%count
dir = update%recv(m)%dir(n)
if(recv(dir)) then
msgsize = msgsize + update%recv(m)%msgsize(n)
end if
end do
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
recvsize = recvsize + msgsize
nonblock_data(id_update)%size_recv(m) = msgsize
nonblock_data(id_update)%buffer_pos_recv(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
sendsize = 0
do m = 1, update%nsend
if( update%send(m)%count == 0 )cycle
! make sure the stacksize is big enough
msgsize = 0
do n = 1, update%send(m)%count
dir = update%send(m)%dir(n)
if( send(dir) ) msgsize = msgsize + update%send(m)%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
sendsize = sendsize + msgsize
nonblock_data(id_update)%buffer_pos_send(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, &
nonblock_data(id_update)%recv_pos+recvsize+sendsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_START_DO_UPDATE: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
if( reuse_id_update ) then
if(recvsize .NE. nonblock_data(id_update)%recv_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of recv msgsize for field '//trim(name) )
endif
if(sendsize .NE. nonblock_data(id_update)%send_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of send msgsize for field '//trim(name) )
endif
else
nonblock_data(id_update)%recv_msgsize = recvsize
nonblock_data(id_update)%send_msgsize = sendsize
nonblock_data(id_update)%send_pos = nonblock_data(id_update)%recv_pos + recvsize
nonblock_buffer_pos = nonblock_buffer_pos + recvsize + sendsize
endif
! pre-postrecv
call mpp_clock_begin(recv_clock_nonblock)
do m = 1, update%nrecv
msgsize = nonblock_data(id_update)%size_recv(m)
if( msgsize.GT.0 )then
from_pe = update%recv(m)%pe
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m)
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., &
tag=id_update, request=request)
nonblock_data(id_update)%request_recv(m) = request
nonblock_data(id_update)%type_recv(m) = MPI_INTEGER8
end if
end do ! end do m = 1, update%nrecv
call mpp_clock_end(recv_clock_nonblock)
! send
call mpp_clock_begin(send_pack_clock_nonblock)
!$OMP parallel do schedule(dynamic) default(shared) private(buffer_pos,pos,dir,tMe,is,ie,js,je,ptr_field,to_pe, &
!$OMP msgsize,request)
do m = 1, update%nsend
send_msgsize(m) = 0
if( update%send(m)%count == 0 )cycle
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
pos = buffer_pos
do n = 1, update%send(m)%count
dir = update%send(m)%dir(n)
if( send(dir) ) then
tMe = update%send(m)%tileMe(n)
is = update%send(m)%is(n); ie = update%send(m)%ie(n)
js = update%send(m)%js(n); je = update%send(m)%je(n)
select case( update%send(m)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
enddo
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, update%send(m)%count
send_msgsize(m) = pos - buffer_pos
enddo
!$OMP end parallel do
do m = 1, update%nsend
msgsize = send_msgsize(m)
if( msgsize .GT.0 )then
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
to_pe = update%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize , to_pe=to_pe, &
tag=id_update, request=request)
nonblock_data(id_update)%request_send(m) = request
end if
end do ! do m = 1, update%nsend
call mpp_clock_end(send_pack_clock_nonblock)
return
end subroutine mpp_start_do_update_i8_3D
!###############################################################################
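! mpp_complete_do_update_i8_3D waits on the receives posted by mpp_start_do_update_i8_3D,
! unpacks the buffer into the field halos, and then waits on the outstanding sends.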
subroutine mpp_complete_do_update_i8_3D(id_update, f_addrs, domain, update, d_type, ke_max, ke_list, flags)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrs(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
integer(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: flags
!--- local variables
integer :: i, j, k, m, n, l, dir, count, tMe, tNbr
integer :: buffer_pos, msgsize, from_pe, pos
integer :: is, ie, js, je
logical :: send(8), recv(8), update_edge_only
integer :: l_size, ke_sum, sendsize, recvsize
character(len=128) :: text
integer(8) :: recv_buffer(size(mpp_domains_stack_nonblock(:)))
integer(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke_max)
pointer( ptr, recv_buffer )
pointer(ptr_field, field)
update_edge_only = BTEST(flags, EDGEONLY)
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
ke_sum = sum(ke_list)
l_size = size(f_addrs,1)
ptr = LOC(mpp_domains_stack_nonblock)
count = update%nrecv
if(count > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_RECV, request=nonblock_data(id_update)%request_recv(1:count), &
msg_size=nonblock_data(id_update)%size_recv(1:count), &
msg_type=nonblock_data(id_update)%type_recv(1:count) )
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_recv(:) = MPI_REQUEST_NULL
# 314
nonblock_data(id_update)%type_recv(:) = 0
endif
!--unpack the data
call mpp_clock_begin(unpk_clock_nonblock)
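! unpack in reverse order of packing: for each overlap the message size is recomputed from
! the (is:ie,js:je) extents and ke_sum, and pos is rewound to the start of that message.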
!$OMP parallel do schedule(dynamic) default(shared) private(dir,buffer_pos,pos,tMe,is,ie,js,je,msgsize, &
!$OMP ptr_field)
do m = update%nrecv, 1, -1
if( update%recv(m)%count == 0 )cycle
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m) + nonblock_data(id_update)%size_recv(m)
pos = buffer_pos
do n = update%recv(m)%count, 1, -1
dir = update%recv(m)%dir(n)
if( recv(dir) ) then
tMe = update%recv(m)%tileMe(n)
is = update%recv(m)%is(n); ie = update%recv(m)%ie(n)
js = update%recv(m)%js(n); je = update%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke_sum
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = recv_buffer(pos)
end do
end do
end do
end do
end if
end do ! do n = update%recv(m)%count, 1, -1
end do
!$OMP end parallel do
call mpp_clock_end(unpk_clock_nonblock)
count = update%nrecv
if(count > 0) then
nonblock_data(id_update)%size_recv(:) = 0
endif
count = update%nsend
if(count > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_SEND, request=nonblock_data(id_update)%request_send(1:count))
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_send_count = 0
nonblock_data(id_update)%request_send(:) = MPI_REQUEST_NULL
# 368
endif
! call init_nonblock_type(nonblock_data(id_update))
return
end subroutine mpp_complete_do_update_i8_3D
# 1122 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_update_nonblock.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
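! single-precision (real(4)) instantiation of the nonblocking scalar halo update; the logic
! mirrors the integer(8) version above.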
subroutine mpp_start_do_update_r4_3D(id_update, f_addrs, domain, update, d_type, ke_max, ke_list, flags, reuse_id_update, name)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
logical, intent(in) :: reuse_id_update
character(len=*), intent(in) :: name
integer, intent(in) :: flags
!--- local variables
integer :: i, j, k, m, n, l, dir, tMe
integer :: buffer_pos, msgsize, from_pe, to_pe, pos
integer :: is, ie, js, je, sendsize, recvsize
logical :: send(8), recv(8), update_edge_only
integer :: l_size, ke_sum, my_id_update
integer :: request
integer :: send_msgsize(MAXLIST)
character(len=128) :: text
real(4) :: buffer(size(mpp_domains_stack_nonblock(:)))
real(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke_max)
pointer( ptr, buffer )
pointer(ptr_field, field)
update_edge_only = BTEST(flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
l_size = size(f_addrs,1)
ke_sum = sum(ke_list)
ptr = LOC(mpp_domains_stack_nonblock)
buffer_pos = nonblock_data(id_update)%recv_pos
if( update%nrecv > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'update%nrecv =', update%nrecv, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE: '//trim(text))
endif
if( update%nsend > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'update%nsend =', update%nsend, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE: '//trim(text))
endif
! --- compute buffer positions and message sizes for the pre-posted receives and the sends
!--- make sure the domain stack size is big enough.
recvsize = 0
do m = 1, update%nrecv
nonblock_data(id_update)%size_recv(m) = 0
if( update%recv(m)%count == 0 )cycle
msgsize = 0
do n = 1, update%recv(m)%count
dir = update%recv(m)%dir(n)
if(recv(dir)) then
msgsize = msgsize + update%recv(m)%msgsize(n)
end if
end do
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
recvsize = recvsize + msgsize
nonblock_data(id_update)%size_recv(m) = msgsize
nonblock_data(id_update)%buffer_pos_recv(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
sendsize = 0
do m = 1, update%nsend
if( update%send(m)%count == 0 )cycle
! make sure the stacksize is big enough
msgsize = 0
do n = 1, update%send(m)%count
dir = update%send(m)%dir(n)
if( send(dir) ) msgsize = msgsize + update%send(m)%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
sendsize = sendsize + msgsize
nonblock_data(id_update)%buffer_pos_send(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, &
nonblock_data(id_update)%recv_pos+recvsize+sendsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_START_DO_UPDATE: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
if( reuse_id_update ) then
if(recvsize .NE. nonblock_data(id_update)%recv_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of recv msgsize for field '//trim(name) )
endif
if(sendsize .NE. nonblock_data(id_update)%send_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of send msgsize for field '//trim(name) )
endif
else
nonblock_data(id_update)%recv_msgsize = recvsize
nonblock_data(id_update)%send_msgsize = sendsize
nonblock_data(id_update)%send_pos = nonblock_data(id_update)%recv_pos + recvsize
nonblock_buffer_pos = nonblock_buffer_pos + recvsize + sendsize
endif
! pre-postrecv
call mpp_clock_begin(recv_clock_nonblock)
do m = 1, update%nrecv
msgsize = nonblock_data(id_update)%size_recv(m)
if( msgsize.GT.0 )then
from_pe = update%recv(m)%pe
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m)
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., &
tag=id_update, request=request)
nonblock_data(id_update)%request_recv(m) = request
nonblock_data(id_update)%type_recv(m) = MPI_REAL4
end if
end do ! end do m = 1, update%nrecv
call mpp_clock_end(recv_clock_nonblock)
! send
call mpp_clock_begin(send_pack_clock_nonblock)
!$OMP parallel do schedule(dynamic) default(shared) private(buffer_pos,pos,dir,tMe,is,ie,js,je,ptr_field,to_pe, &
!$OMP msgsize,request)
do m = 1, update%nsend
send_msgsize(m) = 0
if( update%send(m)%count == 0 )cycle
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
pos = buffer_pos
do n = 1, update%send(m)%count
dir = update%send(m)%dir(n)
if( send(dir) ) then
tMe = update%send(m)%tileMe(n)
is = update%send(m)%is(n); ie = update%send(m)%ie(n)
js = update%send(m)%js(n); je = update%send(m)%je(n)
select case( update%send(m)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
enddo
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, update%send(m)%count
send_msgsize(m) = pos - buffer_pos
enddo
!$OMP end parallel do
do m = 1, update%nsend
msgsize = send_msgsize(m)
if( msgsize .GT.0 )then
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
to_pe = update%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize , to_pe=to_pe, &
tag=id_update, request=request)
nonblock_data(id_update)%request_send(m) = request
end if
end do ! do m = 1, update%nsend
call mpp_clock_end(send_pack_clock_nonblock)
return
end subroutine mpp_start_do_update_r4_3D
!###############################################################################
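! completion step for the real(4) scalar nonblocking update: wait on the posted receives,
! unpack the halo data, then wait on the outstanding sends.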
subroutine mpp_complete_do_update_r4_3D(id_update, f_addrs, domain, update, d_type, ke_max, ke_list, flags)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrs(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: flags
!--- local variables
integer :: i, j, k, m, n, l, dir, count, tMe, tNbr
integer :: buffer_pos, msgsize, from_pe, pos
integer :: is, ie, js, je
logical :: send(8), recv(8), update_edge_only
integer :: l_size, ke_sum, sendsize, recvsize
character(len=128) :: text
real(4) :: recv_buffer(size(mpp_domains_stack_nonblock(:)))
real(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke_max)
pointer( ptr, recv_buffer )
pointer(ptr_field, field)
update_edge_only = BTEST(flags, EDGEONLY)
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
ke_sum = sum(ke_list)
l_size = size(f_addrs,1)
ptr = LOC(mpp_domains_stack_nonblock)
count = update%nrecv
if(count > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_RECV, request=nonblock_data(id_update)%request_recv(1:count), &
msg_size=nonblock_data(id_update)%size_recv(1:count), &
msg_type=nonblock_data(id_update)%type_recv(1:count) )
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_recv(:) = MPI_REQUEST_NULL
# 314
nonblock_data(id_update)%type_recv(:) = 0
endif
!--unpack the data
call mpp_clock_begin(unpk_clock_nonblock)
!$OMP parallel do schedule(dynamic) default(shared) private(dir,buffer_pos,pos,tMe,is,ie,js,je,msgsize, &
!$OMP ptr_field)
do m = update%nrecv, 1, -1
if( update%recv(m)%count == 0 )cycle
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m) + nonblock_data(id_update)%size_recv(m)
pos = buffer_pos
do n = update%recv(m)%count, 1, -1
dir = update%recv(m)%dir(n)
if( recv(dir) ) then
tMe = update%recv(m)%tileMe(n)
is = update%recv(m)%is(n); ie = update%recv(m)%ie(n)
js = update%recv(m)%js(n); je = update%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke_sum
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = recv_buffer(pos)
end do
end do
end do
end do
end if
end do ! do n = update%recv(m)%count, 1, -1
end do
!$OMP end parallel do
call mpp_clock_end(unpk_clock_nonblock)
count = update%nrecv
if(count > 0) then
nonblock_data(id_update)%size_recv(:) = 0
endif
count = update%nsend
if(count > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_SEND, request=nonblock_data(id_update)%request_send(1:count))
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_send_count = 0
nonblock_data(id_update)%request_send(:) = MPI_REQUEST_NULL
# 368
endif
! call init_nonblock_type(nonblock_data(id_update))
return
end subroutine mpp_complete_do_update_r4_3D
# 1138 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_updateV_nonblock.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
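! mpp_start_do_update_r4_3Dv: vector (x,y) version of the nonblocking start. For AGRID and
! B-grid staggering the two components are interleaved in the same message; for C-grid they
! are packed separately. Rotations between tiles may swap components and flip signs unless
! SCALAR_BIT is set in flags.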
subroutine mpp_start_do_update_r4_3Dv(id_update, f_addrsx, f_addrsy, domain, update_x, update_y, &
d_type, ke_max, ke_list, gridtype, flags, reuse_id_update, name)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update_x, update_y
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: gridtype
logical, intent(in) :: reuse_id_update
character(len=*), intent(in) :: name
integer, intent(in) :: flags
!---local variable ------------------------------------------
integer :: i, j, k, l, is, ie, js, je, n, m
integer :: pos, nlist, msgsize, tile, l_size
integer :: to_pe, from_pe, buffer_pos
integer :: tMe, dir, ke_sum
logical :: send(8), recv(8), update_edge_only
character(len=128) :: text
integer :: ind_x, ind_y
integer :: nsend, nrecv, sendsize, recvsize
integer :: request
integer :: send_msgsize(update_x%nsend+update_y%nsend)
integer :: ind_send_x(update_x%nsend+update_y%nsend), ind_send_y(update_x%nsend+update_y%nsend)
integer :: ind_recv_x(update_x%nrecv+update_y%nrecv), ind_recv_y(update_x%nrecv+update_y%nrecv)
integer :: from_pe_list(update_x%nrecv+update_y%nrecv), to_pe_list(update_x%nsend+update_y%nsend)
integer :: start_pos_recv(update_x%nrecv+update_y%nrecv), start_pos_send(update_x%nsend+update_y%nsend)
real(4) :: fieldx(update_x%xbegin:update_x%xend, update_x%ybegin:update_x%yend,ke_max)
real(4) :: fieldy(update_y%xbegin:update_y%xend, update_y%ybegin:update_y%yend,ke_max)
real(4) :: buffer(size(mpp_domains_stack_nonblock(:)))
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
pointer( ptr, buffer )
update_edge_only = BTEST(flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
ke_sum = sum(ke_list)
l_size = size(f_addrsx,1)
nlist = size(domain%list(:))
ptr = LOC(mpp_domains_stack_nonblock)
nrecv = get_vector_recv(domain, update_x, update_y, ind_recv_x, ind_recv_y, start_pos_recv, from_pe_list)
nsend = get_vector_send(domain, update_x, update_y, ind_send_x, ind_send_y, start_pos_send, to_pe_list)
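! get_vector_recv/get_vector_send merge the x- and y-component overlap lists into a single
! list of communication partners; ind_recv_* and ind_send_* index back into update_x/update_y,
! and a negative index means that component has no overlap with the given partner.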
if( nrecv > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'nrecv =', nrecv, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE_V: '//trim(text))
endif
if( nsend > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'nsend =', nsend, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE_V: '//trim(text))
endif
!--- make sure the domain stack size is big enough.
buffer_pos = nonblock_data(id_update)%recv_pos
recvsize = 0
do m = 1, nrecv
msgsize = 0
nonblock_data(id_update)%size_recv(m) = 0
ind_x = ind_recv_x(m)
ind_y = ind_recv_y(m)
if(ind_x >= 0) then
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
msgsize = msgsize + update_x%recv(ind_x)%msgsize(n)
end if
end do
endif
if(ind_y >= 0) then
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
msgsize = msgsize + update_y%recv(ind_y)%msgsize(n)
end if
end do
endif
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
recvsize = recvsize + msgsize
nonblock_data(id_update)%size_recv(m) = msgsize
nonblock_data(id_update)%buffer_pos_recv(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
sendsize = 0
do m = 1, nsend
msgsize = 0
ind_x = ind_send_x(m)
ind_y = ind_send_y(m)
if(ind_x >= 0) then
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if(send(dir)) then
msgsize = msgsize + update_x%send(ind_x)%msgsize(n)
end if
end do
endif
if(ind_y >= 0) then
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if(send(dir)) then
msgsize = msgsize + update_y%send(ind_y)%msgsize(n)
end if
end do
endif
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
sendsize = sendsize + msgsize
nonblock_data(id_update)%buffer_pos_send(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, &
nonblock_data(id_update)%recv_pos+recvsize+sendsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_START_DO_UPDATE_V: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
if( reuse_id_update ) then
if(recvsize .NE. nonblock_data(id_update)%recv_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE_V: mismatch of recv msgsize for field '//trim(name) )
endif
if(sendsize .NE. nonblock_data(id_update)%send_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE_V: mismatch of send msgsize for field '//trim(name) )
endif
else
nonblock_data(id_update)%recv_msgsize = recvsize
nonblock_data(id_update)%send_msgsize = sendsize
nonblock_data(id_update)%send_pos = nonblock_data(id_update)%recv_pos + recvsize
nonblock_buffer_pos = nonblock_buffer_pos + recvsize + sendsize
endif
!--- recv
call mpp_clock_begin(recv_clock_nonblock)
do m = 1, nrecv
msgsize = nonblock_data(id_update)%size_recv(m)
from_pe = from_pe_list(m)
if( msgsize .GT. 0 )then
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m)
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., &
tag=id_update, request=request)
nonblock_data(id_update)%request_recv(m) = request
nonblock_data(id_update)%type_recv(m) = MPI_REAL4
end if
end do
call mpp_clock_end(recv_clock_nonblock)
!--- send
call mpp_clock_begin(send_pack_clock_nonblock)
!$OMP parallel do schedule(dynamic) default(shared) private(ind_x,ind_y,buffer_pos,pos,dir,tMe, &
!$OMP is,ie,js,je,ptr_fieldx,ptr_fieldy)
do m = 1, nsend
send_msgsize(m) = 0
ind_x = ind_send_x(m)
ind_y = ind_send_y(m)
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
pos = buffer_pos
select case( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(ind_x >= 0) then
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 2
buffer(pos-1) = fieldx(i,j,k)
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 2
buffer(pos-1) = -fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
end if
case( NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
buffer(pos-1) = fieldx(i,j,k)
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
buffer(pos-1) = -fieldx(i,j,k)
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
end if ! if( send(dir) )
end do ! do n = 1, update_x%send(ind_x)%count
endif
case(CGRID_NE, CGRID_SW)
if(ind_x>=0) then
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case(NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1, ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select
end if
end do
endif
if(ind_y>=0) then
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) then
tMe = update_y%send(ind_y)%tileMe(n)
is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
select case( update_y%send(ind_y)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(NINETY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case(ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select
endif
enddo
endif
end select
send_msgsize(m) = pos - buffer_pos
enddo
!$OMP end parallel do
do m = 1, nsend
msgsize = send_msgsize(m)
to_pe = to_pe_list(m)
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
if( msgsize .GT.0 )then
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, &
tag=id_update, request=request )
nonblock_data(id_update)%request_send(m) = request
end if
end do
call mpp_clock_end(send_pack_clock_nonblock)
end subroutine mpp_start_do_update_r4_3Dv
!###############################################################################
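! completion step for the real(4) vector nonblocking update: wait on the posted receives,
! unpack both components, apply the folded-boundary sign/mirror corrections, then wait on
! the outstanding sends.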
subroutine mpp_complete_do_update_r4_3Dv(id_update, f_addrsx, f_addrsy, domain, update_x, update_y, &
d_type, ke_max, ke_list, gridtype, flags)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update_x, update_y
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: gridtype
integer, intent(in) :: flags
!--- local variables
real(4) :: fieldx(update_x%xbegin:update_x%xend, update_x%ybegin:update_x%yend,ke_max)
real(4) :: fieldy(update_y%xbegin:update_y%xend, update_y%ybegin:update_y%yend,ke_max)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
real(4) :: recv_buffer(size(mpp_domains_stack_nonblock(:)))
pointer( ptr, recv_buffer )
integer :: i, j, k, l, is, ie, js, je, n, ke_sum, l_size, m
integer :: pos, nlist, msgsize, tile, buffer_pos
integer :: ind_x, ind_y, nrecv, nsend
integer :: ind_recv_x(update_x%nrecv+update_y%nrecv), ind_recv_y(update_x%nrecv+update_y%nrecv)
integer :: start_pos_recv(update_x%nrecv+update_y%nrecv)
integer :: from_pe_list(update_x%nrecv+update_y%nrecv)
logical :: recv(8), send(8), update_edge_only
integer :: shift, midpoint
integer :: tMe, dir
update_edge_only = BTEST(flags, EDGEONLY)
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
ke_sum = sum(ke_list)
l_size = size(f_addrsx,1)
nlist = size(domain%list(:))
ptr = LOC(mpp_domains_stack_nonblock)
nrecv = get_vector_recv(domain, update_x, update_y, ind_recv_x, ind_recv_y, start_pos_recv, from_pe_list)
if(nrecv > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_RECV, request=nonblock_data(id_update)%request_recv(1:nrecv), &
msg_size=nonblock_data(id_update)%size_recv(1:nrecv), &
msg_type=nonblock_data(id_update)%type_recv(1:nrecv) )
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_recv(:) = MPI_REQUEST_NULL
# 607
nonblock_data(id_update)%type_recv(:) = 0
endif
call mpp_clock_begin(unpk_clock_nonblock)
!$OMP parallel do schedule(dynamic) default(shared) private(ind_x,ind_y,buffer_pos,pos,dir,tMe,is,ie,js,je, &
!$OMP msgsize,ptr_fieldx,ptr_fieldy)
do m = nrecv,1,-1
ind_x = ind_recv_x(m)
ind_y = ind_recv_y(m)
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m)+nonblock_data(id_update)%size_recv(m)
pos = buffer_pos
select case ( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(ind_x>=0) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke_sum*2
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 2
fieldx(i,j,k) = recv_buffer(pos-1)
fieldy(i,j,k) = recv_buffer(pos)
end do
end do
enddo
end do
end if ! end if( recv(dir) )
end do ! do n = update_x%recv(ind_x)%count, 1, -1
endif
case(CGRID_NE, CGRID_SW)
if(ind_y>=0) then
do n = update_y%recv(ind_y)%count, 1, -1
dir = update_y%recv(ind_y)%dir(n)
if( recv(dir) ) then
tMe = update_y%recv(ind_y)%tileMe(n)
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke_sum
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = recv_buffer(pos)
end do
end do
end do
end do
end if
end do
endif
if(ind_x>=0) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke_sum
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = recv_buffer(pos)
end do
end do
end do
end do
end if
end do
endif
end select
end do
!$OMP end parallel do
call mpp_clock_end(unpk_clock_nonblock)
! ---northern boundary fold
shift = 0
tMe = 1
if(domain%symmetry) shift = 1
if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
if( .NOT. domain%symmetry ) is = is - 1
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
enddo
end if
end do
endif
! The following code block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign.
!off west edge, when updating north or west direction
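! Illustrative note on the mirror fix below: indices are reflected across the
! fold point "is", e.g. for the BGRID case
!   fieldx(i,j,k) = fieldx(2*is-i,j,k)
! copies the value from the mirror-image point i' = 2*is - i, so i = is-1 maps
! to i' = is+1, i = is-2 maps to i' = is+2, and so on.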
j = domain%y(1)%global%end+shift
if ( recv(7) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
if(domain%symmetry) then
is = domain%x(1)%global%begin
else
is = domain%x(1)%global%begin - 1
end if
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-north BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = domain%x(1)%data%begin,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-north CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = domain%x(1)%data%begin,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%data%end
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,SOUTH) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then ! ---southern boundary fold
! NOTE: symmetry is assumed for fold-south boundary
j = domain%y(1)%global%begin
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
j = domain%y(1)%global%begin
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! The following code block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign.
!off west edge, when updating south or west direction
j = domain%y(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-south BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = domain%x(1)%data%begin,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-south CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = domain%x(1)%data%begin,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%data%end
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,WEST) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then ! ---western boundary fold
! NOTE: symmetry is assumed for fold-west boundary
i = domain%x(1)%global%begin
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%begin
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! The following code block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign.
!off south edge, when updating south or west direction
i = domain%x(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-west BGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i,2*js-j,k)
fieldy(i,j,k) = fieldy(i,2*js-j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-west CGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke_list(l,tMe)
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke_list(l,tMe)
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,EAST) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then ! ---eastern boundary fold
! NOTE: symmetry is assumed for fold-east boundary
i = domain%x(1)%global%end+shift
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%end+shift
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! The following code block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign.
!off south edge, when updating south or east direction
i = domain%x(1)%global%end+shift
if ( recv(3) .OR. recv(1) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-east BGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i,2*js-j,k)
fieldy(i,j,k) = fieldy(i,2*js-j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_COMPLETE_DO_UPDATE_V: folded-east CGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke_list(l,tMe)
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke_list(l,tMe)
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke_list(l,tMe)
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
end if
if(nrecv>0) then
nonblock_data(id_update)%size_recv(:) = 0
endif
nsend = update_x%nsend+update_y%nsend
if(nsend > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_SEND, request=nonblock_data(id_update)%request_send(1:nsend))
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_send_count = 0
nonblock_data(id_update)%request_send(:) = MPI_REQUEST_NULL
# 1102
endif
return
end subroutine mpp_complete_do_update_r4_3Dv
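!###############################################################################
! Usage sketch (hedged): the start/complete pair above is normally reached
! through the public nonblocking interface; a typical calling sequence might
! look like
!   id = mpp_start_update_domains(u, v, domain, gridtype=BGRID_NE)
!   ... overlap interior computation here ...
!   call mpp_complete_update_domains(id, u, v, domain, gridtype=BGRID_NE)
! where "u", "v" and "id" are placeholder names, not variables defined here.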
# 1139 "../mpp/include/mpp_domains_misc.inc" 2
# 1151
# 1 "../mpp/include/mpp_do_update_nonblock.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_start_do_update_i4_3D(id_update, f_addrs, domain, update, d_type, ke_max, ke_list, flags, reuse_id_update, name)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
integer(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
logical, intent(in) :: reuse_id_update
character(len=*), intent(in) :: name
integer, intent(in) :: flags
!--- local variables
integer :: i, j, k, m, n, l, dir, tMe
integer :: buffer_pos, msgsize, from_pe, to_pe, pos
integer :: is, ie, js, je, sendsize, recvsize
logical :: send(8), recv(8), update_edge_only
integer :: l_size, ke_sum, my_id_update
integer :: request
integer :: send_msgsize(MAXLIST)
character(len=128) :: text
integer(4) :: buffer(size(mpp_domains_stack_nonblock(:)))
integer(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke_max)
pointer( ptr, buffer )
pointer(ptr_field, field)
update_edge_only = BTEST(flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
l_size = size(f_addrs,1)
ke_sum = sum(ke_list)
ptr = LOC(mpp_domains_stack_nonblock)
buffer_pos = nonblock_data(id_update)%recv_pos
if( update%nrecv > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'update%nrecv =', update%nrecv, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE: '//trim(text))
endif
if( update%nsend > MAX_REQUEST ) then
write( text,'(a,i8,a,i8)' ) 'update%nsend =', update%nsend, ' greater than MAX_REQUEST =', MAX_REQUEST
call mpp_error(FATAL,'MPP_START_DO_UPDATE: '//trim(text))
endif
!--- compute receive/send buffer positions and message sizes
!--- make sure the domain stack size is big enough.
recvsize = 0
do m = 1, update%nrecv
nonblock_data(id_update)%size_recv(m) = 0
if( update%recv(m)%count == 0 )cycle
msgsize = 0
do n = 1, update%recv(m)%count
dir = update%recv(m)%dir(n)
if(recv(dir)) then
msgsize = msgsize + update%recv(m)%msgsize(n)
end if
end do
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
recvsize = recvsize + msgsize
nonblock_data(id_update)%size_recv(m) = msgsize
nonblock_data(id_update)%buffer_pos_recv(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
sendsize = 0
do m = 1, update%nsend
if( update%send(m)%count == 0 )cycle
! make sure the stacksize is big enough
msgsize = 0
do n = 1, update%send(m)%count
dir = update%send(m)%dir(n)
if( send(dir) ) msgsize = msgsize + update%send(m)%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke_sum
sendsize = sendsize + msgsize
nonblock_data(id_update)%buffer_pos_send(m) = buffer_pos
buffer_pos = buffer_pos + msgsize
end if
end do
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, &
nonblock_data(id_update)%recv_pos+recvsize+sendsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_START_DO_UPDATE: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
if( reuse_id_update ) then
if(recvsize .NE. nonblock_data(id_update)%recv_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of recv msgsize for field '//trim(name) )
endif
if(sendsize .NE. nonblock_data(id_update)%send_msgsize) then
call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of send msgsize for field '//trim(name) )
endif
else
nonblock_data(id_update)%recv_msgsize = recvsize
nonblock_data(id_update)%send_msgsize = sendsize
nonblock_data(id_update)%send_pos = nonblock_data(id_update)%recv_pos + recvsize
nonblock_buffer_pos = nonblock_buffer_pos + recvsize + sendsize
endif
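! Layout note (illustrative sketch): for one update id the nonblocking buffer
! is carved out of mpp_domains_stack_nonblock as a contiguous receive region
! followed by a send region,
!   |<- recv_pos ->|<---- recvsize words ---->|<---- sendsize words ---->|
! so that send_pos = recv_pos + recvsize, and the stack high-water mark must
! cover recv_pos + recvsize + sendsize, as checked above.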
! pre-post the receives
call mpp_clock_begin(recv_clock_nonblock)
do m = 1, update%nrecv
msgsize = nonblock_data(id_update)%size_recv(m)
if( msgsize.GT.0 )then
from_pe = update%recv(m)%pe
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m)
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., &
tag=id_update, request=request)
nonblock_data(id_update)%request_recv(m) = request
nonblock_data(id_update)%type_recv(m) = MPI_INTEGER4
end if
end do ! end do m = 1, update%nrecv
call mpp_clock_end(recv_clock_nonblock)
! send
call mpp_clock_begin(send_pack_clock_nonblock)
!$OMP parallel do schedule(dynamic) default(shared) private(buffer_pos,pos,dir,tMe,is,ie,js,je,ptr_field,to_pe, &
!$OMP msgsize,request)
do m = 1, update%nsend
send_msgsize(m) = 0
if( update%send(m)%count == 0 )cycle
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
pos = buffer_pos
do n = 1, update%send(m)%count
dir = update%send(m)%dir(n)
if( send(dir) ) then
tMe = update%send(m)%tileMe(n)
is = update%send(m)%is(n); ie = update%send(m)%ie(n)
js = update%send(m)%js(n); je = update%send(m)%je(n)
select case( update%send(m)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
enddo
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, update%send(m)%count
send_msgsize(m) = pos - buffer_pos
enddo
!$OMP end parallel do
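! Note (illustrative): each overlap element carries a rotation
! (ZERO, NINETY, MINUS_NINETY, ONE_HUNDRED_EIGHTY) describing how the remote
! tile is oriented relative to this one; the pack loops above simply traverse
! (i,j) in the order the receiver will unpack, e.g. for MINUS_NINETY the j
! index runs backwards inside the i loop.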
do m = 1, update%nsend
msgsize = send_msgsize(m)
if( msgsize .GT.0 )then
buffer_pos = nonblock_data(id_update)%buffer_pos_send(m)
to_pe = update%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize , to_pe=to_pe, &
tag=id_update, request=request)
nonblock_data(id_update)%request_send(m) = request
end if
end do ! do m = 1, update%nsend
call mpp_clock_end(send_pack_clock_nonblock)
return
end subroutine mpp_start_do_update_i4_3D
!###############################################################################
subroutine mpp_complete_do_update_i4_3D(id_update, f_addrs, domain, update, d_type, ke_max, ke_list, flags)
integer, intent(in) :: id_update
integer(8), intent(in) :: f_addrs(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update
integer, intent(in) :: ke_max
integer, intent(in) :: ke_list(:,:)
integer(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: flags
!--- local variables
integer :: i, j, k, m, n, l, dir, count, tMe, tNbr
integer :: buffer_pos, msgsize, from_pe, pos
integer :: is, ie, js, je
logical :: send(8), recv(8), update_edge_only
integer :: l_size, ke_sum, sendsize, recvsize
character(len=128) :: text
integer(4) :: recv_buffer(size(mpp_domains_stack_nonblock(:)))
integer(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke_max)
pointer( ptr, recv_buffer )
pointer(ptr_field, field)
update_edge_only = BTEST(flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
ke_sum = sum(ke_list)
l_size = size(f_addrs,1)
ptr = LOC(mpp_domains_stack_nonblock)
count = update%nrecv
if(count > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_RECV, request=nonblock_data(id_update)%request_recv(1:count), &
msg_size=nonblock_data(id_update)%size_recv(1:count), &
msg_type=nonblock_data(id_update)%type_recv(1:count) )
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_recv(:) = MPI_REQUEST_NULL
# 314
nonblock_data(id_update)%type_recv(:) = 0
endif
!--unpack the data
call mpp_clock_begin(unpk_clock_nonblock)
!$OMP parallel do schedule(dynamic) default(shared) private(dir,buffer_pos,pos,tMe,is,ie,js,je,msgsize, &
!$OMP ptr_field)
do m = update%nrecv, 1, -1
if( update%recv(m)%count == 0 )cycle
buffer_pos = nonblock_data(id_update)%buffer_pos_recv(m) + nonblock_data(id_update)%size_recv(m)
pos = buffer_pos
do n = update%recv(m)%count, 1, -1
dir = update%recv(m)%dir(n)
if( recv(dir) ) then
tMe = update%recv(m)%tileMe(n)
is = update%recv(m)%is(n); ie = update%recv(m)%ie(n)
js = update%recv(m)%js(n); je = update%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke_sum
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke_list(l,tMe)
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = recv_buffer(pos)
end do
end do
end do
end do
end if
end do ! do n = update%recv(m)%count, 1, -1
end do
!$OMP end parallel do
call mpp_clock_end(unpk_clock_nonblock)
count = update%nrecv
if(count > 0) then
nonblock_data(id_update)%size_recv(:) = 0
endif
count = update%nsend
if(count > 0) then
call mpp_clock_begin(wait_clock_nonblock)
call mpp_sync_self(check=EVENT_SEND, request=nonblock_data(id_update)%request_send(1:count))
call mpp_clock_end(wait_clock_nonblock)
nonblock_data(id_update)%request_send_count = 0
nonblock_data(id_update)%request_send(:) = MPI_REQUEST_NULL
# 368
endif
! call init_nonblock_type(nonblock_data(id_update))
return
end subroutine mpp_complete_do_update_i4_3D
# 1162 "../mpp/include/mpp_domains_misc.inc" 2
!*******************************************************
# 1 "../mpp/include/mpp_do_update.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_r8_3d( f_addrs, domain, update, d_type, ke, flags)
!updates the data domain of a 3D field whose compute domain has already been computed
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
real(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
type(overlap_type), pointer :: overPtr => NULL()
character(len=8) :: text
!equate to mpp_domains_stack
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer, allocatable :: msg1(:), msg2(:), msg3(:)
logical :: send(8), recv(8), update_edge_only
integer :: to_pe, from_pe, pos, msgsize
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe, dir
integer :: start, start1, start2, index, is1, ie1, js1, je1, ni, nj, total
integer :: buffer_recv_size, nlist, outunit
integer :: send_start_pos
integer :: send_msgsize(MAXLIST)
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_edge_only = BTEST(update_flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1), msg3(0:nlist-1) )
msg1 = 0
msg2 = 0
msg3 = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = update%recv(m)%pe
l = from_pe-mpp_root_pe()
msg2(l) = msgsize
enddo
do m = 1, update%nsend
overPtr => update%send(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(send(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
l = overPtr%pe - mpp_root_pe()
msg3(l) = msgsize
enddo
call mpp_alltoall(msg3, 1, msg1, 1)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_update: mismatch on send and recv size")
endif
enddo
write(outunit,*)"NOTE from mpp_do_update: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2, msg3)
endif
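! Note (illustrative summary of the check above): msg2 holds the number of
! points this PE expects to receive from each rank, msg3 the number it will
! send to each rank; mpp_alltoall exchanges msg3 so that msg1(m) is what rank
! m intends to send here, and any mismatch between msg1 and msg2 is fatal.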
!recv
buffer_pos = 0
call mpp_clock_begin(recv_clock)
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nrecv
call mpp_clock_end(recv_clock)
buffer_recv_size = buffer_pos
send_start_pos = buffer_pos
! pack
call mpp_clock_begin(pack_clock)
do m = 1, update%nsend
send_msgsize(m) = 0
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_START_UPDATE_DOMAINS: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
select case( overPtr%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, overPtr%count
send_msgsize(m) = pos-buffer_pos
buffer_pos = pos
end do ! end do m = 1, nsend
call mpp_clock_end(pack_clock)
buffer_pos = send_start_pos
call mpp_clock_begin(send_clock)
do m = 1, update%nsend
msgsize = send_msgsize(m)
if(msgsize == 0) cycle
to_pe = update%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end do ! do m = 1, update%nsend
call mpp_clock_end(send_clock)
!unpack recv
!unpack halos in reverse order
! ptr_rfield = f_addrs(1)
call mpp_clock_begin(wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
dir = overPtr%dir(n)
if( recv(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
end do
endif
end do ! do n = overPtr%count, 1, -1
end do
call mpp_clock_end(unpk_clock)
call mpp_clock_begin(wait_clock)
call mpp_sync_self( )
call mpp_clock_end(wait_clock)
return
end subroutine mpp_do_update_r8_3d
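!###############################################################################
! Usage sketch (hedged): this blocking kernel is normally reached through the
! public interface rather than called directly, e.g.
!   call mpp_update_domains(field, domain, flags=XUPDATE+YUPDATE)
! where "field" is a placeholder name; the call fills the halo region of the
! data domain from the neighbouring compute domains in a single exchange.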
# 1175 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_updateV.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_r8_3dv(f_addrsx,f_addrsy, domain, update_x, update_y, &
d_type, ke, gridtype, flags)
!updates the data domain of a 3D field whose compute domain has already been computed
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update_x, update_y
integer, intent(in) :: ke
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: gridtype
integer, intent(in), optional :: flags
real(8) :: fieldx(update_x%xbegin:update_x%xend, update_x%ybegin:update_x%yend,ke)
real(8) :: fieldy(update_y%xbegin:update_y%xend, update_y%ybegin:update_y%yend,ke)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
integer :: update_flags
integer :: l_size, l, i, j, k, is, ie, js, je, n, m
integer :: pos, nlist, msgsize, isd, ied, jsd, jed
integer :: to_pe, from_pe, midpoint
integer :: tMe, dir
integer :: send_start_pos, nsend
integer :: send_msgsize(2*MAXLIST)
integer :: send_pe(2*MAXLIST)
integer, allocatable :: msg1(:), msg2(:), msg3(:)
logical :: send(8), recv(8), update_edge_only
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr,buffer )
integer :: buffer_pos
character(len=8) :: text
integer :: buffer_recv_size, shift
integer :: rank_x, rank_y, ind_x, ind_y, cur_rank
integer :: nsend_x, nsend_y, nrecv_x, nrecv_y, outunit
outunit = stdout()
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) ) then
update_flags = flags
! The following test is so that SCALAR_PAIR can be used alone with the
! same default update pattern as without.
if (BTEST(update_flags,SCALAR_BIT)) then
if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
.OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
end if
end if
if( BTEST(update_flags,NORTH) .AND. BTEST(domain%fold,NORTH) .AND. BTEST(gridtype,SOUTH) ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: Incompatible grid offset and fold.' )
update_edge_only = BTEST(update_flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
l_size = size(f_addrsx,1)
nlist = size(domain%list(:))
ptr = LOC(mpp_domains_stack)
!recv
nsend_x = update_x%nsend
nsend_y = update_y%nsend
nrecv_x = update_x%nrecv
nrecv_y = update_y%nrecv
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1), msg3(0:nlist-1) )
msg1 = 0
msg2 = 0
msg3 = 0
cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
msgsize = 0
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = update_y%recv(ind_y)%pe
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
m = from_pe-mpp_root_pe()
msg2(m) = msgsize
end do
cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
msgsize = 0
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) then
is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = update_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
m = to_pe-mpp_root_pe()
msg3(m) = msgsize
cur_rank = min(rank_x, rank_y)
enddo
call mpp_alltoall(msg3, 1, msg1, 1)
! call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_updateV: mismatch on send and recv size")
endif
enddo
! call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_updateV: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2, msg3)
endif
!--- recv
buffer_pos = 0
cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
call mpp_clock_begin(recv_clock)
do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
msgsize = 0
select case(gridtype)
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*2
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = update_y%recv(ind_y)%pe
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
end select
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, buffer_pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_clock_end(recv_clock)
buffer_recv_size = buffer_pos
send_start_pos = buffer_pos
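! Note (illustrative): all receives are posted into the front of the stack
! buffer, so buffer_recv_size marks where packed send data begins; the same
! offset is reused below as the starting point for unpacking, which walks the
! receive region backwards (last message first).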
!--- send
cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
nsend = 0
call mpp_clock_begin(pack_clock)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
!--- make sure the domain stack size is big enough
msgsize = 0
if(cur_rank == rank_x) then
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) msgsize = msgsize + update_x%send(ind_x)%msgsize(n)
enddo
endif
if(cur_rank == rank_y) then
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) msgsize = msgsize + update_y%send(ind_y)%msgsize(n)
enddo
endif
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
select case( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 2
buffer(pos-1) = fieldx(i,j,k)
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 2
buffer(pos-1) = -fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
end if
case( NINETY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
buffer(pos-1) = fieldx(i,j,k)
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
buffer(pos-1) = -fieldx(i,j,k)
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
end if ! if( send(dir) )
end do ! do n = 1, update_x%send(ind_x)%count
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case(NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) then
tMe = update_y%send(ind_y)%tileMe(n)
is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
select case( update_y%send(ind_y)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select
endif
enddo
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = update_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
end select
cur_rank = min(rank_x, rank_y)
nsend = nsend + 1
send_pe(nsend) = to_pe
send_msgsize(nsend) = pos - buffer_pos
buffer_pos = pos
end do
buffer_pos = send_start_pos
call mpp_clock_end(pack_clock)
call mpp_clock_begin(send_clock)
do m = 1, nsend
msgsize = send_msgsize(m)
if( msgsize.GT.0 )then
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=send_pe(m), tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_clock_end(send_clock)
!unpack recv
!unpack halos in reverse order
call mpp_clock_begin(wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(wait_clock)
buffer_pos = buffer_recv_size
cur_rank = get_rank_unpack(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
call mpp_clock_begin(unpk_clock)
do while (ind_x > 0 .OR. ind_y > 0)
pos = buffer_pos
select case ( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*2*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 2
fieldx(i,j,k) = buffer(pos-1)
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
end do
end if ! end if( recv(dir) )
end do ! do n = update_x%recv(ind_x)%count, 1, -1
ind_x = ind_x-1
ind_y = ind_x
if(ind_x .GT. 0) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_y) then
do n = update_y%recv(ind_y)%count, 1, -1
dir = update_y%recv(ind_y)%dir(n)
if( recv(dir) ) then
tMe = update_y%recv(ind_y)%tileMe(n)
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
end do
end if
end do
ind_y = ind_y-1
if(ind_y .GT. 0) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
if(cur_rank == rank_x) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = buffer(pos)
end do
end do
end do
end do
end if
end do
ind_x = ind_x-1
if(ind_x .GT. 0) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
end select
cur_rank = min(rank_x, rank_y)
end do
call mpp_clock_end(unpk_clock)
! ---northern boundary fold
shift = 0
if(domain%symmetry) shift = 1
if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then
isd = domain%x(1)%compute%begin - update_x%whalo;
ied = domain%x(1)%compute%end + update_x%ehalo;
jsd = domain%y(1)%compute%begin - update_y%shalo;
jed = domain%y(1)%compute%end + update_y%nhalo;
j = domain%y(1)%global%end+shift
if( jsd .LE. j .AND. j.LE.jed+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
if( .NOT. domain%symmetry ) is = is - 1
do i = is ,ie, midpoint
if( isd.LE.i .AND. i.LE. ied+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! The following code block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign.
!off west edge, when updating north or west direction
j = domain%y(1)%global%end+shift
if ( recv(7) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
if(domain%symmetry) then
is = domain%x(1)%global%begin
else
is = domain%x(1)%global%begin - 1
end if
if( is.GT.isd )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = isd,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.isd )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = isd,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.ied )then
ie = ied
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,SOUTH) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---southern boundary fold
! NOTE: symmetry is assumed for fold-south boundary
j = domain%y(1)%global%begin
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
j = domain%y(1)%global%begin
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! The following code block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign.
!off west edge, when updating south or west direction
j = domain%y(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-south BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-south CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%data%end
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,WEST) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---western boundary fold
! NOTE: symmetry is assumed for fold-west boundary
i = domain%x(1)%global%begin
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%begin
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! The following code block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign.
!off south edge, when updating south or west direction
i = domain%x(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-west BGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i,2*js-j,k)
fieldy(i,j,k) = fieldy(i,2*js-j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-west CGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,EAST) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---eastern boundary fold
! NOTE: symmetry is assumed for fold-east boundary
i = domain%x(1)%global%end+shift
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%end+shift
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the halo data coming from
! the other half of the fold may have the wrong sign
!off south edge, when updating in the south or east direction
i = domain%x(1)%global%end+shift
if ( recv(3) .OR. recv(1) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-east BGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i,2*js-j,k)
fieldy(i,j,k) = fieldy(i,2*js-j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-east CGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
end if
call mpp_clock_begin(wait_clock)
call mpp_sync_self( )
call mpp_clock_end(wait_clock)
return
end subroutine mpp_do_update_r8_3dv
# 1176 "../mpp/include/mpp_domains_misc.inc" 2
# 1185
# 1 "../mpp/include/mpp_do_update.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_i8_3d( f_addrs, domain, update, d_type, ke, flags)
!updates data domain of 3D field whose computational domains have been computed
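! Overall flow of this halo update (as implemented below):
!   1) post a non-blocking receive for every neighbor that contributes to a
!      requested halo direction,
!   2) pack the send buffers, applying the overlap rotation (ZERO, NINETY,
!      MINUS_NINETY, ONE_HUNDRED_EIGHTY) so the receiver can unpack contiguously,
!   3) send the packed buffers,
!   4) wait for the receives to complete, then unpack the halos in reverse
!      order, walking the buffer backwards from buffer_recv_size.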
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
integer(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
integer(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
type(overlap_type), pointer :: overPtr => NULL()
character(len=8) :: text
!equate to mpp_domains_stack
integer(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer, allocatable :: msg1(:), msg2(:), msg3(:)
logical :: send(8), recv(8), update_edge_only
integer :: to_pe, from_pe, pos, msgsize
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe, dir
integer :: start, start1, start2, index, is1, ie1, js1, je1, ni, nj, total
integer :: buffer_recv_size, nlist, outunit
integer :: send_start_pos
integer :: send_msgsize(MAXLIST)
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_edge_only = BTEST(update_flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
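! Optional consistency check: when debug_message_passing is set, each PE totals
! the message sizes it expects to receive (msg2) and to send (msg3), exchanges
! the send sizes via mpp_alltoall, and aborts if any pair of PEs disagrees on
! the size of a message.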
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1), msg3(0:nlist-1) )
msg1 = 0
msg2 = 0
msg3 = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = update%recv(m)%pe
l = from_pe-mpp_root_pe()
msg2(l) = msgsize
enddo
do m = 1, update%nsend
overPtr => update%send(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(send(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
l = overPtr%pe - mpp_root_pe()
msg3(l) = msgsize
enddo
call mpp_alltoall(msg3, 1, msg1, 1)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_update: mismatch on send and recv size")
endif
enddo
write(outunit,*)"NOTE from mpp_do_update: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2, msg3)
endif
!recv
buffer_pos = 0
call mpp_clock_begin(recv_clock)
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nrecv
call mpp_clock_end(recv_clock)
buffer_recv_size = buffer_pos
send_start_pos = buffer_pos
! pack
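! Pack loop: for each neighbor, copy the requested overlap regions into the
! shared stack buffer. The rotation case selects the index traversal order so
! that data from a rotated tile (e.g. across a mosaic/cubed-sphere edge)
! arrives in the orientation the receiving domain expects.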
call mpp_clock_begin(pack_clock)
do m = 1, update%nsend
send_msgsize(m) = 0
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_START_UPDATE_DOMAINS: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
select case( overPtr%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, overPtr%count
send_msgsize(m) = pos-buffer_pos
buffer_pos = pos
end do ! end do m = 1, nsend
call mpp_clock_end(pack_clock)
buffer_pos = send_start_pos
call mpp_clock_begin(send_clock)
do m = 1, update%nsend
msgsize = send_msgsize(m)
if(msgsize == 0) cycle
to_pe = update%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end do ! end do m = 1, update%nsend
call mpp_clock_end(send_clock)
!unpack recv
!unpack halos in reverse order
! ptr_rfield = f_addrs(1)
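! Unpacking proceeds from the last received message to the first: buffer_pos
! starts at buffer_recv_size and each overlap's size is subtracted before the
! copy, so the read position always matches where mpp_recv placed that
! message in the stack buffer.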
call mpp_clock_begin(wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
dir = overPtr%dir(n)
if( recv(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
end do
endif
end do ! do n = 1, overPtr%count
end do
call mpp_clock_end(unpk_clock)
call mpp_clock_begin(wait_clock)
call mpp_sync_self( )
call mpp_clock_end(wait_clock)
return
end subroutine mpp_do_update_i8_3d
# 1193 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_update.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_r4_3d( f_addrs, domain, update, d_type, ke, flags)
!updates data domain of 3D field whose computational domains have been computed
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
real(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
type(overlap_type), pointer :: overPtr => NULL()
character(len=8) :: text
!equate to mpp_domains_stack
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer, allocatable :: msg1(:), msg2(:), msg3(:)
logical :: send(8), recv(8), update_edge_only
integer :: to_pe, from_pe, pos, msgsize
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe, dir
integer :: start, start1, start2, index, is1, ie1, js1, je1, ni, nj, total
integer :: buffer_recv_size, nlist, outunit
integer :: send_start_pos
integer :: send_msgsize(MAXLIST)
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_edge_only = BTEST(update_flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1), msg3(0:nlist-1) )
msg1 = 0
msg2 = 0
msg3 = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = update%recv(m)%pe
l = from_pe-mpp_root_pe()
msg2(l) = msgsize
enddo
do m = 1, update%nsend
overPtr => update%send(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(send(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
l = overPtr%pe - mpp_root_pe()
msg3(l) = msgsize
enddo
call mpp_alltoall(msg3, 1, msg1, 1)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_update: mismatch on send and recv size")
endif
enddo
write(outunit,*)"NOTE from mpp_do_update: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2, msg3)
endif
!recv
buffer_pos = 0
call mpp_clock_begin(recv_clock)
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nrecv
call mpp_clock_end(recv_clock)
buffer_recv_size = buffer_pos
send_start_pos = buffer_pos
! pack
call mpp_clock_begin(pack_clock)
do m = 1, update%nsend
send_msgsize(m) = 0
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_START_UPDATE_DOMAINS: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
select case( overPtr%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, overPtr%count
send_msgsize(m) = pos-buffer_pos
buffer_pos = pos
end do ! end do m = 1, nsend
call mpp_clock_end(pack_clock)
buffer_pos = send_start_pos
call mpp_clock_begin(send_clock)
do m = 1, update%nsend
msgsize = send_msgsize(m)
if(msgsize == 0) cycle
to_pe = update%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end do ! end do m = 1, update%nsend
call mpp_clock_end(send_clock)
!unpack recv
!unpack halos in reverse order
! ptr_rfield = f_addrs(1)
call mpp_clock_begin(wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
dir = overPtr%dir(n)
if( recv(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
end do
endif
end do ! do n = 1, overPtr%count
end do
call mpp_clock_end(unpk_clock)
call mpp_clock_begin(wait_clock)
call mpp_sync_self( )
call mpp_clock_end(wait_clock)
return
end subroutine mpp_do_update_r4_3d
# 1207 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_updateV.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_r4_3dv(f_addrsx,f_addrsy, domain, update_x, update_y, &
d_type, ke, gridtype, flags)
!updates data domain of 3D field whose computational domains have been computed
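! Vector variant of the halo update: the x- and y-component fields are updated
! together so that rotations and grid folds can be applied consistently.
!   - BGRID_NE/BGRID_SW/AGRID: both components live at the same points, so
!     they are interleaved pairwise in the message buffer,
!   - CGRID_NE/CGRID_SW: the components are staggered, so the x- and y-overlap
!     lists are packed separately,
!   - unless SCALAR_PAIR is set, rotated overlaps swap components and flip
!     signs, and folded boundaries reverse the sign of the halo data.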
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update_x, update_y
integer, intent(in) :: ke
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: gridtype
integer, intent(in), optional :: flags
real(4) :: fieldx(update_x%xbegin:update_x%xend, update_x%ybegin:update_x%yend,ke)
real(4) :: fieldy(update_y%xbegin:update_y%xend, update_y%ybegin:update_y%yend,ke)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
integer :: update_flags
integer :: l_size, l, i, j, k, is, ie, js, je, n, m
integer :: pos, nlist, msgsize, isd, ied, jsd, jed
integer :: to_pe, from_pe, midpoint
integer :: tMe, dir
integer :: send_start_pos, nsend
integer :: send_msgsize(2*MAXLIST)
integer :: send_pe(2*MAXLIST)
integer, allocatable :: msg1(:), msg2(:), msg3(:)
logical :: send(8), recv(8), update_edge_only
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr,buffer )
integer :: buffer_pos
character(len=8) :: text
integer :: buffer_recv_size, shift
integer :: rank_x, rank_y, ind_x, ind_y, cur_rank
integer :: nsend_x, nsend_y, nrecv_x, nrecv_y, outunit
outunit = stdout()
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) ) then
update_flags = flags
! The following test is so that SCALAR_PAIR can be used alone with the
! same default update pattern as without.
if (BTEST(update_flags,SCALAR_BIT)) then
if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
.OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
end if
end if
if( BTEST(update_flags,NORTH) .AND. BTEST(domain%fold,NORTH) .AND. BTEST(gridtype,SOUTH) ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: Incompatible grid offset and fold.' )
update_edge_only = BTEST(update_flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
l_size = size(f_addrsx,1)
nlist = size(domain%list(:))
ptr = LOC(mpp_domains_stack)
!recv
nsend_x = update_x%nsend
nsend_y = update_y%nsend
nrecv_x = update_x%nrecv
nrecv_y = update_y%nrecv
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1), msg3(0:nlist-1) )
msg1 = 0
msg2 = 0
msg3 = 0
cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
msgsize = 0
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = update_y%recv(ind_y)%pe
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
m = from_pe-mpp_root_pe()
msg2(m) = msgsize
end do
cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
msgsize = 0
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) then
is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = update_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
m = to_pe-mpp_root_pe()
msg3(m) = msgsize
cur_rank = min(rank_x, rank_y)
enddo
call mpp_alltoall(msg3, 1, msg1, 1)
! call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_updateV: mismatch on send and recv size")
endif
enddo
! call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_updateV: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2, msg3)
endif
!--- recv
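! Receives are posted by walking the x- and y-component overlap lists in a
! single pass ordered by neighbor rank (get_rank_recv), so that only one
! message is posted per neighbor even when both components have overlaps
! with it.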
buffer_pos = 0
cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
call mpp_clock_begin(recv_clock)
do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
msgsize = 0
select case(gridtype)
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*2
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = update_y%recv(ind_y)%pe
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
end select
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, buffer_pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_clock_end(recv_clock)
buffer_recv_size = buffer_pos
send_start_pos = buffer_pos
!--- send
cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
nsend = 0
call mpp_clock_begin(pack_clock)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
!--- make sure the domain stack size is big enough
msgsize = 0
if(cur_rank == rank_x) then
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) msgsize = msgsize + update_x%send(ind_x)%msgsize(n)
enddo
endif
if(cur_rank == rank_y) then
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) msgsize = msgsize + update_y%send(ind_y)%msgsize(n)
enddo
endif
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
select case( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 2
buffer(pos-1) = fieldx(i,j,k)
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 2
buffer(pos-1) = -fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
end if
case( NINETY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 2
buffer(pos-1) = fieldy(i,j,k)
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
buffer(pos-1) = fieldx(i,j,k)
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
buffer(pos-1) = -fieldx(i,j,k)
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
end if ! if( send(dir) )
end do ! do n = 1, update_x%send(ind_x)%count
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case(NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) then
tMe = update_y%send(ind_y)%tileMe(n)
is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
select case( update_y%send(ind_y)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select
endif
enddo
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = update_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
end select
cur_rank = min(rank_x, rank_y)
nsend = nsend + 1
send_pe(nsend) = to_pe
send_msgsize(nsend) = pos - buffer_pos
buffer_pos = pos
end do
buffer_pos = send_start_pos
call mpp_clock_end(pack_clock)
call mpp_clock_begin(send_clock)
do m = 1, nsend
msgsize = send_msgsize(m)
if( msgsize.GT.0 )then
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=send_pe(m), tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_clock_end(send_clock)
!unpack recv
!unpack halos in reverse order
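! Unpacking walks the buffer backwards from buffer_recv_size. For BGRID/AGRID
! the interleaved (x,y) pairs are read back together; for CGRID the
! y-component overlaps are unpacked before the x-component ones, because
! packing wrote x first and the buffer is consumed from the end.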
call mpp_clock_begin(wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(wait_clock)
buffer_pos = buffer_recv_size
cur_rank = get_rank_unpack(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
call mpp_clock_begin(unpk_clock)
do while (ind_x > 0 .OR. ind_y > 0)
pos = buffer_pos
select case ( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*2*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 2
fieldx(i,j,k) = buffer(pos-1)
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
end do
end if ! end if( recv(dir) )
end do ! end do n = update_x%recv(ind_x)%count, 1, -1
ind_x = ind_x-1
ind_y = ind_x
if(ind_x .GT. 0) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_y) then
do n = update_y%recv(ind_y)%count, 1, -1
dir = update_y%recv(ind_y)%dir(n)
if( recv(dir) ) then
tMe = update_y%recv(ind_y)%tileMe(n)
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
end do
end if
end do
ind_y = ind_y-1
if(ind_y .GT. 0) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
if(cur_rank == rank_x) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = buffer(pos)
end do
end do
end do
end do
end if
end do
ind_x = ind_x-1
if(ind_x .GT. 0) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
end select
cur_rank = min(rank_x, rank_y)
end do
call mpp_clock_end(unpk_clock)
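! Post-processing for folded boundaries (e.g. the tripolar fold of an ocean
! grid): for vector fields the halo values copied across the fold must be
! mirrored and, where needed, sign-reversed; on a B-grid the velocity points
! that coincide with the pole are set to zero. SCALAR_PAIR updates skip this
! block.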
! ---northern boundary fold
shift = 0
if(domain%symmetry) shift = 1
if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then
isd = domain%x(1)%compute%begin - update_x%whalo;
ied = domain%x(1)%compute%end + update_x%ehalo;
jsd = domain%y(1)%compute%begin - update_y%shalo;
jed = domain%y(1)%compute%end + update_y%nhalo;
j = domain%y(1)%global%end+shift
if( jsd .LE. j .AND. j.LE.jed+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
if( .NOT. domain%symmetry ) is = is - 1
do i = is ,ie, midpoint
if( isd.LE.i .AND. i.LE. ied+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the halo data coming from
! the other half of the fold may have the wrong sign
!off west edge, when updating in the north or west direction
j = domain%y(1)%global%end+shift
if ( recv(7) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
if(domain%symmetry) then
is = domain%x(1)%global%begin
else
is = domain%x(1)%global%begin - 1
end if
if( is.GT.isd )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = isd,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.isd )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = isd,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.ied )then
ie = ied
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,SOUTH) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---southern boundary fold
! NOTE: symmetry is assumed for fold-south boundary
j = domain%y(1)%global%begin
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
j = domain%y(1)%global%begin
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the halo data coming from
! the other half of the fold may have the wrong sign
!off west edge, when updating in the south or west direction
j = domain%y(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-south BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-south CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%data%end
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,WEST) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---western boundary fold
! NOTE: symmetry is assumed for fold-west boundary
i = domain%x(1)%global%begin
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%begin
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the halo data coming from
! the other half of the fold may have the wrong sign
!off south edge, when updating in the south or west direction
i = domain%x(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-west BGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i,2*js-j,k)
fieldy(i,j,k) = fieldy(i,2*js-j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-west CGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,EAST) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---eastern boundary fold
! NOTE: symmetry is assumed for fold-east boundary
i = domain%x(1)%global%end+shift
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%end+shift
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the halo data coming from
! the other half of the fold may have the wrong sign
!off south edge, when updating in the south or east direction
i = domain%x(1)%global%end+shift
if ( recv(3) .OR. recv(1) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-east BGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i,2*js-j,k)
fieldy(i,j,k) = fieldy(i,2*js-j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-east CGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
end if
call mpp_clock_begin(wait_clock)
call mpp_sync_self( )
call mpp_clock_end(wait_clock)
return
end subroutine mpp_do_update_r4_3dv
# 1208 "../mpp/include/mpp_domains_misc.inc" 2
# 1218
# 1 "../mpp/include/mpp_do_update.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_i4_3d( f_addrs, domain, update, d_type, ke, flags)
!updates data domain of 3D field whose computational domains have been computed
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
integer(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
integer(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
type(overlap_type), pointer :: overPtr => NULL()
character(len=8) :: text
!equate to mpp_domains_stack
integer(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer, allocatable :: msg1(:), msg2(:), msg3(:)
logical :: send(8), recv(8), update_edge_only
integer :: to_pe, from_pe, pos, msgsize
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe, dir
integer :: start, start1, start2, index, is1, ie1, js1, je1, ni, nj, total
integer :: buffer_recv_size, nlist, outunit
integer :: send_start_pos
integer :: send_msgsize(MAXLIST)
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_edge_only = BTEST(update_flags, EDGEONLY)
recv = .false.
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1), msg3(0:nlist-1) )
msg1 = 0
msg2 = 0
msg3 = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = update%recv(m)%pe
l = from_pe-mpp_root_pe()
msg2(l) = msgsize
enddo
do m = 1, update%nsend
overPtr => update%send(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(send(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
l = overPtr%pe - mpp_root_pe()
msg3(l) = msgsize
enddo
call mpp_alltoall(msg3, 1, msg1, 1)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_update: mismatch on send and recv size")
endif
enddo
write(outunit,*)"NOTE from mpp_do_update: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2, msg3)
endif
!recv
buffer_pos = 0
call mpp_clock_begin(recv_clock)
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nrecv
call mpp_clock_end(recv_clock)
buffer_recv_size = buffer_pos
send_start_pos = buffer_pos
! pack
call mpp_clock_begin(pack_clock)
do m = 1, update%nsend
send_msgsize(m) = 0
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_START_UPDATE_DOMAINS: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
select case( overPtr%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, overPtr%count
send_msgsize(m) = pos-buffer_pos
buffer_pos = pos
end do ! end do m = 1, nsend
call mpp_clock_end(pack_clock)
buffer_pos = send_start_pos
call mpp_clock_begin(send_clock)
do m = 1, update%nsend
msgsize = send_msgsize(m)
if(msgsize == 0) cycle
to_pe = update%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end do ! end do m = 1, update%nsend
call mpp_clock_end(send_clock)
!unpack recv
!unpack halos in reverse order
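! The receives were posted, and their buffer offsets assigned, in forward order, so the
! buffer is walked backwards from buffer_recv_size: subtracting each message size
! recovers the starting offset of every received block without storing it explicitly.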
! ptr_rfield = f_addrs(1)
call mpp_clock_begin(wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
dir = overPtr%dir(n)
if( recv(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
end do
endif
end do ! do n = 1, overPtr%count
end do
call mpp_clock_end(unpk_clock)
call mpp_clock_begin(wait_clock)
call mpp_sync_self( )
call mpp_clock_end(wait_clock)
return
end subroutine mpp_do_update_i4_3d
# 1225 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_check.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_check_r8_3d( f_addrs, domain, check, d_type, ke, flags, name)
!checks consistency of data on the boundaries of the data domain of a 3D field whose computational domains have been computed
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: check
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
character(len=*), optional, intent(in) :: name
real(8) :: field(check%xbegin:check%xend, check%ybegin:check%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
character(len=8) :: text
character(len=64) :: field_name
!equate to mpp_domains_stack
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
integer, allocatable :: msg1(:), msg2(:)
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer :: to_pe, from_pe, pos, msgsize
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe
integer :: buffer_recv_size, nlist
integer :: outunit
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
!--- If debug_update_level is not NO_DEBUG, check the consistency of data on the shared bounds
!--- (symmetric domain or folded north edge). The north bound is checked when the north edge is folded.
!--- For a symmetric domain: data on T-cells need no check; for E-cells the East and West
!--- boundaries are checked; for N-cells the North and South boundaries are checked; for C-cells
!--- the West, East, South and North boundaries are checked.
!--- The check is done as follows: western boundary data are sent to the eastern boundary for
!--- comparison, and southern boundary data are sent to the northern boundary for comparison.
if(present(name)) then
field_name = name
else
field_name = "un-named"
end if
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, check%nrecv
msgsize = 0
do n = 1, check%recv(m)%count
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
from_pe = check%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(l) = msgsize
enddo
do m = 1, check%nsend
msgsize = 0
do n = 1, check%send(m)%count
is = check%send(m)%is(n); ie = check%send(m)%ie(n)
js = check%send(m)%js(n); je = check%send(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
call mpp_send(msgsize, plen=1, to_pe=check%send(m)%pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_check: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_check: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
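! The check mirrors a halo update: each PE packs its own copy of the shared boundary
! points and sends it to the neighbouring PE, then compares the received copy against
! its local values point by point; any difference is reported through mpp_error at
! severity debug_update_level.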
buffer_pos = 0
!--- pre-post recv the data
do m = 1, check%nrecv
msgsize = 0
do n = 1, check%recv(m)%count
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = check%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
!--- send the data
do m = 1, check%nsend
pos = buffer_pos
do n = 1, check%recv(m)%count
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
tMe = check%recv(m)%tileMe(n)
select case( check%recv(m)%rotation(n) )
case(ZERO)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(NINETY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
end do
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = check%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos)
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
end do ! end do m = 1, check%nsend
call mpp_sync_self(check=EVENT_RECV) ! To ensure recv is completed.
buffer_pos = buffer_recv_size
!--- compare the data in reverse order
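! CHECK_LOOP exits on the first mismatching point; the severity of the resulting
! mpp_error call is controlled by the current setting of debug_update_level.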
CHECK_LOOP: do m = check%nrecv, 1, -1
do n = check%recv(m)%count, 1, -1
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = check%recv(m)%tileMe(n)
do l=1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
if( field(i,j,k) .NE. buffer(pos) ) then
print*,"Error from MPP_DO_CHECK on pe = ", mpp_pe(), ": field ", &
trim(field_name), " at point (", i, ",", j, ",", k, ") = ", field(i,j,k), &
" does not equal to the value = ", buffer(pos), " on pe ", check%recv(m)%pe
call mpp_error(debug_update_level, "MPP_DO_CHECK: mismatch on the boundary for symmetry point")
exit CHECK_LOOP
end if
end do
end do
end do
end do
end do
end do CHECK_LOOP ! end do m = check%nrecv, 1, -1
call mpp_sync_self()
return
end subroutine mpp_do_check_r8_3d
# 1236 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_checkV.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_check_r8_3dv(f_addrsx,f_addrsy, domain, check_x, check_y, &
d_type, ke, flags, name)
!checks consistency of data on the boundaries of the data domain of a 3D vector field whose computational domains have been computed
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: check_x, check_y
integer, intent(in) :: ke
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in), optional :: flags
character(len=*), intent(in), optional :: name
real(8) :: fieldx(check_x%xbegin:check_x%xend, check_x%ybegin:check_x%yend,ke)
real(8) :: fieldy(check_y%xbegin:check_y%xend, check_y%ybegin:check_y%yend,ke)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
integer, allocatable :: msg1(:), msg2(:)
integer :: update_flags
integer :: l_size, l, i, j, k, is, ie, js, je, n, m
integer :: pos, nlist, msgsize
integer :: to_pe, from_pe
integer :: tMe
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr,buffer )
integer :: buffer_pos
character(len=8) :: text
character(len=64) :: field_name
integer :: buffer_recv_size
integer :: rank_x, rank_y, ind_x, ind_y, cur_rank
integer :: nsend_x, nsend_y, nrecv_x, nrecv_y
integer :: outunit
outunit = stdout()
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) ) update_flags = flags
buffer_pos = 0 !this initialization goes away if update_domains becomes non-blocking
l_size = size(f_addrsx,1)
nlist = size(domain%list(:))
ptr = LOC(mpp_domains_stack)
!--- If debug_update_level is not NO_DEBUG, check the consistency of data on the shared bounds
!--- (symmetric domain or folded north edge). The north bound is checked when the north edge is folded.
!--- For a symmetric domain: data on T-cells need no check; for E-cells the East and West
!--- boundaries are checked; for N-cells the North and South boundaries are checked; for C-cells
!--- the West, East, South and North boundaries are checked.
!--- The check is done as follows: western boundary data are sent to the eastern boundary for
!--- comparison, and southern boundary data are sent to the northern boundary for comparison.
if(present(name)) then
field_name = name
else
field_name = "un-named"
end if
nsend_x = check_x%nsend
nsend_y = check_y%nsend
nrecv_x = check_x%nrecv
nrecv_y = check_y%nrecv
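! The x- and y-component overlap lists are traversed together, ordered by the rank of
! the remote PE relative to this one (get_rank_recv/get_rank_send return the starting
! indices ind_x, ind_y and the corresponding ranks); when both components overlap with
! the same PE their data are combined into a single message.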
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
cur_rank = get_rank_recv(domain, check_x, check_y, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = check_x%recv(ind_x)%pe
do n = 1, check_x%recv(ind_x)%count
is = check_x%recv(ind_x)%is(n); ie = check_x%recv(ind_x)%ie(n)
js = check_x%recv(ind_x)%js(n); je = check_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = check_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = check_y%recv(ind_y)%pe
do n = 1, check_y%recv(ind_y)%count
is = check_y%recv(ind_y)%is(n); ie = check_y%recv(ind_y)%ie(n)
js = check_y%recv(ind_y)%js(n); je = check_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = check_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
m = from_pe-mpp_root_pe()
call mpp_recv( msg1(m), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(m) = msgsize
end do
cur_rank = get_rank_send(domain, check_x, check_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
msgsize = 0
if(cur_rank == rank_x) then
to_pe = check_x%send(ind_x)%pe
do n = 1, check_x%send(ind_x)%count
is = check_x%send(ind_x)%is(n); ie = check_x%send(ind_x)%ie(n)
js = check_x%send(ind_x)%js(n); je = check_x%send(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
enddo
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = check_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = check_y%send(ind_y)%pe
do n = 1, check_y%send(ind_y)%count
is = check_y%send(ind_y)%is(n); ie = check_y%send(ind_y)%ie(n)
js = check_y%send(ind_y)%js(n); je = check_y%send(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = check_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
call mpp_send( msgsize, plen=1, to_pe=to_pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_checkV: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_checkV: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!--- recv the data
cur_rank = get_rank_recv(domain, check_x, check_y, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = check_x%recv(ind_x)%pe
do n = 1, check_x%recv(ind_x)%count
is = check_x%recv(ind_x)%is(n); ie = check_x%recv(ind_x)%ie(n)
js = check_x%recv(ind_x)%js(n); je = check_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = check_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = check_y%recv(ind_y)%pe
do n = 1, check_y%recv(ind_y)%count
is = check_y%recv(ind_y)%is(n); ie = check_y%recv(ind_y)%ie(n)
js = check_y%recv(ind_y)%js(n); je = check_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = check_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK_V: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
enddo
buffer_recv_size = buffer_pos
!--- send the data
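! For a true vector field, a 90-degree rotated overlap packs the partner component
! (x in place of y and vice versa), and some rotation/component combinations below
! negate the packed value; when SCALAR_BIT is set in update_flags the pair is treated
! as two independent scalars and no sign change is applied.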
cur_rank = get_rank_send(domain, check_x, check_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
if(cur_rank == rank_x) then
to_pe = check_x%send(ind_x)%pe
do n = 1, check_x%send(ind_x)%count
is = check_x%send(ind_x)%is(n); ie = check_x%send(ind_x)%ie(n)
js = check_x%send(ind_x)%js(n); je = check_x%send(ind_x)%je(n)
tMe = check_x%send(ind_x)%tileMe(n)
select case( check_x%send(ind_x)%rotation(n) )
case(ZERO)
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case(NINETY)
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select
end do
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = check_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = check_y%send(ind_y)%pe
do n = 1, check_y%send(ind_y)%count
is = check_y%send(ind_y)%is(n); ie = check_y%send(ind_y)%ie(n)
js = check_y%send(ind_y)%js(n); je = check_y%send(ind_y)%je(n)
tMe = check_y%send(ind_y)%tileMe(n)
select case( check_y%send(ind_y)%rotation(n) )
case(ZERO)
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = check_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos)
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK_V: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
end do ! end of send loop over ind_x, ind_y
call mpp_sync_self(check=EVENT_RECV) ! To ensure recv is completed.
buffer_pos = buffer_recv_size
!--- compare the data in reverse order
cur_rank = get_rank_unpack(domain, check_x, check_y, rank_x, rank_y, ind_x, ind_y)
CHECK_LOOP: do while(ind_x >0 .OR. ind_y >0)
if(cur_rank == rank_y) then
do n = check_y%recv(ind_y)%count, 1, -1
is = check_y%recv(ind_y)%is(n); ie = check_y%recv(ind_y)%ie(n)
js = check_y%recv(ind_y)%js(n); je = check_y%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = check_y%recv(ind_y)%tileMe(n)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
if( fieldy(i,j,k) .NE. buffer(pos) ) then
print*,"Error from MPP_DO_CHECK_V on pe = ", mpp_pe(), ": y component of vector ", &
trim(field_name), " at point (", i, ",", j, ",", k, ") = ", fieldy(i,j,k), &
" does not equal to the value = ", buffer(pos), " on pe ", check_y%recv(ind_y)%pe
call mpp_error(debug_update_level, "MPP_DO_CHECK_V: mismatch on the boundary for symmetry point")
exit CHECK_LOOP
end if
end do
end do
end do
end do
end do
ind_y = ind_y-1
if(ind_y .GT. 0) then
rank_y = check_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
if(cur_rank == rank_x) then
do n = check_x%recv(ind_x)%count, 1, -1
is = check_x%recv(ind_x)%is(n); ie = check_x%recv(ind_x)%ie(n)
js = check_x%recv(ind_x)%js(n); je = check_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = check_x%recv(ind_x)%tileMe(n)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
if( fieldx(i,j,k) .NE. buffer(pos) ) then
print*,"Error from MPP_DO_CHECK_V on pe = ", mpp_pe(), ": x-component of vector ", &
trim(field_name), " at point (", i, ",", j, ",", k, ") = ", fieldx(i,j,k), &
" does not equal to the value = ", buffer(pos), " on pe ", check_x%recv(ind_x)%pe
call mpp_error(debug_update_level, "MPP_DO_CHECK_V: mismatch on the boundary for symmetry point")
exit CHECK_LOOP
end if
end do
end do
end do
end do
end do
ind_x = ind_x-1
if(ind_x .GT. 0) then
rank_x = check_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
end do CHECK_LOOP ! end of compare loop over ind_x, ind_y
call mpp_sync_self()
return
end subroutine mpp_do_check_r8_3dv
# 1237 "../mpp/include/mpp_domains_misc.inc" 2
# 1246
# 1 "../mpp/include/mpp_do_check.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_check_i8_3d( f_addrs, domain, check, d_type, ke, flags, name)
!checks consistency of data on the boundaries of the data domain of a 3D field whose computational domains have been computed
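! Control flow is identical to mpp_do_check_r8_3d above; this is the integer(8) instantiation of the same template.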
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: check
integer(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
character(len=*), optional, intent(in) :: name
integer(8) :: field(check%xbegin:check%xend, check%ybegin:check%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
character(len=8) :: text
character(len=64) :: field_name
!equate to mpp_domains_stack
integer(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
integer, allocatable :: msg1(:), msg2(:)
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer :: to_pe, from_pe, pos, msgsize
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe
integer :: buffer_recv_size, nlist
integer :: outunit
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
!--- If debug_update_level is not NO_DEBUG, check the consistency of data on the shared bounds
!--- (symmetric domain or folded north edge). The north bound is checked when the north edge is folded.
!--- For a symmetric domain: data on T-cells need no check; for E-cells the East and West
!--- boundaries are checked; for N-cells the North and South boundaries are checked; for C-cells
!--- the West, East, South and North boundaries are checked.
!--- The check is done as follows: western boundary data are sent to the eastern boundary for
!--- comparison, and southern boundary data are sent to the northern boundary for comparison.
if(present(name)) then
field_name = name
else
field_name = "un-named"
end if
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, check%nrecv
msgsize = 0
do n = 1, check%recv(m)%count
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
from_pe = check%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(l) = msgsize
enddo
do m = 1, check%nsend
msgsize = 0
do n = 1, check%send(m)%count
is = check%send(m)%is(n); ie = check%send(m)%ie(n)
js = check%send(m)%js(n); je = check%send(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
call mpp_send(msgsize, plen=1, to_pe=check%send(m)%pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_check: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_check: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
buffer_pos = 0
!--- pre-post recv the data
do m = 1, check%nrecv
msgsize = 0
do n = 1, check%recv(m)%count
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = check%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
!--- send the data
do m = 1, check%nsend
pos = buffer_pos
do n = 1, check%recv(m)%count
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
tMe = check%recv(m)%tileMe(n)
select case( check%recv(m)%rotation(n) )
case(ZERO)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(NINETY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
end do
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = check%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos)
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
end do ! end do m = 1, check%nsend
call mpp_sync_self(check=EVENT_RECV) ! To ensure recv is completed.
buffer_pos = buffer_recv_size
!--- compare the data in reverse order
CHECK_LOOP: do m = check%nrecv, 1, -1
do n = check%recv(m)%count, 1, -1
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = check%recv(m)%tileMe(n)
do l=1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
if( field(i,j,k) .NE. buffer(pos) ) then
print*,"Error from MPP_DO_CHECK on pe = ", mpp_pe(), ": field ", &
trim(field_name), " at point (", i, ",", j, ",", k, ") = ", field(i,j,k), &
" does not equal to the value = ", buffer(pos), " on pe ", check%recv(m)%pe
call mpp_error(debug_update_level, "MPP_DO_CHECK: mismatch on the boundary for symmetry point")
exit CHECK_LOOP
end if
end do
end do
end do
end do
end do
end do CHECK_LOOP ! end do m = check%nrecv, 1, -1
call mpp_sync_self()
return
end subroutine mpp_do_check_i8_3d
# 1254 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_check.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_check_r4_3d( f_addrs, domain, check, d_type, ke, flags, name)
!checks consistency of data on the boundaries of the data domain of a 3D field whose computational domains have been computed
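! Control flow is identical to mpp_do_check_r8_3d above; this is the real(4) instantiation of the same template.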
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: check
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
character(len=*), optional, intent(in) :: name
real(4) :: field(check%xbegin:check%xend, check%ybegin:check%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
character(len=8) :: text
character(len=64) :: field_name
!equate to mpp_domains_stack
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
integer, allocatable :: msg1(:), msg2(:)
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer :: to_pe, from_pe, pos, msgsize
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe
integer :: buffer_recv_size, nlist
integer :: outunit
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
!--- If debug_update_level is not NO_DEBUG, check the consistency of data on the shared bounds
!--- (symmetric domain or folded north edge). The north bound is checked when the north edge is folded.
!--- For a symmetric domain: data on T-cells need no check; for E-cells the East and West
!--- boundaries are checked; for N-cells the North and South boundaries are checked; for C-cells
!--- the West, East, South and North boundaries are checked.
!--- The check is done as follows: western boundary data are sent to the eastern boundary for
!--- comparison, and southern boundary data are sent to the northern boundary for comparison.
if(present(name)) then
field_name = name
else
field_name = "un-named"
end if
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, check%nrecv
msgsize = 0
do n = 1, check%recv(m)%count
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
from_pe = check%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(l) = msgsize
enddo
do m = 1, check%nsend
msgsize = 0
do n = 1, check%send(m)%count
is = check%send(m)%is(n); ie = check%send(m)%ie(n)
js = check%send(m)%js(n); je = check%send(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
call mpp_send(msgsize, plen=1, to_pe=check%send(m)%pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_check: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_check: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
buffer_pos = 0
!--- pre-post recv the data
do m = 1, check%nrecv
msgsize = 0
do n = 1, check%recv(m)%count
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = check%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
!--- send the data
do m = 1, check%nsend
pos = buffer_pos
do n = 1, check%recv(m)%count
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
tMe = check%recv(m)%tileMe(n)
select case( check%recv(m)%rotation(n) )
case(ZERO)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(NINETY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
end do
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = check%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos)
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
end do ! end do m = 1, check%nsend
call mpp_sync_self(check=EVENT_RECV) ! To ensure recv is completed.
buffer_pos = buffer_recv_size
!--- compare the data in reverse order
CHECK_LOOP: do m = check%nrecv, 1, -1
do n = check%recv(m)%count, 1, -1
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = check%recv(m)%tileMe(n)
do l=1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
if( field(i,j,k) .NE. buffer(pos) ) then
print*,"Error from MPP_DO_CHECK on pe = ", mpp_pe(), ": field ", &
trim(field_name), " at point (", i, ",", j, ",", k, ") = ", field(i,j,k), &
" does not equal to the value = ", buffer(pos), " on pe ", check%recv(m)%pe
call mpp_error(debug_update_level, "MPP_DO_CHECK: mismatch on the boundary for symmetry point")
exit CHECK_LOOP
end if
end do
end do
end do
end do
end do
end do CHECK_LOOP ! end do m = check%nrecv, 1, -1
call mpp_sync_self()
return
end subroutine mpp_do_check_r4_3d
# 1268 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_checkV.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_check_r4_3dv(f_addrsx,f_addrsy, domain, check_x, check_y, &
d_type, ke, flags, name)
!checks consistency of data on the boundaries of the data domain of a 3D vector field whose computational domains have been computed
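! Control flow is identical to mpp_do_check_r8_3dv above; this is the real(4) instantiation of the vector-pair template.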
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: check_x, check_y
integer, intent(in) :: ke
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in), optional :: flags
character(len=*), intent(in), optional :: name
real(4) :: fieldx(check_x%xbegin:check_x%xend, check_x%ybegin:check_x%yend,ke)
real(4) :: fieldy(check_y%xbegin:check_y%xend, check_y%ybegin:check_y%yend,ke)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
integer, allocatable :: msg1(:), msg2(:)
integer :: update_flags
integer :: l_size, l, i, j, k, is, ie, js, je, n, m
integer :: pos, nlist, msgsize
integer :: to_pe, from_pe
integer :: tMe
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr,buffer )
integer :: buffer_pos
character(len=8) :: text
character(len=64) :: field_name
integer :: buffer_recv_size
integer :: rank_x, rank_y, ind_x, ind_y, cur_rank
integer :: nsend_x, nsend_y, nrecv_x, nrecv_y
integer :: outunit
outunit = stdout()
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) ) update_flags = flags
buffer_pos = 0 !this initialization goes away if update_domains becomes non-blocking
l_size = size(f_addrsx,1)
nlist = size(domain%list(:))
ptr = LOC(mpp_domains_stack)
!--- If debug_update_level is not NO_DEBUG, check the consistency of data on the shared bounds
!--- (symmetric domain or folded north edge). The north bound is checked when the north edge is folded.
!--- For a symmetric domain: data on T-cells need no check; for E-cells the East and West
!--- boundaries are checked; for N-cells the North and South boundaries are checked; for C-cells
!--- the West, East, South and North boundaries are checked.
!--- The check is done as follows: western boundary data are sent to the eastern boundary for
!--- comparison, and southern boundary data are sent to the northern boundary for comparison.
if(present(name)) then
field_name = name
else
field_name = "un-named"
end if
nsend_x = check_x%nsend
nsend_y = check_y%nsend
nrecv_x = check_x%nrecv
nrecv_y = check_y%nrecv
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
cur_rank = get_rank_recv(domain, check_x, check_y, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = check_x%recv(ind_x)%pe
do n = 1, check_x%recv(ind_x)%count
is = check_x%recv(ind_x)%is(n); ie = check_x%recv(ind_x)%ie(n)
js = check_x%recv(ind_x)%js(n); je = check_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = check_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = check_y%recv(ind_y)%pe
do n = 1, check_y%recv(ind_y)%count
is = check_y%recv(ind_y)%is(n); ie = check_y%recv(ind_y)%ie(n)
js = check_y%recv(ind_y)%js(n); je = check_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = check_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
m = from_pe-mpp_root_pe()
call mpp_recv( msg1(m), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(m) = msgsize
end do
cur_rank = get_rank_send(domain, check_x, check_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
msgsize = 0
if(cur_rank == rank_x) then
to_pe = check_x%send(ind_x)%pe
do n = 1, check_x%send(ind_x)%count
is = check_x%send(ind_x)%is(n); ie = check_x%send(ind_x)%ie(n)
js = check_x%send(ind_x)%js(n); je = check_x%send(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
enddo
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = check_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = check_y%send(ind_y)%pe
do n = 1, check_y%send(ind_y)%count
is = check_y%send(ind_y)%is(n); ie = check_y%send(ind_y)%ie(n)
js = check_y%send(ind_y)%js(n); je = check_y%send(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = check_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
call mpp_send( msgsize, plen=1, to_pe=to_pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_checkV: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_checkV: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!--- recv the data
cur_rank = get_rank_recv(domain, check_x, check_y, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = check_x%recv(ind_x)%pe
do n = 1, check_x%recv(ind_x)%count
is = check_x%recv(ind_x)%is(n); ie = check_x%recv(ind_x)%ie(n)
js = check_x%recv(ind_x)%js(n); je = check_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = check_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = check_y%recv(ind_y)%pe
do n = 1, check_y%recv(ind_y)%count
is = check_y%recv(ind_y)%is(n); ie = check_y%recv(ind_y)%ie(n)
js = check_y%recv(ind_y)%js(n); je = check_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = check_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK_V: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
enddo
buffer_recv_size = buffer_pos
!--- send the data
cur_rank = get_rank_send(domain, check_x, check_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
if(cur_rank == rank_x) then
to_pe = check_x%send(ind_x)%pe
do n = 1, check_x%send(ind_x)%count
is = check_x%send(ind_x)%is(n); ie = check_x%send(ind_x)%ie(n)
js = check_x%send(ind_x)%js(n); je = check_x%send(ind_x)%je(n)
tMe = check_x%send(ind_x)%tileMe(n)
select case( check_x%send(ind_x)%rotation(n) )
case(ZERO)
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case(NINETY)
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select
end do
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = check_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = check_y%send(ind_y)%pe
do n = 1, check_y%send(ind_y)%count
is = check_y%send(ind_y)%is(n); ie = check_y%send(ind_y)%ie(n)
js = check_y%send(ind_y)%js(n); je = check_y%send(ind_y)%je(n)
tMe = check_y%send(ind_y)%tileMe(n)
select case( check_y%send(ind_y)%rotation(n) )
case(ZERO)
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case(NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l = 1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = check_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos)
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK_V: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
end do ! end of send loop over ind_x, ind_y
call mpp_sync_self(check=EVENT_RECV) ! To ensure recv is completed.
buffer_pos = buffer_recv_size
!--- compare the data in reverse order
cur_rank = get_rank_unpack(domain, check_x, check_y, rank_x, rank_y, ind_x, ind_y)
CHECK_LOOP: do while(ind_x >0 .OR. ind_y >0)
if(cur_rank == rank_y) then
do n = check_y%recv(ind_y)%count, 1, -1
is = check_y%recv(ind_y)%is(n); ie = check_y%recv(ind_y)%ie(n)
js = check_y%recv(ind_y)%js(n); je = check_y%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = check_y%recv(ind_y)%tileMe(n)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
if( fieldy(i,j,k) .NE. buffer(pos) ) then
print*,"Error from MPP_DO_CHECK_V on pe = ", mpp_pe(), ": y component of vector ", &
trim(field_name), " at point (", i, ",", j, ",", k, ") = ", fieldy(i,j,k), &
" does not equal to the value = ", buffer(pos), " on pe ", check_y%recv(ind_y)%pe
call mpp_error(debug_update_level, "MPP_DO_CHECK_V: mismatch on the boundary for symmetry point")
exit CHECK_LOOP
end if
end do
end do
end do
end do
end do
ind_y = ind_y-1
if(ind_y .GT. 0) then
rank_y = check_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
if(cur_rank == rank_x) then
do n = check_x%recv(ind_x)%count, 1, -1
is = check_x%recv(ind_x)%is(n); ie = check_x%recv(ind_x)%ie(n)
js = check_x%recv(ind_x)%js(n); je = check_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = check_x%recv(ind_x)%tileMe(n)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
if( fieldx(i,j,k) .NE. buffer(pos) ) then
print*,"Error from MPP_DO_CHECK_V on pe = ", mpp_pe(), ": x-component of vector ", &
trim(field_name), " at point (", i, ",", j, ",", k, ") = ", fieldx(i,j,k), &
" does not equal to the value = ", buffer(pos), " on pe ", check_x%recv(ind_x)%pe
call mpp_error(debug_update_level, "MPP_DO_CHECK_V: mismatch on the boundary for symmetry point")
exit CHECK_LOOP
end if
end do
end do
end do
end do
end do
ind_x = ind_x-1
if(ind_x .GT. 0) then
rank_x = check_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
end do CHECK_LOOP ! end of compare loop over ind_x, ind_y
call mpp_sync_self()
return
end subroutine mpp_do_check_r4_3dv
# 1269 "../mpp/include/mpp_domains_misc.inc" 2
# 1278
# 1 "../mpp/include/mpp_do_check.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_check_i4_3d( f_addrs, domain, check, d_type, ke, flags, name)
!checks consistency of data on the boundaries of the data domain of a 3D field whose computational domains have been computed
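! Control flow is identical to mpp_do_check_r8_3d above; this is the integer(4) instantiation of the same template.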
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: check
integer(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
character(len=*), optional, intent(in) :: name
integer(4) :: field(check%xbegin:check%xend, check%ybegin:check%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
character(len=8) :: text
character(len=64) :: field_name
!equate to mpp_domains_stack
integer(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
integer, allocatable :: msg1(:), msg2(:)
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer :: to_pe, from_pe, pos, msgsize
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe
integer :: buffer_recv_size, nlist
integer :: outunit
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
!--- If debug_update_level is not NO_DEBUG, check the consistency of data on the shared
!--- boundaries (when the domain is symmetric or has a folded north edge). The north
!--- boundary is checked when the north edge is folded. For a symmetric domain: data on
!--- T-cells needs no check; data on E-cells is checked on the east and west boundaries;
!--- data on N-cells is checked on the north and south boundaries; and data on C-cells is
!--- checked on the west, east, south and north boundaries. The check proceeds by sending
!--- western boundary data to the eastern boundary (and southern data to the northern
!--- boundary) of the neighbouring domain, where the two copies are compared.
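!--- For example, on a symmetric domain a C-cell point on the shared boundary between two
!--- neighbouring PEs is computed by both of them. One PE packs its copy of that point and
!--- sends it across (send loop below); the receiving PE compares the incoming value with
!--- its own copy and reports any difference through mpp_error(debug_update_level, ...).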
if(present(name)) then
field_name = name
else
field_name = "un-named"
end if
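!--- When debug_message_passing is set, the expected message sizes are exchanged first:
!--- each PE sends the size of every send overlap and receives the sizes its neighbours
!--- will send; a FATAL error is raised if any pair disagrees, so mismatched domain
!--- setups are caught before any field data is moved.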
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, check%nrecv
msgsize = 0
do n = 1, check%recv(m)%count
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
from_pe = check%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(l) = msgsize
enddo
do m = 1, check%nsend
msgsize = 0
do n = 1, check%send(m)%count
is = check%send(m)%is(n); ie = check%send(m)%ie(n)
js = check%send(m)%js(n); je = check%send(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
call mpp_send(msgsize, plen=1, to_pe=check%send(m)%pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_check: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_check: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
buffer_pos = 0
!--- pre-post recv the data
do m = 1, check%nrecv
msgsize = 0
do n = 1, check%recv(m)%count
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = check%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
!--- send the data
do m = 1, check%nsend
pos = buffer_pos
do n = 1, check%send(m)%count
is = check%send(m)%is(n); ie = check%send(m)%ie(n)
js = check%send(m)%js(n); je = check%send(m)%je(n)
tMe = check%send(m)%tileMe(n)
select case( check%send(m)%rotation(n) )
case(ZERO)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(MINUS_NINETY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(NINETY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
do l = 1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
end do
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = check%send(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos)
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_CHECK: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
end do ! end do list = 0,nlist-1
call mpp_sync_self(check=EVENT_RECV) ! To ensure recv is completed.
buffer_pos = buffer_recv_size
!--- compare the data in reverse order
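!--- The receive buffer was filled front to back by the pre-posted receives above, so the
!--- overlaps are walked in reverse here, rewinding buffer_pos from buffer_recv_size back
!--- toward zero; each block is then read forward and compared point by point.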
CHECK_LOOP: do m = check%nrecv, 1, -1
do n = check%recv(m)%count, 1, -1
is = check%recv(m)%is(n); ie = check%recv(m)%ie(n)
js = check%recv(m)%js(n); je = check%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = check%recv(m)%tileMe(n)
do l=1, l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
if( field(i,j,k) .NE. buffer(pos) ) then
print*,"Error from MPP_DO_CHECK on pe = ", mpp_pe(), ": field ", &
trim(field_name), " at point (", i, ",", j, ",", k, ") = ", field(i,j,k), &
" does not equal to the value = ", buffer(pos), " on pe ", check%recv(m)%pe
call mpp_error(debug_update_level, "MPP_DO_CHECK: mismatch on the boundary for symmetry point")
exit CHECK_LOOP
end if
end do
end do
end do
end do
end do
end do CHECK_LOOP ! end do list = nlist-1,0,-1
call mpp_sync_self()
return
end subroutine mpp_do_check_i4_3d
# 1285 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_update_nest_domains.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
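!###############################################################################
! The 2D entry points below do no communication themselves: each declares a 3D view of
! its arguments with a singleton third dimension, associates that view with the caller's
! storage through Cray pointers and LOC(), and forwards to the generic
! mpp_update_nest_fine / mpp_update_nest_coarse, which resolves to the 3D routine.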
subroutine mpp_update_nest_fine_r8_2D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
real(8), intent(in) :: field(:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(8), intent(inout) :: wbuffer(:,:)
real(8), intent(inout) :: ebuffer(:,:)
real(8), intent(inout) :: sbuffer(:,:)
real(8), intent(inout) :: nbuffer(:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3D(size(field,1),size(field,2),1)
real(8) :: wbuffer3D(size(wbuffer,1),size(wbuffer,2),1)
real(8) :: ebuffer3D(size(ebuffer,1),size(ebuffer,2),1)
real(8) :: sbuffer3D(size(sbuffer,1),size(sbuffer,2),1)
real(8) :: nbuffer3D(size(nbuffer,1),size(nbuffer,2),1)
pointer( ptr, field3D )
pointer( ptr_w, wbuffer3D)
pointer( ptr_e, ebuffer3D)
pointer( ptr_s, sbuffer3D)
pointer( ptr_n, nbuffer3D)
ptr = LOC(field)
ptr_w = LOC(wbuffer)
ptr_e = LOC(ebuffer)
ptr_s = LOC(sbuffer)
ptr_n = LOC(nbuffer)
call mpp_update_nest_fine( field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &
flags, complete, position, extra_halo, name, tile_count)
return
end subroutine mpp_update_nest_fine_r8_2D
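! The 3D routine implements the group-update protocol: each call records the field and
! halo-buffer addresses in the saved f_addrs/wb_addrs/eb_addrs/sb_addrs/nb_addrs lists and
! checks that the new field is compatible with the first member of the group; only when
! the group is complete (complete omitted or .true.) is search_C2F_nest_overlap consulted
! and a single mpp_do_update_nest_fine issued for all accumulated fields.
!
! A sketch of typical use, assuming u_fine, v_fine and the *buf* arrays are caller-
! allocated variables (hypothetical names, not from this file):
!   call mpp_update_nest_fine(u_fine, nest_domain, wbuf_u, ebuf_u, sbuf_u, nbuf_u, complete=.false.)
!   call mpp_update_nest_fine(v_fine, nest_domain, wbuf_v, ebuf_v, sbuf_v, nbuf_v, complete=.true.)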
subroutine mpp_update_nest_fine_r8_3D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
real(8), intent(in) :: field(:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(8), intent(inout) :: wbuffer(:,:,:)
real(8), intent(inout) :: ebuffer(:,:,:)
real(8), intent(inout) :: sbuffer(:,:,:)
real(8), intent(inout) :: nbuffer(:,:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: d_type
type(nestSpec), pointer :: update=>NULL()
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: f_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: wb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: eb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: sb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: nb_addrs=-9999
character(len=3) :: text
logical :: is_complete, set_mismatch
integer :: tile
integer :: add_halo, update_flags, update_position
integer :: wbuffersz, ebuffersz, sbuffersz, nbuffersz
integer :: isize, jsize, ksize, l_size
integer, save :: isize_save=0, jsize_save=0, ksize_save=0
integer :: wbuffersz_save=0, ebuffersz_save=0, sbuffersz_save=0, nbuffersz_save=0
integer, save :: add_halo_save=0, update_flags_save=0, update_position_save=0
integer, save :: list=0
add_halo = 0
if(present(extra_halo)) add_halo = extra_halo
update_position = CENTER
if(present(position)) update_position = position
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(present(tile_count)) tile = tile_count
if( tile > 1 ) then
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D: currently does not support multiple tiles per pe')
endif
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list) = LOC(field)
wb_addrs(list) = LOC(wbuffer)
eb_addrs(list) = LOC(ebuffer)
sb_addrs(list) = LOC(sbuffer)
nb_addrs(list) = LOC(nbuffer)
wbuffersz = size(wbuffer); ebuffersz = size(ebuffer)
sbuffersz = size(sbuffer); nbuffersz = size(nbuffer)
isize=size(field,1); jsize=size(field,2); ksize = size(field,3)
if(list == 1)then
isize_save = isize; jsize_save = jsize; ksize_save = ksize
update_position_save = update_position
update_flags_save = update_flags
wbuffersz_save = wbuffersz; ebuffersz_save = ebuffersz
sbuffersz_save = sbuffersz; nbuffersz_save = nbuffersz
add_halo_save = add_halo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize_save /= isize)
set_mismatch = set_mismatch .OR. (jsize_save /= jsize)
set_mismatch = set_mismatch .OR. (ksize_save /= ksize)
set_mismatch = set_mismatch .OR. (update_position_save /= update_position)
set_mismatch = set_mismatch .OR. (wbuffersz_save /= wbuffersz)
set_mismatch = set_mismatch .OR. (ebuffersz_save /= ebuffersz)
set_mismatch = set_mismatch .OR. (sbuffersz_save /= sbuffersz)
set_mismatch = set_mismatch .OR. (nbuffersz_save /= nbuffersz)
set_mismatch = set_mismatch .OR. (update_flags_save /= update_flags)
set_mismatch = set_mismatch .OR. (add_halo_save /= add_halo)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D_: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(is_complete)then
update => search_C2F_nest_overlap(nest_domain, add_halo, update_position)
call mpp_do_update_nest_fine(f_addrs(1:l_size), nest_domain, update, d_type, ksize, &
wb_addrs(1:l_size), eb_addrs(1:l_size), sb_addrs(1:l_size), nb_addrs(1:l_size), update_flags )
endif
end subroutine mpp_update_nest_fine_r8_3D
!###############################################################################
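! The 4D wrapper uses the same Cray-pointer trick as the 2D one, but views the field and
! buffers as 3D arrays whose third extent is the product of dimensions 3 and 4, so the
! single 3D implementation serves both ranks.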
subroutine mpp_update_nest_fine_r8_4D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
real(8), intent(in) :: field(:,:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(8), intent(inout) :: wbuffer(:,:,:,:)
real(8), intent(inout) :: ebuffer(:,:,:,:)
real(8), intent(inout) :: sbuffer(:,:,:,:)
real(8), intent(inout) :: nbuffer(:,:,:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
real(8) :: wbuffer3D(size(wbuffer,1),size(wbuffer,2),size(wbuffer,3)*size(wbuffer,4))
real(8) :: ebuffer3D(size(ebuffer,1),size(ebuffer,2),size(ebuffer,3)*size(ebuffer,4))
real(8) :: sbuffer3D(size(sbuffer,1),size(sbuffer,2),size(sbuffer,3)*size(sbuffer,4))
real(8) :: nbuffer3D(size(nbuffer,1),size(nbuffer,2),size(nbuffer,3)*size(nbuffer,4))
pointer( ptr, field3D )
pointer( ptr_w, wbuffer3D)
pointer( ptr_e, ebuffer3D)
pointer( ptr_s, sbuffer3D)
pointer( ptr_n, nbuffer3D)
ptr = LOC(field)
ptr_w = LOC(wbuffer)
ptr_e = LOC(ebuffer)
ptr_s = LOC(sbuffer)
ptr_n = LOC(nbuffer)
call mpp_update_nest_fine( field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &
flags, complete, position, extra_halo, name, tile_count)
return
end subroutine mpp_update_nest_fine_r8_4D
subroutine mpp_update_nest_coarse_r8_2D(field, nest_domain, buffer, complete, position, name, tile_count)
real(8), intent(in) :: field(:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(8), intent(inout) :: buffer(:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3D(size(field,1),size(field,2),1)
real(8) :: buffer3D(size(buffer,1),size(buffer,2),1)
pointer( ptr, field3D )
pointer( ptr_b, buffer3D)
ptr = LOC(field)
ptr_b = LOC(buffer)
call mpp_update_nest_coarse( field3D, nest_domain, buffer3D, complete, position, name, tile_count)
return
end subroutine mpp_update_nest_coarse_r8_2D
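! mpp_update_nest_coarse transfers data in the opposite direction, from the fine grid to
! the coarse grid: the same group-update bookkeeping is used, but the overlap comes from
! search_F2C_nest_overlap and the received data lands in a single coarse-grid buffer.
! A hedged sketch, with t_fine and t_coarse_buf as hypothetical caller variables:
!   call mpp_update_nest_coarse(t_fine, nest_domain, t_coarse_buf)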
subroutine mpp_update_nest_coarse_r8_3D(field, nest_domain, buffer, complete, position, name, tile_count)
real(8), intent(in) :: field(:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(8), intent(inout) :: buffer(:,:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: d_type
type(nestSpec), pointer :: update=>NULL()
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: f_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: b_addrs=-9999
character(len=3) :: text
logical :: is_complete, set_mismatch
integer :: tile
integer :: update_position
integer :: buffersz, buffersz_save=0
integer :: isize, jsize, ksize, l_size
integer, save :: isize_save=0, jsize_save=0, ksize_save=0
integer, save :: update_position_save=0
integer, save :: list=0
update_position = CENTER
if(present(position)) update_position = position
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(present(tile_count)) tile = tile_count
if( tile > 1 ) then
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D: currently does not support multiple tiles per pe')
endif
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list) = LOC(field)
b_addrs(list) = LOC(buffer)
buffersz = size(buffer)
isize=size(field,1); jsize=size(field,2); ksize = size(field,3)
if(list == 1)then
isize_save = isize; jsize_save = jsize; ksize_save = ksize
update_position_save = update_position
buffersz_save = buffersz
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize_save /= isize)
set_mismatch = set_mismatch .OR. (jsize_save /= jsize)
set_mismatch = set_mismatch .OR. (ksize_save /= ksize)
set_mismatch = set_mismatch .OR. (update_position_save /= update_position)
set_mismatch = set_mismatch .OR. (buffersz_save /= buffersz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D_: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(is_complete)then
update => search_F2C_nest_overlap(nest_domain, update_position)
call mpp_do_update_nest_coarse(f_addrs(1:l_size), nest_domain, update, d_type, ksize, &
b_addrs(1:l_size))
endif
end subroutine mpp_update_nest_coarse_r8_3D
!###############################################################################
subroutine mpp_update_nest_coarse_r8_4D(field, nest_domain, buffer, complete, position, name, tile_count)
real(8), intent(in) :: field(:,:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(8), intent(inout) :: buffer(:,:,:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
real(8) :: buffer3D(size(buffer,1),size(buffer,2),size(buffer,3)*size(buffer,4))
pointer( ptr, field3D )
pointer( ptr_b, buffer3D)
ptr = LOC(field)
ptr_b = LOC(buffer)
call mpp_update_nest_coarse( field3D, nest_domain, buffer3D, complete, position, name, tile_count)
return
end subroutine mpp_update_nest_coarse_r8_4D
# 1303 "../mpp/include/mpp_domains_misc.inc" 2
# 1322
# 1 "../mpp/include/mpp_update_nest_domains.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_update_nest_fine_i8_2D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
integer(8), intent(in) :: field(:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(8), intent(inout) :: wbuffer(:,:)
integer(8), intent(inout) :: ebuffer(:,:)
integer(8), intent(inout) :: sbuffer(:,:)
integer(8), intent(inout) :: nbuffer(:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(8) :: field3D(size(field,1),size(field,2),1)
integer(8) :: wbuffer3D(size(wbuffer,1),size(wbuffer,2),1)
integer(8) :: ebuffer3D(size(ebuffer,1),size(ebuffer,2),1)
integer(8) :: sbuffer3D(size(sbuffer,1),size(sbuffer,2),1)
integer(8) :: nbuffer3D(size(nbuffer,1),size(nbuffer,2),1)
pointer( ptr, field3D )
pointer( ptr_w, wbuffer3D)
pointer( ptr_e, ebuffer3D)
pointer( ptr_s, sbuffer3D)
pointer( ptr_n, nbuffer3D)
ptr = LOC(field)
ptr_w = LOC(wbuffer)
ptr_e = LOC(ebuffer)
ptr_s = LOC(sbuffer)
ptr_n = LOC(nbuffer)
call mpp_update_nest_fine( field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &
flags, complete, position, extra_halo, name, tile_count)
return
end subroutine mpp_update_nest_fine_i8_2D
subroutine mpp_update_nest_fine_i8_3D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
integer(8), intent(in) :: field(:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(8), intent(inout) :: wbuffer(:,:,:)
integer(8), intent(inout) :: ebuffer(:,:,:)
integer(8), intent(inout) :: sbuffer(:,:,:)
integer(8), intent(inout) :: nbuffer(:,:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(8) :: d_type
type(nestSpec), pointer :: update=>NULL()
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: f_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: wb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: eb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: sb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: nb_addrs=-9999
character(len=3) :: text
logical :: is_complete, set_mismatch
integer :: tile
integer :: add_halo, update_flags, update_position
integer :: wbuffersz, ebuffersz, sbuffersz, nbuffersz
integer :: isize, jsize, ksize, l_size
integer, save :: isize_save=0, jsize_save=0, ksize_save=0
integer :: wbuffersz_save=0, ebuffersz_save=0, sbuffersz_save=0, nbuffersz_save=0
integer, save :: add_halo_save=0, update_flags_save=0, update_position_save=0
integer, save :: list=0
add_halo = 0
if(present(extra_halo)) add_halo = extra_halo
update_position = CENTER
if(present(position)) update_position = position
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(present(tile_count)) tile = tile_count
if( tile > 1 ) then
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D: currently does not support multiple tiles per pe')
endif
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list) = LOC(field)
wb_addrs(list) = LOC(wbuffer)
eb_addrs(list) = LOC(ebuffer)
sb_addrs(list) = LOC(sbuffer)
nb_addrs(list) = LOC(nbuffer)
wbuffersz = size(wbuffer); ebuffersz = size(ebuffer)
sbuffersz = size(sbuffer); nbuffersz = size(nbuffer)
isize=size(field,1); jsize=size(field,2); ksize = size(field,3)
if(list == 1)then
isize_save = isize; jsize_save = jsize; ksize_save = ksize
update_position_save = update_position
update_flags_save = update_flags
wbuffersz_save = wbuffersz; ebuffersz_save = ebuffersz
sbuffersz_save = sbuffersz; nbuffersz_save = nbuffersz
add_halo_save = add_halo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize_save /= isize)
set_mismatch = set_mismatch .OR. (jsize_save /= jsize)
set_mismatch = set_mismatch .OR. (ksize_save /= ksize)
set_mismatch = set_mismatch .OR. (update_position_save /= update_position)
set_mismatch = set_mismatch .OR. (wbuffersz_save /= wbuffersz)
set_mismatch = set_mismatch .OR. (ebuffersz_save /= ebuffersz)
set_mismatch = set_mismatch .OR. (sbuffersz_save /= sbuffersz)
set_mismatch = set_mismatch .OR. (nbuffersz_save /= nbuffersz)
set_mismatch = set_mismatch .OR. (update_flags_save /= update_flags)
set_mismatch = set_mismatch .OR. (add_halo_save /= add_halo)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D_: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(is_complete)then
update => search_C2F_nest_overlap(nest_domain, add_halo, update_position)
call mpp_do_update_nest_fine(f_addrs(1:l_size), nest_domain, update, d_type, ksize, &
wb_addrs(1:l_size), eb_addrs(1:l_size), sb_addrs(1:l_size), nb_addrs(1:l_size), update_flags )
endif
end subroutine mpp_update_nest_fine_i8_3D
!###############################################################################
subroutine mpp_update_nest_fine_i8_4D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
integer(8), intent(in) :: field(:,:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(8), intent(inout) :: wbuffer(:,:,:,:)
integer(8), intent(inout) :: ebuffer(:,:,:,:)
integer(8), intent(inout) :: sbuffer(:,:,:,:)
integer(8), intent(inout) :: nbuffer(:,:,:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer(8) :: wbuffer3D(size(wbuffer,1),size(wbuffer,2),size(wbuffer,3)*size(wbuffer,4))
integer(8) :: ebuffer3D(size(ebuffer,1),size(ebuffer,2),size(ebuffer,3)*size(ebuffer,4))
integer(8) :: sbuffer3D(size(sbuffer,1),size(sbuffer,2),size(sbuffer,3)*size(sbuffer,4))
integer(8) :: nbuffer3D(size(nbuffer,1),size(nbuffer,2),size(nbuffer,3)*size(nbuffer,4))
pointer( ptr, field3D )
pointer( ptr_w, wbuffer3D)
pointer( ptr_e, ebuffer3D)
pointer( ptr_s, sbuffer3D)
pointer( ptr_n, nbuffer3D)
ptr = LOC(field)
ptr_w = LOC(wbuffer)
ptr_e = LOC(ebuffer)
ptr_s = LOC(sbuffer)
ptr_n = LOC(nbuffer)
call mpp_update_nest_fine( field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &
flags, complete, position, extra_halo, name, tile_count)
return
end subroutine mpp_update_nest_fine_i8_4D
subroutine mpp_update_nest_coarse_i8_2D(field, nest_domain, buffer, complete, position, name, tile_count)
integer(8), intent(in) :: field(:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(8), intent(inout) :: buffer(:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(8) :: field3D(size(field,1),size(field,2),1)
integer(8) :: buffer3D(size(buffer,1),size(buffer,2),1)
pointer( ptr, field3D )
pointer( ptr_b, buffer3D)
ptr = LOC(field)
ptr_b = LOC(buffer)
call mpp_update_nest_coarse( field3D, nest_domain, buffer3D, complete, position, name, tile_count)
return
end subroutine mpp_update_nest_coarse_i8_2D
subroutine mpp_update_nest_coarse_i8_3D(field, nest_domain, buffer, complete, position, name, tile_count)
integer(8), intent(in) :: field(:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(8), intent(inout) :: buffer(:,:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(8) :: d_type
type(nestSpec), pointer :: update=>NULL()
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: f_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: b_addrs=-9999
character(len=3) :: text
logical :: is_complete, set_mismatch
integer :: tile
integer :: update_position
integer :: buffersz, buffersz_save=0
integer :: isize, jsize, ksize, l_size
integer, save :: isize_save=0, jsize_save=0, ksize_save=0
integer, save :: update_position_save=0
integer, save :: list=0
update_position = CENTER
if(present(position)) update_position = position
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(present(tile_count)) tile = tile_count
if( tile > 1 ) then
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D: currently does not support multiple tiles per pe')
endif
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list) = LOC(field)
b_addrs(list) = LOC(buffer)
buffersz = size(buffer)
isize=size(field,1); jsize=size(field,2); ksize = size(field,3)
if(list == 1)then
isize_save = isize; jsize_save = jsize; ksize_save = ksize
update_position_save = update_position
buffersz_save = buffersz
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize_save /= isize)
set_mismatch = set_mismatch .OR. (jsize_save /= jsize)
set_mismatch = set_mismatch .OR. (ksize_save /= ksize)
set_mismatch = set_mismatch .OR. (update_position_save /= update_position)
set_mismatch = set_mismatch .OR. (buffersz_save /= buffersz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D_: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(is_complete)then
update => search_F2C_nest_overlap(nest_domain, update_position)
call mpp_do_update_nest_coarse(f_addrs(1:l_size), nest_domain, update, d_type, ksize, &
b_addrs(1:l_size))
endif
end subroutine mpp_update_nest_coarse_i8_3D
!###############################################################################
subroutine mpp_update_nest_coarse_i8_4D(field, nest_domain, buffer, complete, position, name, tile_count)
integer(8), intent(in) :: field(:,:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(8), intent(inout) :: buffer(:,:,:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer(8) :: buffer3D(size(buffer,1),size(buffer,2),size(buffer,3)*size(buffer,4))
pointer( ptr, field3D )
pointer( ptr_b, buffer3D)
ptr = LOC(field)
ptr_b = LOC(buffer)
call mpp_update_nest_coarse( field3D, nest_domain, buffer3D, complete, position, name, tile_count)
return
end subroutine mpp_update_nest_coarse_i8_4D
# 1342 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_update_nest_domains.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_update_nest_fine_r4_2D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
real(4), intent(in) :: field(:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(4), intent(inout) :: wbuffer(:,:)
real(4), intent(inout) :: ebuffer(:,:)
real(4), intent(inout) :: sbuffer(:,:)
real(4), intent(inout) :: nbuffer(:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3D(size(field,1),size(field,2),1)
real(4) :: wbuffer3D(size(wbuffer,1),size(wbuffer,2),1)
real(4) :: ebuffer3D(size(ebuffer,1),size(ebuffer,2),1)
real(4) :: sbuffer3D(size(sbuffer,1),size(sbuffer,2),1)
real(4) :: nbuffer3D(size(nbuffer,1),size(nbuffer,2),1)
pointer( ptr, field3D )
pointer( ptr_w, wbuffer3D)
pointer( ptr_e, ebuffer3D)
pointer( ptr_s, sbuffer3D)
pointer( ptr_n, nbuffer3D)
ptr = LOC(field)
ptr_w = LOC(wbuffer)
ptr_e = LOC(ebuffer)
ptr_s = LOC(sbuffer)
ptr_n = LOC(nbuffer)
call mpp_update_nest_fine( field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &
flags, complete, position, extra_halo, name, tile_count)
return
end subroutine mpp_update_nest_fine_r4_2D
subroutine mpp_update_nest_fine_r4_3D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
real(4), intent(in) :: field(:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(4), intent(inout) :: wbuffer(:,:,:)
real(4), intent(inout) :: ebuffer(:,:,:)
real(4), intent(inout) :: sbuffer(:,:,:)
real(4), intent(inout) :: nbuffer(:,:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: d_type
type(nestSpec), pointer :: update=>NULL()
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: f_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: wb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: eb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: sb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: nb_addrs=-9999
character(len=3) :: text
logical :: is_complete, set_mismatch
integer :: tile
integer :: add_halo, update_flags, update_position
integer :: wbuffersz, ebuffersz, sbuffersz, nbuffersz
integer :: isize, jsize, ksize, l_size
integer, save :: isize_save=0, jsize_save=0, ksize_save=0
integer :: wbuffersz_save=0, ebuffersz_save=0, sbuffersz_save=0, nbuffersz_save=0
integer, save :: add_halo_save=0, update_flags_save=0, update_position_save=0
integer, save :: list=0
add_halo = 0
if(present(extra_halo)) add_halo = extra_halo
update_position = CENTER
if(present(position)) update_position = position
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(present(tile_count)) tile = tile_count
if( tile > 1 ) then
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D: currently does not support multiple tiles per pe')
endif
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list) = LOC(field)
wb_addrs(list) = LOC(wbuffer)
eb_addrs(list) = LOC(ebuffer)
sb_addrs(list) = LOC(sbuffer)
nb_addrs(list) = LOC(nbuffer)
wbuffersz = size(wbuffer); ebuffersz = size(ebuffer)
sbuffersz = size(sbuffer); nbuffersz = size(nbuffer)
isize=size(field,1); jsize=size(field,2); ksize = size(field,3)
if(list == 1)then
isize_save = isize; jsize_save = jsize; ksize_save = ksize
update_position_save = update_position
update_flags_save = update_flags
wbuffersz_save = wbuffersz; ebuffersz_save = ebuffersz
sbuffersz_save = sbuffersz; nbuffersz_save = nbuffersz
add_halo_save = add_halo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize_save /= isize)
set_mismatch = set_mismatch .OR. (jsize_save /= jsize)
set_mismatch = set_mismatch .OR. (ksize_save /= ksize)
set_mismatch = set_mismatch .OR. (update_position_save /= update_position)
set_mismatch = set_mismatch .OR. (wbuffersz_save /= wbuffersz)
set_mismatch = set_mismatch .OR. (ebuffersz_save /= ebuffersz)
set_mismatch = set_mismatch .OR. (sbuffersz_save /= sbuffersz)
set_mismatch = set_mismatch .OR. (nbuffersz_save /= nbuffersz)
set_mismatch = set_mismatch .OR. (update_flags_save /= update_flags)
set_mismatch = set_mismatch .OR. (add_halo_save /= add_halo)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D_: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(is_complete)then
update => search_C2F_nest_overlap(nest_domain, add_halo, update_position)
call mpp_do_update_nest_fine(f_addrs(1:l_size), nest_domain, update, d_type, ksize, &
wb_addrs(1:l_size), eb_addrs(1:l_size), sb_addrs(1:l_size), nb_addrs(1:l_size), update_flags )
endif
end subroutine mpp_update_nest_fine_r4_3D
!###############################################################################
subroutine mpp_update_nest_fine_r4_4D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
real(4), intent(in) :: field(:,:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(4), intent(inout) :: wbuffer(:,:,:,:)
real(4), intent(inout) :: ebuffer(:,:,:,:)
real(4), intent(inout) :: sbuffer(:,:,:,:)
real(4), intent(inout) :: nbuffer(:,:,:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
real(4) :: wbuffer3D(size(wbuffer,1),size(wbuffer,2),size(wbuffer,3)*size(wbuffer,4))
real(4) :: ebuffer3D(size(ebuffer,1),size(ebuffer,2),size(ebuffer,3)*size(ebuffer,4))
real(4) :: sbuffer3D(size(sbuffer,1),size(sbuffer,2),size(sbuffer,3)*size(sbuffer,4))
real(4) :: nbuffer3D(size(nbuffer,1),size(nbuffer,2),size(nbuffer,3)*size(nbuffer,4))
pointer( ptr, field3D )
pointer( ptr_w, wbuffer3D)
pointer( ptr_e, ebuffer3D)
pointer( ptr_s, sbuffer3D)
pointer( ptr_n, nbuffer3D)
ptr = LOC(field)
ptr_w = LOC(wbuffer)
ptr_e = LOC(ebuffer)
ptr_s = LOC(sbuffer)
ptr_n = LOC(nbuffer)
call mpp_update_nest_fine( field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &
flags, complete, position, extra_halo, name, tile_count)
return
end subroutine mpp_update_nest_fine_r4_4D
subroutine mpp_update_nest_coarse_r4_2D(field, nest_domain, buffer, complete, position, name, tile_count)
real(4), intent(in) :: field(:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(4), intent(inout) :: buffer(:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3D(size(field,1),size(field,2),1)
real(4) :: buffer3D(size(buffer,1),size(buffer,2),1)
pointer( ptr, field3D )
pointer( ptr_b, buffer3D)
ptr = LOC(field)
ptr_b = LOC(buffer)
call mpp_update_nest_coarse( field3D, nest_domain, buffer3D, complete, position, name, tile_count)
return
end subroutine mpp_update_nest_coarse_r4_2D
subroutine mpp_update_nest_coarse_r4_3D(field, nest_domain, buffer, complete, position, name, tile_count)
real(4), intent(in) :: field(:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(4), intent(inout) :: buffer(:,:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: d_type
type(nestSpec), pointer :: update=>NULL()
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: f_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: b_addrs=-9999
character(len=3) :: text
logical :: is_complete, set_mismatch
integer :: tile
integer :: update_position
integer :: buffersz, buffersz_save=0
integer :: isize, jsize, ksize, l_size
integer, save :: isize_save=0, jsize_save=0, ksize_save=0
integer, save :: update_position_save=0
integer, save :: list=0
update_position = CENTER
if(present(position)) update_position = position
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(present(tile_count)) tile = tile_count
if( tile > 1 ) then
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D: currently does not support multiple tiles per pe')
endif
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list) = LOC(field)
b_addrs(list) = LOC(buffer)
buffersz = size(buffer)
isize=size(field,1); jsize=size(field,2); ksize = size(field,3)
if(list == 1)then
isize_save = isize; jsize_save = jsize; ksize_save = ksize
update_position_save = update_position
buffersz_save = buffersz
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize_save /= isize)
set_mismatch = set_mismatch .OR. (jsize_save /= jsize)
set_mismatch = set_mismatch .OR. (ksize_save /= ksize)
set_mismatch = set_mismatch .OR. (update_position_save /= update_position)
set_mismatch = set_mismatch .OR. (buffersz_save /= buffersz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D_: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(is_complete)then
update => search_F2C_nest_overlap(nest_domain, update_position)
call mpp_do_update_nest_coarse(f_addrs(1:l_size), nest_domain, update, d_type, ksize, &
b_addrs(1:l_size))
endif
end subroutine mpp_update_nest_coarse_r4_3D
!###############################################################################
subroutine mpp_update_nest_coarse_r4_4D(field, nest_domain, buffer, complete, position, name, tile_count)
real(4), intent(in) :: field(:,:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
real(4), intent(inout) :: buffer(:,:,:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
real(4) :: buffer3D(size(buffer,1),size(buffer,2),size(buffer,3)*size(buffer,4))
pointer( ptr, field3D )
pointer( ptr_b, buffer3D)
ptr = LOC(field)
ptr_b = LOC(buffer)
call mpp_update_nest_coarse( field3D, nest_domain, buffer3D, complete, position, name, tile_count)
return
end subroutine mpp_update_nest_coarse_r4_4D
# 1362 "../mpp/include/mpp_domains_misc.inc" 2
# 1382
# 1 "../mpp/include/mpp_update_nest_domains.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_update_nest_fine_i4_2D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
integer(4), intent(in) :: field(:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(4), intent(inout) :: wbuffer(:,:)
integer(4), intent(inout) :: ebuffer(:,:)
integer(4), intent(inout) :: sbuffer(:,:)
integer(4), intent(inout) :: nbuffer(:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(4) :: field3D(size(field,1),size(field,2),1)
integer(4) :: wbuffer3D(size(wbuffer,1),size(wbuffer,2),1)
integer(4) :: ebuffer3D(size(ebuffer,1),size(ebuffer,2),1)
integer(4) :: sbuffer3D(size(sbuffer,1),size(sbuffer,2),1)
integer(4) :: nbuffer3D(size(nbuffer,1),size(nbuffer,2),1)
pointer( ptr, field3D )
pointer( ptr_w, wbuffer3D)
pointer( ptr_e, ebuffer3D)
pointer( ptr_s, sbuffer3D)
pointer( ptr_n, nbuffer3D)
ptr = LOC(field)
ptr_w = LOC(wbuffer)
ptr_e = LOC(ebuffer)
ptr_s = LOC(sbuffer)
ptr_n = LOC(nbuffer)
call mpp_update_nest_fine( field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &
flags, complete, position, extra_halo, name, tile_count)
return
end subroutine mpp_update_nest_fine_i4_2D
subroutine mpp_update_nest_fine_i4_3D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
integer(4), intent(in) :: field(:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(4), intent(inout) :: wbuffer(:,:,:)
integer(4), intent(inout) :: ebuffer(:,:,:)
integer(4), intent(inout) :: sbuffer(:,:,:)
integer(4), intent(inout) :: nbuffer(:,:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(4) :: d_type
type(nestSpec), pointer :: update=>NULL()
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: f_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: wb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: eb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: sb_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: nb_addrs=-9999
character(len=3) :: text
logical :: is_complete, set_mismatch
integer :: tile
integer :: add_halo, update_flags, update_position
integer :: wbuffersz, ebuffersz, sbuffersz, nbuffersz
integer :: isize, jsize, ksize, l_size
integer, save :: isize_save=0, jsize_save=0, ksize_save=0
integer :: wbuffersz_save=0, ebuffersz_save=0, sbuffersz_save=0, nbuffersz_save=0
integer, save :: add_halo_save=0, update_flags_save=0, update_position_save=0
integer, save :: list=0
add_halo = 0
if(present(extra_halo)) add_halo = extra_halo
update_position = CENTER
if(present(position)) update_position = position
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(present(tile_count)) tile = tile_count
if( tile > 1 ) then
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D: currently does not support multiple tiles per pe')
endif
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list) = LOC(field)
wb_addrs(list) = LOC(wbuffer)
eb_addrs(list) = LOC(ebuffer)
sb_addrs(list) = LOC(sbuffer)
nb_addrs(list) = LOC(nbuffer)
wbuffersz = size(wbuffer); ebuffersz = size(ebuffer)
sbuffersz = size(sbuffer); nbuffersz = size(nbuffer)
isize=size(field,1); jsize=size(field,2); ksize = size(field,3)
if(list == 1)then
isize_save = isize; jsize_save = jsize; ksize_save = ksize
update_position_save = update_position
update_flags_save = update_flags
wbuffersz_save = wbuffersz; ebuffersz_save = ebuffersz
sbuffersz_save = sbuffersz; nbuffersz_save = nbuffersz
add_halo_save = add_halo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize_save /= isize)
set_mismatch = set_mismatch .OR. (jsize_save /= jsize)
set_mismatch = set_mismatch .OR. (ksize_save /= ksize)
set_mismatch = set_mismatch .OR. (update_position_save /= update_position)
set_mismatch = set_mismatch .OR. (wbuffersz_save /= wbuffersz)
set_mismatch = set_mismatch .OR. (ebuffersz_save /= ebuffersz)
set_mismatch = set_mismatch .OR. (sbuffersz_save /= sbuffersz)
set_mismatch = set_mismatch .OR. (nbuffersz_save /= nbuffersz)
set_mismatch = set_mismatch .OR. (update_flags_save /= update_flags)
set_mismatch = set_mismatch .OR. (add_halo_save /= add_halo)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_NEST_FINE_3D_: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(is_complete)then
update => search_C2F_nest_overlap(nest_domain, add_halo, update_position)
call mpp_do_update_nest_fine(f_addrs(1:l_size), nest_domain, update, d_type, ksize, &
wb_addrs(1:l_size), eb_addrs(1:l_size), sb_addrs(1:l_size), nb_addrs(1:l_size), update_flags )
endif
end subroutine mpp_update_nest_fine_i4_3D
!###############################################################################
subroutine mpp_update_nest_fine_i4_4D(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
flags, complete, position, extra_halo, name, tile_count)
integer(4), intent(in) :: field(:,:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(4), intent(inout) :: wbuffer(:,:,:,:)
integer(4), intent(inout) :: ebuffer(:,:,:,:)
integer(4), intent(inout) :: sbuffer(:,:,:,:)
integer(4), intent(inout) :: nbuffer(:,:,:,:)
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: extra_halo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer(4) :: wbuffer3D(size(wbuffer,1),size(wbuffer,2),size(wbuffer,3)*size(wbuffer,4))
integer(4) :: ebuffer3D(size(ebuffer,1),size(ebuffer,2),size(ebuffer,3)*size(ebuffer,4))
integer(4) :: sbuffer3D(size(sbuffer,1),size(sbuffer,2),size(sbuffer,3)*size(sbuffer,4))
integer(4) :: nbuffer3D(size(nbuffer,1),size(nbuffer,2),size(nbuffer,3)*size(nbuffer,4))
pointer( ptr, field3D )
pointer( ptr_w, wbuffer3D)
pointer( ptr_e, ebuffer3D)
pointer( ptr_s, sbuffer3D)
pointer( ptr_n, nbuffer3D)
ptr = LOC(field)
ptr_w = LOC(wbuffer)
ptr_e = LOC(ebuffer)
ptr_s = LOC(sbuffer)
ptr_n = LOC(nbuffer)
call mpp_update_nest_fine( field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &
flags, complete, position, extra_halo, name, tile_count)
return
end subroutine mpp_update_nest_fine_i4_4D
subroutine mpp_update_nest_coarse_i4_2D(field, nest_domain, buffer, complete, position, name, tile_count)
integer(4), intent(in) :: field(:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(4), intent(inout) :: buffer(:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(4) :: field3D(size(field,1),size(field,2),1)
integer(4) :: buffer3D(size(buffer,1),size(buffer,2),1)
pointer( ptr, field3D )
pointer( ptr_b, buffer3D)
ptr = LOC(field)
ptr_b = LOC(buffer)
call mpp_update_nest_coarse( field3D, nest_domain, buffer3D, complete, position, name, tile_count)
return
end subroutine mpp_update_nest_coarse_i4_2D
subroutine mpp_update_nest_coarse_i4_3D(field, nest_domain, buffer, complete, position, name, tile_count)
integer(4), intent(in) :: field(:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(4), intent(inout) :: buffer(:,:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(4) :: d_type
type(nestSpec), pointer :: update=>NULL()
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: f_addrs=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS),save :: b_addrs=-9999
character(len=3) :: text
logical :: is_complete, set_mismatch
integer :: tile
integer :: update_position
integer :: buffersz, buffersz_save=0
integer :: isize, jsize, ksize, l_size
integer, save :: isize_save=0, jsize_save=0, ksize_save=0
integer, save :: update_position_save=0
integer, save :: list=0
update_position = CENTER
if(present(position)) update_position = position
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(present(tile_count)) tile = tile_count
if( tile > 1 ) then
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D: currently does not support multiple tiles per pe')
endif
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list) = LOC(field)
b_addrs(list) = LOC(buffer)
buffersz = size(buffer)
isize=size(field,1); jsize=size(field,2); ksize = size(field,3)
if(list == 1)then
isize_save = isize; jsize_save = jsize; ksize_save = ksize
update_position_save = update_position
buffersz_save = buffersz
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize_save /= isize)
set_mismatch = set_mismatch .OR. (jsize_save /= jsize)
set_mismatch = set_mismatch .OR. (ksize_save /= ksize)
set_mismatch = set_mismatch .OR. (update_position_save /= update_position)
set_mismatch = set_mismatch .OR. (buffersz_save /= buffersz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_NEST_COARSE_3D_: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(is_complete)then
update => search_F2C_nest_overlap(nest_domain, update_position)
call mpp_do_update_nest_coarse(f_addrs(1:l_size), nest_domain, update, d_type, ksize, &
b_addrs(1:l_size))
endif
end subroutine mpp_update_nest_coarse_i4_3D
!###############################################################################
subroutine mpp_update_nest_coarse_i4_4D(field, nest_domain, buffer, complete, position, name, tile_count)
integer(4), intent(in) :: field(:,:,:,:)
type(nest_domain_type), intent(inout) :: nest_domain
integer(4), intent(inout) :: buffer(:,:,:,:)
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer(4) :: buffer3D(size(buffer,1),size(buffer,2),size(buffer,3)*size(buffer,4))
pointer( ptr, field3D )
pointer( ptr_b, buffer3D)
ptr = LOC(field)
ptr_b = LOC(buffer)
call mpp_update_nest_coarse( field3D, nest_domain, buffer3D, complete, position, name, tile_count)
return
end subroutine mpp_update_nest_coarse_i4_4D
# 1401 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_update_nest.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
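! mpp_do_update_nest_fine_* performs the coarse-to-fine (C2F) transfer for a group of
! fields in four phases: pre-post the receives into the shared mpp_domains_stack, pack
! and send the requested overlap data from the local field(s), wait for the receives to
! complete, and finally unpack each received block into the caller's west/east/south/
! north buffers according to the overlap direction.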
subroutine mpp_do_update_nest_fine_r8_3D(f_addrs, nest_domain, update, d_type, ke, wb_addrs, eb_addrs, &
sb_addrs, nb_addrs, flags)
integer(8), intent(in) :: f_addrs(:)
type(nest_domain_type), intent(in) :: nest_domain
type(nestSpec), intent(in) :: update
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer(8), intent(in) :: wb_addrs(:)
integer(8), intent(in) :: eb_addrs(:)
integer(8), intent(in) :: sb_addrs(:)
integer(8), intent(in) :: nb_addrs(:)
integer, intent(in) :: flags
character(len=8) :: text
type(overlap_type), pointer :: overPtr => NULL()
logical :: send(8), recv(8), update_edge_only
integer :: from_pe, to_pe, dir
integer :: m, n, l, i, j, k
integer :: is, ie, js, je, l_size
integer :: buffer_pos, msgsize
integer :: buffer_recv_size, pos
real(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
real(8) :: wbuffer(update%west%is_you :update%west%ie_you, update%west%js_you :update%west%je_you, ke)
real(8) :: ebuffer(update%east%is_you :update%east%ie_you, update%east%js_you :update%east%je_you, ke)
real(8) :: sbuffer(update%south%is_you:update%south%ie_you, update%south%js_you:update%south%je_you,ke)
real(8) :: nbuffer(update%north%is_you:update%north%ie_you, update%north%js_you:update%north%je_you,ke)
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr_field, field)
pointer(ptr_buffer, buffer )
pointer(ptr_wbuffer, wbuffer)
pointer(ptr_ebuffer, ebuffer)
pointer(ptr_sbuffer, sbuffer)
pointer(ptr_nbuffer, nbuffer)
update_edge_only = BTEST(flags, EDGEONLY)
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
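! The eight entries of recv/send map to the directions E, SE, S, SW, W, NW, N, NE: odd
! indices are the sides, even indices the corners. In edge-only mode a request with no
! sides set defaults to all four sides; otherwise a corner is active only when both of
! its adjacent sides are requested.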
ptr_buffer = LOC(mpp_domains_stack)
l_size = size(f_addrs(:))
!--- pre-post receiving
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_recv_clock)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_FINE_3D_: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
call mpp_clock_end(nest_recv_clock)
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
!--- pack and send the data
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_pack_clock)
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_FINE_3D_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
endif
end do ! do n = 1, overPtr%count
call mpp_clock_end(nest_pack_clock)
call mpp_clock_begin(nest_send_clock)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = pos
end if
call mpp_clock_end(nest_send_clock)
end do ! end do m = 1, update%nsend
!unpack buffer
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(nest_wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(nest_unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
dir = overPtr%dir(n)
if( recv(dir) ) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
select case (dir)
case ( 1 ) ! east
do l=1,l_size ! loop over number of fields
ptr_ebuffer = eb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
ebuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 3 ) ! south
do l=1,l_size ! loop over number of fields
ptr_sbuffer = sb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
sbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 5 ) ! west
do l=1,l_size ! loop over number of fields
ptr_wbuffer = wb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
wbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 7 ) ! north
do l=1,l_size ! loop over number of fields
ptr_nbuffer = nb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
nbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
end select
endif
end do ! do n = overPtr%count, 1, -1
end do
call mpp_clock_end(nest_unpk_clock)
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self( )
call mpp_clock_end(nest_wait_clock)
return
end subroutine mpp_do_update_nest_fine_r8_3D
!###############################################################################
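! mpp_do_update_nest_coarse_<type>_3D: the same three-phase exchange as the
! fine version above, except that every overlap region participates (there is
! no direction mask) and the received data are unpacked into a single fill
! buffer (fillbuffer, addressed through b_addrs) spanning update%center rather
! than into four edge buffers.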
subroutine mpp_do_update_nest_coarse_r8_3D(f_addrs, nest_domain, update, d_type, ke, b_addrs)
integer(8), intent(in) :: f_addrs(:)
type(nest_domain_type), intent(in) :: nest_domain
type(nestSpec), intent(in) :: update
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer(8), intent(in) :: b_addrs(:)
character(len=8) :: text
type(overlap_type), pointer :: overPtr => NULL()
integer :: from_pe, to_pe
integer :: m, n, l, i, j, k
integer :: is, ie, js, je, l_size
integer :: buffer_pos, msgsize
integer :: buffer_recv_size, pos
real(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
real(8) :: fillbuffer(update%center%is_you:update%center%ie_you, update%center%js_you :update%center%je_you, ke)
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr_field, field)
pointer(ptr_buffer, buffer )
pointer(ptr_fillbuffer, fillbuffer)
ptr_buffer = LOC(mpp_domains_stack)
l_size = size(f_addrs(:))
!--- pre-post receiving
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_recv_clock)
msgsize = 0
do n = 1, overPtr%count
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_COARSE_3D_: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
call mpp_clock_end(nest_recv_clock)
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
!--- pack and send the data
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_pack_clock)
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_COARSE_3D_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end do ! do n = 1, overPtr%count
call mpp_clock_end(nest_pack_clock)
call mpp_clock_begin(nest_send_clock)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
call mpp_clock_end(nest_send_clock)
end do ! end do m = 1, update%nsend
!unpack buffer
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(nest_wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(nest_unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fillbuffer = b_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fillbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
end do ! do n = overPtr%count, 1, -1
end do
call mpp_clock_end(nest_unpk_clock)
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self( )
call mpp_clock_end(nest_wait_clock)
return
end subroutine mpp_do_update_nest_coarse_r8_3D
# 1411 "../mpp/include/mpp_domains_misc.inc" 2
# 1422
# 1 "../mpp/include/mpp_do_update_nest.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_nest_fine_i8_3D(f_addrs, nest_domain, update, d_type, ke, wb_addrs, eb_addrs, &
sb_addrs, nb_addrs, flags)
integer(8), intent(in) :: f_addrs(:)
type(nest_domain_type), intent(in) :: nest_domain
type(nestSpec), intent(in) :: update
integer(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer(8), intent(in) :: wb_addrs(:)
integer(8), intent(in) :: eb_addrs(:)
integer(8), intent(in) :: sb_addrs(:)
integer(8), intent(in) :: nb_addrs(:)
integer, intent(in) :: flags
character(len=8) :: text
type(overlap_type), pointer :: overPtr => NULL()
logical :: send(8), recv(8), update_edge_only
integer :: from_pe, to_pe, dir
integer :: m, n, l, i, j, k
integer :: is, ie, js, je, l_size
integer :: buffer_pos, msgsize
integer :: buffer_recv_size, pos
integer(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
integer(8) :: wbuffer(update%west%is_you :update%west%ie_you, update%west%js_you :update%west%je_you, ke)
integer(8) :: ebuffer(update%east%is_you :update%east%ie_you, update%east%js_you :update%east%je_you, ke)
integer(8) :: sbuffer(update%south%is_you:update%south%ie_you, update%south%js_you:update%south%je_you,ke)
integer(8) :: nbuffer(update%north%is_you:update%north%ie_you, update%north%js_you:update%north%je_you,ke)
integer(8) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr_field, field)
pointer(ptr_buffer, buffer )
pointer(ptr_wbuffer, wbuffer)
pointer(ptr_ebuffer, ebuffer)
pointer(ptr_sbuffer, sbuffer)
pointer(ptr_nbuffer, nbuffer)
update_edge_only = BTEST(flags, EDGEONLY)
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
ptr_buffer = LOC(mpp_domains_stack)
l_size = size(f_addrs(:))
!--- pre-post receiving
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_recv_clock)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_FINE_3D_: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
call mpp_clock_end(nest_recv_clock)
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
!--- pack and send the data
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_pack_clock)
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_FINE_3D_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
endif
end do ! do n = 1, overPtr%count
call mpp_clock_end(nest_pack_clock)
call mpp_clock_begin(nest_send_clock)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = pos
end if
call mpp_clock_end(nest_send_clock)
end do ! end do m = 1, update%nsend
!unpack buffer
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(nest_wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(nest_unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
dir = overPtr%dir(n)
if( recv(dir) ) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
select case (dir)
case ( 1 ) ! east
do l=1,l_size ! loop over number of fields
ptr_ebuffer = eb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
ebuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 3 ) ! south
do l=1,l_size ! loop over number of fields
ptr_sbuffer = sb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
sbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 5 ) ! west
do l=1,l_size ! loop over number of fields
ptr_wbuffer = wb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
wbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 7 ) ! north
do l=1,l_size ! loop over number of fields
ptr_nbuffer = nb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
nbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
end select
endif
end do ! do n = overPtr%count, 1, -1
end do
call mpp_clock_end(nest_unpk_clock)
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self( )
call mpp_clock_end(nest_wait_clock)
return
end subroutine mpp_do_update_nest_fine_i8_3D
!###############################################################################
subroutine mpp_do_update_nest_coarse_i8_3D(f_addrs, nest_domain, update, d_type, ke, b_addrs)
integer(8), intent(in) :: f_addrs(:)
type(nest_domain_type), intent(in) :: nest_domain
type(nestSpec), intent(in) :: update
integer(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer(8), intent(in) :: b_addrs(:)
character(len=8) :: text
type(overlap_type), pointer :: overPtr => NULL()
integer :: from_pe, to_pe
integer :: m, n, l, i, j, k
integer :: is, ie, js, je, l_size
integer :: buffer_pos, msgsize
integer :: buffer_recv_size, pos
integer(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
integer(8) :: fillbuffer(update%center%is_you:update%center%ie_you, update%center%js_you :update%center%je_you, ke)
integer(8) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr_field, field)
pointer(ptr_buffer, buffer )
pointer(ptr_fillbuffer, fillbuffer)
ptr_buffer = LOC(mpp_domains_stack)
l_size = size(f_addrs(:))
!--- pre-post receiving
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_recv_clock)
msgsize = 0
do n = 1, overPtr%count
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_COARSE_3D_: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
call mpp_clock_end(nest_recv_clock)
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
!--- pack and send the data
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_pack_clock)
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_COARSE_3D_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end do ! do n = 1, overPtr%count
call mpp_clock_end(nest_pack_clock)
call mpp_clock_begin(nest_send_clock)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
call mpp_clock_end(nest_send_clock)
end do ! end do m = 1, update%nsend
!unpack buffer
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(nest_wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(nest_unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fillbuffer = b_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fillbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
end do ! do n = overPtr%count, 1, -1
end do
call mpp_clock_end(nest_unpk_clock)
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self( )
call mpp_clock_end(nest_wait_clock)
return
end subroutine mpp_do_update_nest_coarse_i8_3D
# 1434 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_update_nest.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_nest_fine_r4_3D(f_addrs, nest_domain, update, d_type, ke, wb_addrs, eb_addrs, &
sb_addrs, nb_addrs, flags)
integer(8), intent(in) :: f_addrs(:)
type(nest_domain_type), intent(in) :: nest_domain
type(nestSpec), intent(in) :: update
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer(8), intent(in) :: wb_addrs(:)
integer(8), intent(in) :: eb_addrs(:)
integer(8), intent(in) :: sb_addrs(:)
integer(8), intent(in) :: nb_addrs(:)
integer, intent(in) :: flags
character(len=8) :: text
type(overlap_type), pointer :: overPtr => NULL()
logical :: send(8), recv(8), update_edge_only
integer :: from_pe, to_pe, dir
integer :: m, n, l, i, j, k
integer :: is, ie, js, je, l_size
integer :: buffer_pos, msgsize
integer :: buffer_recv_size, pos
real(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
real(4) :: wbuffer(update%west%is_you :update%west%ie_you, update%west%js_you :update%west%je_you, ke)
real(4) :: ebuffer(update%east%is_you :update%east%ie_you, update%east%js_you :update%east%je_you, ke)
real(4) :: sbuffer(update%south%is_you:update%south%ie_you, update%south%js_you:update%south%je_you,ke)
real(4) :: nbuffer(update%north%is_you:update%north%ie_you, update%north%js_you:update%north%je_you,ke)
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr_field, field)
pointer(ptr_buffer, buffer )
pointer(ptr_wbuffer, wbuffer)
pointer(ptr_ebuffer, ebuffer)
pointer(ptr_sbuffer, sbuffer)
pointer(ptr_nbuffer, nbuffer)
update_edge_only = BTEST(flags, EDGEONLY)
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
ptr_buffer = LOC(mpp_domains_stack)
l_size = size(f_addrs(:))
!--- pre-post receiving
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_recv_clock)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_FINE_3D_: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
call mpp_clock_end(nest_recv_clock)
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
!--- pack and send the data
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_pack_clock)
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_FINE_3D_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
endif
end do ! do n = 1, overPtr%count
call mpp_clock_end(nest_pack_clock)
call mpp_clock_begin(nest_send_clock)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = pos
end if
call mpp_clock_end(nest_send_clock)
end do ! end do m = 1, update%nsend
!unpack buffer
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(nest_wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(nest_unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
dir = overPtr%dir(n)
if( recv(dir) ) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
select case (dir)
case ( 1 ) ! east
do l=1,l_size ! loop over number of fields
ptr_ebuffer = eb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
ebuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 3 ) ! south
do l=1,l_size ! loop over number of fields
ptr_sbuffer = sb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
sbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 5 ) ! west
do l=1,l_size ! loop over number of fields
ptr_wbuffer = wb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
wbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 7 ) ! north
do l=1,l_size ! loop over number of fields
ptr_nbuffer = nb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
nbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
end select
endif
end do ! do n = overPtr%count, 1, -1
end do
call mpp_clock_end(nest_unpk_clock)
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self( )
call mpp_clock_end(nest_wait_clock)
return
end subroutine mpp_do_update_nest_fine_r4_3D
!###############################################################################
subroutine mpp_do_update_nest_coarse_r4_3D(f_addrs, nest_domain, update, d_type, ke, b_addrs)
integer(8), intent(in) :: f_addrs(:)
type(nest_domain_type), intent(in) :: nest_domain
type(nestSpec), intent(in) :: update
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer(8), intent(in) :: b_addrs(:)
character(len=8) :: text
type(overlap_type), pointer :: overPtr => NULL()
integer :: from_pe, to_pe
integer :: m, n, l, i, j, k
integer :: is, ie, js, je, l_size
integer :: buffer_pos, msgsize
integer :: buffer_recv_size, pos
real(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
real(4) :: fillbuffer(update%center%is_you:update%center%ie_you, update%center%js_you :update%center%je_you, ke)
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr_field, field)
pointer(ptr_buffer, buffer )
pointer(ptr_fillbuffer, fillbuffer)
ptr_buffer = LOC(mpp_domains_stack)
l_size = size(f_addrs(:))
!--- pre-post receiving
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_recv_clock)
msgsize = 0
do n = 1, overPtr%count
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_COARSE_3D_: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
call mpp_clock_end(nest_recv_clock)
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
!--- pack and send the data
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_pack_clock)
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_COARSE_3D_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end do ! do n = 1, overPtr%count
call mpp_clock_end(nest_pack_clock)
call mpp_clock_begin(nest_send_clock)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
call mpp_clock_end(nest_send_clock)
end do ! end do m = 1, update%nsend
!unpack buffer
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(nest_wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(nest_unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fillbuffer = b_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fillbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
end do ! do n = overPtr%count, 1, -1
end do
call mpp_clock_end(nest_unpk_clock)
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self( )
call mpp_clock_end(nest_wait_clock)
return
end subroutine mpp_do_update_nest_coarse_r4_3D
# 1446 "../mpp/include/mpp_domains_misc.inc" 2
# 1458
# 1 "../mpp/include/mpp_do_update_nest.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_nest_fine_i4_3D(f_addrs, nest_domain, update, d_type, ke, wb_addrs, eb_addrs, &
sb_addrs, nb_addrs, flags)
integer(8), intent(in) :: f_addrs(:)
type(nest_domain_type), intent(in) :: nest_domain
type(nestSpec), intent(in) :: update
integer(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer(8), intent(in) :: wb_addrs(:)
integer(8), intent(in) :: eb_addrs(:)
integer(8), intent(in) :: sb_addrs(:)
integer(8), intent(in) :: nb_addrs(:)
integer, intent(in) :: flags
character(len=8) :: text
type(overlap_type), pointer :: overPtr => NULL()
logical :: send(8), recv(8), update_edge_only
integer :: from_pe, to_pe, dir
integer :: m, n, l, i, j, k
integer :: is, ie, js, je, l_size
integer :: buffer_pos, msgsize
integer :: buffer_recv_size, pos
integer(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
integer(4) :: wbuffer(update%west%is_you :update%west%ie_you, update%west%js_you :update%west%je_you, ke)
integer(4) :: ebuffer(update%east%is_you :update%east%ie_you, update%east%js_you :update%east%je_you, ke)
integer(4) :: sbuffer(update%south%is_you:update%south%ie_you, update%south%js_you:update%south%je_you,ke)
integer(4) :: nbuffer(update%north%is_you:update%north%ie_you, update%north%js_you:update%north%je_you,ke)
integer(4) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr_field, field)
pointer(ptr_buffer, buffer )
pointer(ptr_wbuffer, wbuffer)
pointer(ptr_ebuffer, ebuffer)
pointer(ptr_sbuffer, sbuffer)
pointer(ptr_nbuffer, nbuffer)
update_edge_only = BTEST(flags, EDGEONLY)
recv(1) = BTEST(flags,EAST)
recv(3) = BTEST(flags,SOUTH)
recv(5) = BTEST(flags,WEST)
recv(7) = BTEST(flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
ptr_buffer = LOC(mpp_domains_stack)
l_size = size(f_addrs(:))
!--- pre-post receiving
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_recv_clock)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_FINE_3D_: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
call mpp_clock_end(nest_recv_clock)
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
!--- pack and send the data
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_pack_clock)
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_FINE_3D_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
endif
end do ! do n = 1, overPtr%count
call mpp_clock_end(nest_pack_clock)
call mpp_clock_begin(nest_send_clock)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = pos
end if
call mpp_clock_end(nest_send_clock)
end do ! end do m = 1, update%nsend
!unpack buffer
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(nest_wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(nest_unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
dir = overPtr%dir(n)
if( recv(dir) ) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
select case (dir)
case ( 1 ) ! east
do l=1,l_size ! loop over number of fields
ptr_ebuffer = eb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
ebuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 3 ) ! south
do l=1,l_size ! loop over number of fields
ptr_sbuffer = sb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
sbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 5 ) ! west
do l=1,l_size ! loop over number of fields
ptr_wbuffer = wb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
wbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
case ( 7 ) ! north
do l=1,l_size ! loop over number of fields
ptr_nbuffer = nb_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
nbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
end select
endif
end do ! do n = overPtr%count, 1, -1
end do
call mpp_clock_end(nest_unpk_clock)
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self( )
call mpp_clock_end(nest_wait_clock)
return
end subroutine mpp_do_update_nest_fine_i4_3D
!###############################################################################
subroutine mpp_do_update_nest_coarse_i4_3D(f_addrs, nest_domain, update, d_type, ke, b_addrs)
integer(8), intent(in) :: f_addrs(:)
type(nest_domain_type), intent(in) :: nest_domain
type(nestSpec), intent(in) :: update
integer(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer(8), intent(in) :: b_addrs(:)
character(len=8) :: text
type(overlap_type), pointer :: overPtr => NULL()
integer :: from_pe, to_pe
integer :: m, n, l, i, j, k
integer :: is, ie, js, je, l_size
integer :: buffer_pos, msgsize
integer :: buffer_recv_size, pos
integer(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
integer(4) :: fillbuffer(update%center%is_you:update%center%ie_you, update%center%js_you :update%center%je_you, ke)
integer(4) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr_field, field)
pointer(ptr_buffer, buffer )
pointer(ptr_fillbuffer, fillbuffer)
ptr_buffer = LOC(mpp_domains_stack)
l_size = size(f_addrs(:))
!--- pre-post receiving
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_recv_clock)
msgsize = 0
do n = 1, overPtr%count
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
from_pe = overPtr%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_COARSE_3D_: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
call mpp_clock_end(nest_recv_clock)
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
!--- pack and send the data
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
call mpp_clock_begin(nest_pack_clock)
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_UPDATE_NEST_COARSE_3D_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
end if
do n = 1, overPtr%count
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end do ! do n = 1, overPtr%count
call mpp_clock_end(nest_pack_clock)
call mpp_clock_begin(nest_send_clock)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
call mpp_clock_end(nest_send_clock)
end do ! end do m = 1, update%nsend
!unpack buffer
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(nest_wait_clock)
buffer_pos = buffer_recv_size
call mpp_clock_begin(nest_unpk_clock)
do m = update%nrecv, 1, -1
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
do n = overPtr%count, 1, -1
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fillbuffer = b_addrs(l)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fillbuffer(i,j,k) = buffer(pos)
end do
end do
end do
end do
end do ! do n = overPtr%count, 1, -1
end do
call mpp_clock_end(nest_unpk_clock)
call mpp_clock_begin(nest_wait_clock)
call mpp_sync_self( )
call mpp_clock_end(nest_wait_clock)
return
end subroutine mpp_do_update_nest_coarse_i4_3D
# 1469 "../mpp/include/mpp_domains_misc.inc" 2
!bnc
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_UPDATE_DOMAINS_AD: adjoint fill halos for 2D decomposition !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
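! A minimal usage sketch (illustrative only; "domain" and "adj_field" are
! assumed names: a domain2D set up elsewhere with mpp_define_domains and an
! adjoint variable allocated on the data domain):
!
!   real(8), allocatable :: adj_field(:,:,:)
!   ...
!   call mpp_update_domains_ad( adj_field, domain )            ! default halo
!   call mpp_update_domains_ad( adj_field, domain, whalo=1, ehalo=1, &
!                               shalo=1, nhalo=1 )              ! restricted halo
!
! The optional whalo/ehalo/shalo/nhalo arguments must not exceed the halo
! widths the domain was defined with; this is checked at the top of each
! routine below.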
# 1 "../mpp/include/mpp_update_domains2D_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
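! The 2D, 4D and 5D entry points in this include are thin wrappers: each one
! remaps its argument onto a 3D view through a Cray pointer (collapsing the
! trailing dimensions into the third index) and forwards to the 3D routine,
! which does the actual work.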
subroutine mpp_update_domains_ad_2D_r8_2D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 2D field whose computational domains have been computed
real(8), intent(inout) :: field(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains_ad( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domains_ad_2D_r8_2D
subroutine mpp_update_domains_ad_2D_r8_3D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 3D field whose computational domains have been computed
real(8), intent(inout) :: field(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer :: update_position, update_whalo, update_ehalo, update_shalo, update_nhalo, ntile
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES),save :: f_addrs=-9999
integer :: tile, max_ntile
character(len=3) :: text
logical :: set_mismatch, is_complete
logical :: do_update
integer, save :: isize=0, jsize=0, ke=0, l_size=0, list=0
integer, save :: pos, whalosz, ehalosz, shalosz, nhalosz
real(8) :: d_type
type(overlapSpec), pointer :: update => NULL()
type(overlapSpec), pointer :: check => NULL()
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
!--- when there is NINETY or MINUS_NINETY rotation for some contact, the scalar data cannot be on E or N-cell,
if(present(position)) then
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
call mpp_error(FATAL, 'MPP_UPDATE_AD_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'cannot use the scalar version of update_domains for data on E or N-cell' )
end if
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_UPDATE_AD_3D: MAX_TILES='//text//' is less than the number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
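! Group-update bookkeeping: the address of each field in a group is stashed in
! f_addrs; the update itself is deferred until the final call of the group
! (complete is true and the last tile has been seen), at which point the whole
! list is handed to mpp_do_update_ad in a single call, after a consistency
! check that every member shares the same shape, halo widths and grid position.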
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_AD_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
update_position = CENTER
if(present(position)) update_position = position
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ke = size(field,3); pos = update_position
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize /= size(field,1))
set_mismatch = set_mismatch .OR. (jsize /= size(field,2))
set_mismatch = set_mismatch .OR. (ke /= size(field,3))
set_mismatch = set_mismatch .OR. (update_position /= pos)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_AD_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
if(debug_update_level .NE. NO_CHECK) then
check => search_check_overlap(domain, update_position)
if(ASSOCIATED(check) ) then
call mpp_do_check(f_addrs(1:l_size,1:ntile), domain, check, d_type, ke, flags, name )
endif
endif
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
!call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, &
! b_addrs(1:l_size,1:ntile), bsize, flags)
if ( PRESENT ( flags ) ) then
call mpp_do_update_ad( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, flags )
else
call mpp_do_update_ad( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke )
endif
end if
l_size=0; f_addrs=-9999; isize=0; jsize=0; ke=0
endif
return
end subroutine mpp_update_domains_ad_2D_r8_3D
subroutine mpp_update_domains_ad_2D_r8_4D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 4D field whose computational domains have been computed
real(8), intent(inout) :: field(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains_ad( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domains_ad_2D_r8_4D
subroutine mpp_update_domains_ad_2D_r8_5D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 5D field whose computational domains have been computed
real(8), intent(inout) :: field(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains_ad( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domains_ad_2D_r8_5D
! (vector-field versions are generated only for real types, not for integers)
!vector fields
subroutine mpp_update_domains_ad_2D_r8_2Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 2D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:), fieldy(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),1)
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),1)
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains_ad( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domains_ad_2D_r8_2Dv
subroutine mpp_update_domains_ad_2D_r8_3Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 3D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo, ntile
integer :: grid_offset_type
logical :: exchange_uv
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES),save :: f_addrsx=-9999, f_addrsy=-9999
logical :: do_update, is_complete
integer, save :: isize(2)=0,jsize(2)=0,ke=0,l_size=0, offset_type=0, list=0
integer, save :: whalosz, ehalosz, shalosz, nhalosz
integer :: tile, max_ntile
integer :: position_x, position_y
logical :: set_mismatch
character(len=3) :: text
real(8) :: d_type
type(overlapSpec), pointer :: updatex => NULL()
type(overlapSpec), pointer :: updatey => NULL()
type(overlapSpec), pointer :: checkx => NULL()
type(overlapSpec), pointer :: checky => NULL()
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D_V: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D_V: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D_V: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D_V: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
grid_offset_type = AGRID
if( PRESENT(gridtype) ) grid_offset_type = gridtype
exchange_uv = .false.
if(grid_offset_type == DGRID_NE) then
exchange_uv = .true.
grid_offset_type = CGRID_NE
else if( grid_offset_type == DGRID_SW ) then
exchange_uv = .true.
grid_offset_type = CGRID_SW
end if
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_UPDATE_AD_3D_V: MAX_TILES='//text//' is less than the number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D_V: "// &
"optional argument tile_count should be present when number of tiles on some pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_AD_3D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(list == 1 .AND. tile == 1)then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); ke = size(fieldx,3)
isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
offset_type = grid_offset_type
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) /= size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) /= size(fieldx,2))
set_mismatch = set_mismatch .OR. (ke /= size(fieldx,3))
set_mismatch = set_mismatch .OR. (isize(2) /= size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) /= size(fieldy,2))
set_mismatch = set_mismatch .OR. (ke /= size(fieldy,3))
set_mismatch = set_mismatch .OR. (grid_offset_type /= offset_type)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_AD_3D_V: Incompatible field at count '//text//' for group vector update.' )
end if
end if
if(is_complete) then
l_size = list
list = 0
end if
if(do_update)then
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case default
call mpp_error(FATAL, "mpp_update_domains2D_ad.h: invalid value of grid_offset_type")
end select
if(debug_update_level .NE. NO_CHECK) then
checkx => search_check_overlap(domain, position_x)
checky => search_check_overlap(domain, position_y)
if(ASSOCIATED(checkx)) then
if(exchange_uv) then
call mpp_do_check(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, &
checky, checkx, d_type, ke, flags, name)
else
call mpp_do_check(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, &
checkx, checky, d_type, ke, flags, name)
end if
endif
endif
updatex => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_x)
updatey => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_y)
if(exchange_uv) then
call mpp_do_update_ad(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, updatey, updatex, &
d_type,ke, grid_offset_type, flags)
else
call mpp_do_update_ad(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, updatex, updatey, &
d_type,ke,grid_offset_type, flags)
end if
end if
l_size=0; f_addrsx=-9999; f_addrsy=-9999; isize=0; jsize=0; ke=0
end if
return
end subroutine mpp_update_domains_ad_2D_r8_3Dv
subroutine mpp_update_domains_ad_2D_r8_4Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 4D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4))
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
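! The 4D arguments are aliased onto rank-3 arrays through Cray pointers so that the
! 3D adjoint update routine can be reused; the trailing dimensions are folded into ke.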
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains_ad( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domains_ad_2D_r8_4Dv
subroutine mpp_update_domains_ad_2D_r8_5Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 5D field whose computational domains have been computed
real(8), intent(inout) :: fieldx(:,:,:,:,:), fieldy(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4)*size(fieldx,5))
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4)*size(fieldy,5))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains_ad( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domains_ad_2D_r8_5Dv
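! Illustrative usage (a sketch only; u_ad and v_ad are hypothetical user fields):
! the routines above are reached through the generic mpp_update_domains_ad interface,
! e.g. for a C-grid adjoint velocity pair dimensioned on the data domain:
!   call mpp_update_domains_ad( u_ad, v_ad, domain, gridtype=CGRID_NE )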
# 1499 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_update_domains2D_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_update_domains_ad_2D_r4_2D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 2D field whose computational domains have been computed
real(4), intent(inout) :: field(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains_ad( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domains_ad_2D_r4_2D
subroutine mpp_update_domains_ad_2D_r4_3D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 3D field whose computational domains have been computed
real(4), intent(inout) :: field(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer :: update_position, update_whalo, update_ehalo, update_shalo, update_nhalo, ntile
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES),save :: f_addrs=-9999
integer :: tile, max_ntile
character(len=3) :: text
logical :: set_mismatch, is_complete
logical :: do_update
integer, save :: isize=0, jsize=0, ke=0, l_size=0, list=0
integer, save :: pos, whalosz, ehalosz, shalosz, nhalosz
real(4) :: d_type
type(overlapSpec), pointer :: update => NULL()
type(overlapSpec), pointer :: check => NULL()
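! The blocks below validate any user-supplied halo widths against the halos the
! domain was defined with; absent arguments default to the full domain halos.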
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
!--- when there is NINETY or MINUS_NINETY rotation for some contact, scalar data cannot be on E- or N-cells
if(present(position)) then
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
call mpp_error(FATAL, 'MPP_UPDATE_AD_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'cannot use the scalar version of update_domains for data on E or N-cell' )
end if
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_UPDATE_AD_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_AD_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
update_position = CENTER
if(present(position)) update_position = position
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ke = size(field,3); pos = update_position
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize /= size(field,1))
set_mismatch = set_mismatch .OR. (jsize /= size(field,2))
set_mismatch = set_mismatch .OR. (ke /= size(field,3))
set_mismatch = set_mismatch .OR. (update_position /= pos)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_AD_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
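! Once every field of a (possibly multi-field, multi-tile) group has been registered,
! do_update is true and the adjoint halo exchange is carried out in a single pass below.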
if(do_update )then
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
if(debug_update_level .NE. NO_CHECK) then
check => search_check_overlap(domain, update_position)
if(ASSOCIATED(check) ) then
call mpp_do_check(f_addrs(1:l_size,1:ntile), domain, check, d_type, ke, flags, name )
endif
endif
update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
!call mpp_do_update( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, &
! b_addrs(1:l_size,1:ntile), bsize, flags)
if ( PRESENT ( flags ) ) then
call mpp_do_update_ad( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke, flags )
else
call mpp_do_update_ad( f_addrs(1:l_size,1:ntile), domain, update, d_type, ke )
endif
end if
l_size=0; f_addrs=-9999; isize=0; jsize=0; ke=0
endif
return
end subroutine mpp_update_domains_ad_2D_r4_3D
subroutine mpp_update_domains_ad_2D_r4_4D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 4D field whose computational domains have been computed
real(4), intent(inout) :: field(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains_ad( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domains_ad_2D_r4_4D
subroutine mpp_update_domains_ad_2D_r4_5D( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 5D field whose computational domains have been computed
real(4), intent(inout) :: field(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
logical, intent(in), optional :: complete
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_update_domains_ad( field3D, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
return
end subroutine mpp_update_domains_ad_2D_r4_5D
!vector fields
subroutine mpp_update_domains_ad_2D_r4_2Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 2D field whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:), fieldy(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),1)
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),1)
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains_ad( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domains_ad_2D_r4_2Dv
subroutine mpp_update_domains_ad_2D_r4_3Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
!updates data domain of 3D field whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo, ntile
integer :: grid_offset_type
logical :: exchange_uv
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES),save :: f_addrsx=-9999, f_addrsy=-9999
logical :: do_update, is_complete
integer, save :: isize(2)=0,jsize(2)=0,ke=0,l_size=0, offset_type=0, list=0
integer, save :: whalosz, ehalosz, shalosz, nhalosz
integer :: tile, max_ntile
integer :: position_x, position_y
logical :: set_mismatch
character(len=3) :: text
real(4) :: d_type
type(overlapSpec), pointer :: updatex => NULL()
type(overlapSpec), pointer :: updatey => NULL()
type(overlapSpec), pointer :: checkx => NULL()
type(overlapSpec), pointer :: checky => NULL()
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D_V: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D_V: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D_V: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D_V: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
grid_offset_type = AGRID
if( PRESENT(gridtype) ) grid_offset_type = gridtype
exchange_uv = .false.
if(grid_offset_type == DGRID_NE) then
exchange_uv = .true.
grid_offset_type = CGRID_NE
else if( grid_offset_type == DGRID_SW ) then
exchange_uv = .true.
grid_offset_type = CGRID_SW
end if
max_ntile = domain%max_ntile_pe
ntile = size(domain%x(:))
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
tile = 1
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_UPDATE_AD_3D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_UPDATE_AD_3D_V: "// &
"optional argument tile_count should be present when number of tiles on some pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_UPDATE_AD_3D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(list == 1 .AND. tile == 1)then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); ke = size(fieldx,3)
isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
offset_type = grid_offset_type
whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) /= size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) /= size(fieldx,2))
set_mismatch = set_mismatch .OR. (ke /= size(fieldx,3))
set_mismatch = set_mismatch .OR. (isize(2) /= size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) /= size(fieldy,2))
set_mismatch = set_mismatch .OR. (ke /= size(fieldy,3))
set_mismatch = set_mismatch .OR. (grid_offset_type /= offset_type)
set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_UPDATE_AD_3D_V: Incompatible field at count '//text//' for group vector update.' )
end if
end if
if(is_complete) then
l_size = list
list = 0
end if
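! When the group is complete, the staggering of the x- and y-components
! (CENTER, CORNER, or EAST/NORTH faces) is selected from the grid type and the
! adjoint vector exchange is performed below.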
if(do_update)then
if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case default
call mpp_error(FATAL, "mpp_update_domains2D_ad.h: invalid value of grid_offset_type")
end select
if(debug_update_level .NE. NO_CHECK) then
checkx => search_check_overlap(domain, position_x)
checky => search_check_overlap(domain, position_y)
if(ASSOCIATED(checkx)) then
if(exchange_uv) then
call mpp_do_check(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, &
checky, checkx, d_type, ke, flags, name)
else
call mpp_do_check(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, &
checkx, checky, d_type, ke, flags, name)
end if
endif
endif
updatex => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_x)
updatey => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_y)
if(exchange_uv) then
call mpp_do_update_ad(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, updatey, updatex, &
d_type,ke, grid_offset_type, flags)
else
call mpp_do_update_ad(f_addrsx(1:l_size,1:ntile),f_addrsy(1:l_size,1:ntile), domain, updatex, updatey, &
d_type,ke,grid_offset_type, flags)
end if
end if
l_size=0; f_addrsx=-9999; f_addrsy=-9999; isize=0; jsize=0; ke=0
end if
return
end subroutine mpp_update_domains_ad_2D_r4_3Dv
subroutine mpp_update_domains_ad_2D_r4_4Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 4D field whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4))
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains_ad( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domains_ad_2D_r4_4Dv
subroutine mpp_update_domains_ad_2D_r4_5Dv( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count )
!updates data domain of 5D field whose computational domains have been computed
real(4), intent(inout) :: fieldx(:,:,:,:,:), fieldy(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
logical, intent(in), optional :: complete
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
character(len=*), intent(in), optional :: name
integer, intent(in), optional :: tile_count
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4)*size(fieldx,5))
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4)*size(fieldy,5))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_update_domains_ad( field3Dx, field3Dy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
return
end subroutine mpp_update_domains_ad_2D_r4_5Dv
# 1524 "../mpp/include/mpp_domains_misc.inc" 2
!!$
!!$!*******************************************************
# 1 "../mpp/include/mpp_do_update_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_ad_r8_3d( f_addrs, domain, update, d_type, ke, flags)
!updates data domain of 3D field whose computational domains have been computed
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
real(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
type(overlap_type), pointer :: overPtr => NULL()
character(len=8) :: text
!equate to mpp_domains_stack
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer, allocatable :: msg1(:), msg2(:)
logical :: send(8), recv(8), update_edge_only
integer :: to_pe, from_pe, pos, msgsize, msgsize_send
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe, dir
integer :: start, start1, start2, index, is1, ie1, js1, je1, ni, nj, total
integer :: buffer_recv_size, nlist, outunit
integer :: send_start_pos
integer :: send_msgsize(MAXLIST)
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_edge_only = BTEST(update_flags, EDGEONLY)
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
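! recv(1:8) encode the eight halo directions: entries 1,3,5,7 are the E, S, W, N
! edges taken from update_flags, and entries 2,4,6,8 are the intervening corners,
! enabled only when both adjacent edges are updated. The same mask is used for sends.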
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = update%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
msg2(l) = msgsize
enddo
do m = 1, update%nsend
overPtr => update%send(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(send(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
call mpp_send( msgsize, plen=1, to_pe=overPtr%pe, tag=COMM_TAG_1 )
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_update: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_update: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
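! Adjoint communication pattern: this routine reverses the forward halo update.
! Halo points described by the forward receive list are read into the buffer and
! zeroed, sent back to the PEs they originated from, and the incoming contributions
! are then accumulated onto the owned points described by the forward send list.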
!--- pack halo contributions (zeroing the halo points) and send them back: adjoint of the forward recv
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
msgsize_send = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos + msgsize_send
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = ke,1,-1
do j = je, js, -1
do i = ie, is, -1
buffer(pos) = field(i,j,k)
field(i,j,k) = 0.
pos = pos - 1
end do
end do
end do
end do
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
! post receives for the contributions destined for our owned points: adjoint of the forward send
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
msgsize_send = msgsize
from_pe = overPtr%pe
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nsend
call mpp_sync_self(check=EVENT_RECV)
buffer_pos = buffer_recv_size
! accumulate the received contributions onto the owned points: adjoint of the forward send
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
buffer_pos = pos
end if
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
select case( overPtr%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, overPtr%count
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
buffer_pos = pos
end if
end do ! end do m = 1, update%nsend
call mpp_sync_self()
return
end subroutine mpp_do_update_ad_r8_3d
# 1540 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_updateV_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_ad_r8_3dv(f_addrsx,f_addrsy, domain, update_x, update_y, &
d_type, ke, gridtype, flags)
!updates data domain of 3D field whose computational domains have been computed
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update_x, update_y
integer, intent(in) :: ke
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: gridtype
integer, intent(in), optional :: flags
real(8) :: fieldx(update_x%xbegin:update_x%xend, update_x%ybegin:update_x%yend,ke)
real(8) :: fieldy(update_y%xbegin:update_y%xend, update_y%ybegin:update_y%yend,ke)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
integer :: update_flags
integer :: l_size, l, i, j, k, is, ie, js, je, n, m
integer :: pos, nlist, msgsize
integer :: to_pe, from_pe, midpoint
integer :: tMe, dir
integer :: send_start_pos, nsend
integer :: send_msgsize(2*MAXLIST)
integer :: send_pe(2*MAXLIST)
integer, allocatable :: msg1(:), msg2(:)
logical :: send(8), recv(8), update_edge_only
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr,buffer )
integer :: buffer_pos
character(len=8) :: text
integer :: buffer_recv_size, shift
integer :: rank_x, rank_y, ind_x, ind_y, cur_rank
integer :: nsend_x, nsend_y, nrecv_x, nrecv_y, outunit
outunit = stdout()
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) ) then
update_flags = flags
! The following test is so that SCALAR_PAIR can be used alone with the
! same default update pattern as without.
if (BTEST(update_flags,SCALAR_BIT)) then
if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
.OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
end if
end if
if( BTEST(update_flags,NORTH) .AND. BTEST(domain%fold,NORTH) .AND. BTEST(gridtype,SOUTH) ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: Incompatible grid offset and fold.' )
update_edge_only = BTEST(update_flags, EDGEONLY)
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
l_size = size(f_addrsx,1)
nlist = size(domain%list(:))
ptr = LOC(mpp_domains_stack)
!recv
nsend_x = update_x%nsend
nsend_y = update_y%nsend
nrecv_x = update_x%nrecv
nrecv_y = update_y%nrecv
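! The x- and y-component overlap lists are traversed together in PE order using
! get_rank_recv/get_rank_send and cur_rank, so that contributions for the same
! neighbour can be handled in one message.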
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
msgsize = 0
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = update_y%recv(ind_y)%pe
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
m = from_pe-mpp_root_pe()
call mpp_recv( msg1(m), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(m) = msgsize
end do
cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
msgsize = 0
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) then
is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = update_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
call mpp_send( msgsize, plen=1, to_pe=to_pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_updateV: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_updateV: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
buffer_pos = 0
cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
msgsize = 0
select case(gridtype)
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*2
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = update_y%recv(ind_y)%pe
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
end select
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
!--- adjoint of unpacking: harvest halo values into the buffer (zeroing them in the fields)
buffer_pos = buffer_recv_size
cur_rank = get_rank_unpack(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x > 0 .OR. ind_y > 0)
pos = buffer_pos
select case ( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*2*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 2
buffer(pos-1) = fieldx(i,j,k)
buffer(pos) = fieldy(i,j,k)
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end do
end do
end if ! end if( recv(dir) )
end do ! do n = update_x%recv(ind_x)%count, 1, -1
ind_x = ind_x-1
ind_y = ind_x
if(ind_x .GT. 0) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_y) then
do n = update_y%recv(ind_y)%count, 1, -1
dir = update_y%recv(ind_y)%dir(n)
if( recv(dir) ) then
tMe = update_y%recv(ind_y)%tileMe(n)
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
fieldy(i,j,k) = 0.
end do
end do
end do
end do
end if
end do
ind_y = ind_y-1
if(ind_y .GT. 0) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
if(cur_rank == rank_x) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
fieldx(i,j,k) = 0.
end do
end do
end do
end do
end if
end do
ind_x = ind_x-1
if(ind_x .GT. 0) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
end select
cur_rank = min(rank_x, rank_y)
end do
! ---northern boundary fold
shift = 0
if(domain%symmetry) shift = 1
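! Folded-boundary corrections (adjoint counterparts of the forward fold treatment):
! pole points are zeroed for BGRID, halo contributions reflected across the fold are
! folded back onto their source points, and off-edge halo values have their sign
! flipped where the forward update negated them.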
if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
if( .NOT. domain%symmetry ) is = is - 1
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the halo data coming from the
! other half of the fold may have the wrong sign
!off west edge, when updating the north or west direction
j = domain%y(1)%global%end+shift
if ( recv(7) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
if(domain%symmetry) then
is = domain%x(1)%global%begin
else
is = domain%x(1)%global%begin - 1
end if
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-north BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldx(2*is-i,j,k) = fieldx(2*is-i,j,k) + fieldx(i,j,k)
fieldy(2*is-i,j,k) = fieldy(2*is-i,j,k) + fieldy(i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-north CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldy(2*is-i-1,j,k) = fieldy(2*is-i-1,j,k) + fieldy(i,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%data%end
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,SOUTH) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---southern boundary fold
! NOTE: symmetry is assumed for fold-south boundary
j = domain%y(1)%global%begin
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
j = domain%y(1)%global%begin
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the halo data coming from the
! other half of the fold may have the wrong sign
!off west edge, when updating the south or west direction
j = domain%y(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-south BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldx(2*is-i,j,k) = fieldx(2*is-i,j,k) + fieldx(i,j,k)
fieldy(2*is-i,j,k) = fieldy(2*is-i,j,k) + fieldy(i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-south CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldy(2*is-i-1,j,k) = fieldy(2*is-i-1,j,k) + fieldy(i,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%data%end
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,WEST) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---western boundary fold
! NOTE: symmetry is assumed for fold-west boundary
i = domain%x(1)%global%begin
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%begin
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the halo data coming from the
! other half of the fold may have the wrong sign
!off south edge, when updating the south or west direction
i = domain%x(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-west BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,2*js-j,k) = fieldx(i,2*js-j,k) + fieldx(i,j,k)
fieldy(i,2*js-j,k) = fieldy(i,2*js-j,k) + fieldy(i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-west CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i, 2*js-j-1,k) = fieldx(i, 2*js-j-1,k) + fieldx(i,j,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,EAST) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---eastern boundary fold
! NOTE: symmetry is assumed for fold-east boundary
i = domain%x(1)%global%end+shift
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%end+shift
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the halo data coming from the
! other half of the fold may have the wrong sign
!off south edge, when updating the south or east direction
i = domain%x(1)%global%end+shift
if ( recv(3) .OR. recv(1) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-east BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,2*js-j,k) = fieldx(i,2*js-j,k) + fieldx(i,j,k)
fieldy(i,2*js-j,k) = fieldy(i,2*js-j,k) + fieldy(i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-east CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i, 2*js-j-1,k) = fieldx(i, 2*js-j-1,k) + fieldx(i,j,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
end if
!adjoint unpacking done
!--- send the packed halo contributions back to the PEs they came from: adjoint of the forward recv
buffer_pos = 0
cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
msgsize = 0
select case(gridtype)
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*2
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = update_y%recv(ind_y)%pe
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
end select
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=from_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
!--- compute the expected message size from this neighbour before posting the receive
msgsize = 0
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) msgsize = msgsize + update_x%send(ind_x)%msgsize(n)
enddo
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) msgsize = msgsize + update_y%send(ind_y)%msgsize(n)
enddo
endif
select case( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = update_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
end select
cur_rank = min(rank_x, rank_y)
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
end if
if( msgsize.GT.0 )then
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=to_pe, block=.false., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
enddo
call mpp_sync_self(check=EVENT_RECV)
!--- accumulate the received contributions onto the owned points: adjoint of the forward send
buffer_pos = buffer_recv_size
pos = buffer_pos
cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
select case( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 2
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos-1)
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 2
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos-1)
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 2
fieldy(i,j,k)=fieldy(i,j,k)-buffer(pos-1)
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
end if
case( NINETY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 2
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos-1)
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 2
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos-1)
fieldx(i,j,k)=fieldx(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos-1)
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
fieldx(i,j,k)=fieldx(i,j,k)-buffer(pos-1)
fieldy(i,j,k)=fieldy(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
end if ! if( send(dir) )
end do ! do n = 1, update_x%send(ind_x)%count
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
case(MINUS_NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
case(NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
end select
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) then
tMe = update_y%send(ind_y)%tileMe(n)
is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
select case( update_y%send(ind_y)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
case(MINUS_NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
case(NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
end select
endif
enddo
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = update_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
end select
cur_rank = min(rank_x, rank_y)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
buffer_pos = pos
end if
end do
call mpp_sync_self( )
return
end subroutine mpp_do_update_ad_r8_3dv
# 1541 "../mpp/include/mpp_domains_misc.inc" 2
# 1550
# 1 "../mpp/include/mpp_do_update_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_ad_i8_3d( f_addrs, domain, update, d_type, ke, flags)
!adjoint of the forward halo update of a 3D field: halo (data-domain) values are
!gathered, zeroed locally, and accumulated back onto the compute domains that produced them
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
integer(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
integer(8) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
type(overlap_type), pointer :: overPtr => NULL()
character(len=8) :: text
!equate to mpp_domains_stack
integer(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer, allocatable :: msg1(:), msg2(:)
logical :: send(8), recv(8), update_edge_only
integer :: to_pe, from_pe, pos, msgsize, msgsize_send
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe, dir
integer :: start, start1, start2, index, is1, ie1, js1, je1, ni, nj, total
integer :: buffer_recv_size, nlist, outunit
integer :: send_start_pos
integer :: send_msgsize(MAXLIST)
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_edge_only = BTEST(update_flags, EDGEONLY)
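! direction indexing used below (as implied by the assignments that follow):
! odd entries 1,3,5,7 are the E, S, W, N edges taken from the update flags;
! even entries 2,4,6,8 are the corners, each active only when both adjacent
! edges are requested.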
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = update%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
msg2(l) = msgsize
enddo
do m = 1, update%nsend
overPtr => update%send(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(send(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
call mpp_send( msgsize, plen=1, to_pe=overPtr%pe, tag=COMM_TAG_1 )
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_update: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_update: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
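! The adjoint exchange proceeds in three phases: (1) halo (recv-overlap) regions
! are packed into the stack buffer, zeroed and sent back to their source PEs;
! (2) non-blocking receives are posted for the contributions returning from the
! send-overlap PEs; (3) once the receives complete, the buffered values are
! accumulated onto the local compute-domain points.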
!--- phase 1: pack halo (recv-overlap) regions into the buffer, zero them and send
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
msgsize_send = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos + msgsize_send
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = ke,1,-1
do j = je, js, -1
do i = ie, is, -1
buffer(pos) = field(i,j,k)
field(i,j,k) = 0.
pos = pos - 1
end do
end do
end do
end do
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
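! buffer_recv_size marks the end of the outgoing (packed) data in the stack
! buffer; incoming messages are placed after this point and buffer_pos is
! rewound to it before accumulation.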
!--- phase 2: post non-blocking receives for the contributions returning from the send-overlap neighbours
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
msgsize_send = msgsize
from_pe = overPtr%pe
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nsend
call mpp_sync_self(check=EVENT_RECV)
buffer_pos = buffer_recv_size
!--- phase 3: accumulate the received contributions onto the compute-domain (send-overlap) regions
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
buffer_pos = pos
end if
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
select case( overPtr%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, overPtr%count
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
buffer_pos = pos
end if
end do ! end do m = 1, update%nsend
call mpp_sync_self()
return
end subroutine mpp_do_update_ad_i8_3d
# 1558 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_update_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_ad_r4_3d( f_addrs, domain, update, d_type, ke, flags)
!adjoint of the forward halo update of a 3D field: halo (data-domain) values are
!gathered, zeroed locally, and accumulated back onto the compute domains that produced them
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
real(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
type(overlap_type), pointer :: overPtr => NULL()
character(len=8) :: text
!equate to mpp_domains_stack
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer, allocatable :: msg1(:), msg2(:)
logical :: send(8), recv(8), update_edge_only
integer :: to_pe, from_pe, pos, msgsize, msgsize_send
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe, dir
integer :: start, start1, start2, index, is1, ie1, js1, je1, ni, nj, total
integer :: buffer_recv_size, nlist, outunit
integer :: send_start_pos
integer :: send_msgsize(MAXLIST)
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_edge_only = BTEST(update_flags, EDGEONLY)
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = update%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
msg2(l) = msgsize
enddo
do m = 1, update%nsend
overPtr => update%send(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(send(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
call mpp_send( msgsize, plen=1, to_pe=overPtr%pe, tag=COMM_TAG_1 )
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_update: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_update: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!--- pack halo (recv-overlap) regions into the buffer, zero them and send them back to their source PEs
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
msgsize_send = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos + msgsize_send
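! the buffer chunk for this overlap is filled backwards (k, j, i descending)
! so that its layout matches the forward update's ascending packing order; the
! halo points are zeroed as they are drained, the adjoint of the forward halo copy.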
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = ke,1,-1
do j = je, js, -1
do i = ie, is, -1
buffer(pos) = field(i,j,k)
field(i,j,k) = 0.
pos = pos - 1
end do
end do
end do
end do
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
!--- post non-blocking receives for the contributions returning from the send-overlap neighbours
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
msgsize_send = msgsize
from_pe = overPtr%pe
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nsend
call mpp_sync_self(check=EVENT_RECV)
buffer_pos = buffer_recv_size
!--- accumulate the received contributions onto the compute-domain (send-overlap) regions
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
buffer_pos = pos
end if
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
select case( overPtr%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, overPtr%count
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
buffer_pos = pos
end if
end do ! end do m = 1, update%nsend
call mpp_sync_self()
return
end subroutine mpp_do_update_ad_r4_3d
# 1572 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_updateV_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_ad_r4_3dv(f_addrsx,f_addrsy, domain, update_x, update_y, &
d_type, ke, gridtype, flags)
!adjoint of the forward halo update of a 3D vector field pair (fieldx,fieldy): halo values are
!gathered, zeroed locally, and accumulated back onto the compute domains that produced them
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
type(domain2d), intent(in) :: domain
type(overlapSpec), intent(in) :: update_x, update_y
integer, intent(in) :: ke
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: gridtype
integer, intent(in), optional :: flags
real(4) :: fieldx(update_x%xbegin:update_x%xend, update_x%ybegin:update_x%yend,ke)
real(4) :: fieldy(update_y%xbegin:update_y%xend, update_y%ybegin:update_y%yend,ke)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
integer :: update_flags
integer :: l_size, l, i, j, k, is, ie, js, je, n, m
integer :: pos, nlist, msgsize
integer :: to_pe, from_pe, midpoint
integer :: tMe, dir
integer :: send_start_pos, nsend
integer :: send_msgsize(2*MAXLIST)
integer :: send_pe(2*MAXLIST)
integer, allocatable :: msg1(:), msg2(:)
logical :: send(8), recv(8), update_edge_only
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer(ptr,buffer )
integer :: buffer_pos
character(len=8) :: text
integer :: buffer_recv_size, shift
integer :: rank_x, rank_y, ind_x, ind_y, cur_rank
integer :: nsend_x, nsend_y, nrecv_x, nrecv_y, outunit
outunit = stdout()
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) ) then
update_flags = flags
! The following test lets SCALAR_PAIR be used alone while still giving the
! same default update pattern as when it is absent.
if (BTEST(update_flags,SCALAR_BIT)) then
if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
.OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
end if
end if
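! trap an unsupported combination early: a northward update across a folded
! north boundary on a grid whose offset has the SOUTH bit set is rejected.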
if( BTEST(update_flags,NORTH) .AND. BTEST(domain%fold,NORTH) .AND. BTEST(gridtype,SOUTH) ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: Incompatible grid offset and fold.' )
update_edge_only = BTEST(update_flags, EDGEONLY)
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
l_size = size(f_addrsx,1)
nlist = size(domain%list(:))
ptr = LOC(mpp_domains_stack)
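! This vector adjoint walks the x- and y-component overlap lists in step,
! merged by neighbour rank via the get_rank_* helpers. For BGRID/AGRID the two
! components share one list and are packed as interleaved pairs; for CGRID the
! x and y lists are handled separately.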
!--- overlap list counts for the x- and y-component updates
nsend_x = update_x%nsend
nsend_y = update_y%nsend
nrecv_x = update_x%nrecv
nrecv_y = update_y%nrecv
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
msgsize = 0
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = update_y%recv(ind_y)%pe
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
m = from_pe-mpp_root_pe()
call mpp_recv( msg1(m), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(m) = msgsize
end do
cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
msgsize = 0
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) then
is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = update_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
call mpp_send( msgsize, plen=1, to_pe=to_pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_updateV: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_updateV: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
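! first pass: only tally per-neighbour message sizes so that buffer_recv_size
! ends up marking the boundary between outgoing and incoming data in the stack
! buffer; no data is moved or sent in this loop.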
buffer_pos = 0
cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
msgsize = 0
select case(gridtype)
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*2
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = update_y%recv(ind_y)%pe
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
end select
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
!--- adjoint of the forward unpack: walk the recv overlaps in reverse, copying halo values into the buffer and zeroing them
buffer_pos = buffer_recv_size
cur_rank = get_rank_unpack(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x > 0 .OR. ind_y > 0)
pos = buffer_pos
select case ( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*2*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1, l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 2
buffer(pos-1) = fieldx(i,j,k)
buffer(pos) = fieldy(i,j,k)
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end do
end do
end if ! end if( recv(dir) )
end do ! do dir=8,1,-1
ind_x = ind_x-1
ind_y = ind_x
if(ind_x .GT. 0) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_y) then
do n = update_y%recv(ind_y)%count, 1, -1
dir = update_y%recv(ind_y)%dir(n)
if( recv(dir) ) then
tMe = update_y%recv(ind_y)%tileMe(n)
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
fieldy(i,j,k) = 0.
end do
end do
end do
end do
end if
end do
ind_y = ind_y-1
if(ind_y .GT. 0) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
if(cur_rank == rank_x) then
do n = update_x%recv(ind_x)%count, 1, -1
dir = update_x%recv(ind_x)%dir(n)
if( recv(dir) ) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
fieldx(i,j,k) = 0.
end do
end do
end do
end do
end if
end do
ind_x = ind_x-1
if(ind_x .GT. 0) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
end select
cur_rank = min(rank_x, rank_y)
end do
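! The branches below apply the adjoint of the special treatment at a folded
! boundary (north, south, west or east): BGRID pole points are zeroed, halo
! points that were reflected across the fold are accumulated back onto their
! source points, and the sign flips applied to vector components beyond the
! cyclic edge are re-applied.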
! ---northern boundary fold
shift = 0
if(domain%symmetry) shift = 1
if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
if( .NOT. domain%symmetry ) is = is - 1
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign
!off west edge, when updating in the north or west direction
j = domain%y(1)%global%end+shift
if ( recv(7) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
if(domain%symmetry) then
is = domain%x(1)%global%begin
else
is = domain%x(1)%global%begin - 1
end if
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-north BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldx(2*is-i,j,k) = fieldx(2*is-i,j,k) + fieldx(i,j,k)
fieldy(2*is-i,j,k) = fieldy(2*is-i,j,k) + fieldy(i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-north CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldy(2*is-i-1,j,k) = fieldy(2*is-i-1,j,k) + fieldy(i,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%data%end
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,SOUTH) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---southern boundary fold
! NOTE: symmetry is assumed for fold-south boundary
j = domain%y(1)%global%begin
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
j = domain%y(1)%global%begin
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign
!off west edge, when updating in the south or west direction
j = domain%y(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-south BGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldx(2*is-i,j,k) = fieldx(2*is-i,j,k) + fieldx(i,j,k)
fieldy(2*is-i,j,k) = fieldy(2*is-i,j,k) + fieldy(i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-south CGRID_NE west edge ubound error.' )
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = domain%x(1)%data%begin,is-1
fieldy(2*is-i-1,j,k) = fieldy(2*is-i-1,j,k) + fieldy(i,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%data%end
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,WEST) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---western boundary fold
! NOTE: symmetry is assumed for fold-west boundary
i = domain%x(1)%global%begin
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%begin
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign
!off south edge, when updating in the south or west direction
i = domain%x(1)%global%begin
if ( recv(3) .OR. recv(5) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-west BGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,2*js-j,k) = fieldx(i,2*js-j,k) + fieldx(i,j,k)
fieldy(i,2*js-j,k) = fieldy(i,2*js-j,k) + fieldy(i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-west CGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i, 2*js-j-1,k) = fieldx(i, 2*js-j-1,k) + fieldx(i,j,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,EAST) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---eastern boundary fold
! NOTE: symmetry is assumed for fold-east boundary
i = domain%x(1)%global%end+shift
if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
i = domain%x(1)%global%end+shift
js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
do j = js ,je, midpoint
if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign
!off south edge, when updating in the south or east direction
i = domain%x(1)%global%end+shift
if ( recv(3) .OR. recv(1) ) then
select case(gridtype)
case(BGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-east BGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i,2*js-j,k) = fieldx(i,2*js-j,k) + fieldx(i,j,k)
fieldy(i,2*js-j,k) = fieldy(i,2*js-j,k) + fieldy(i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
js = domain%y(1)%global%begin
if( js.GT.domain%y(1)%data%begin )then
if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_AD_V: folded-east CGRID_NE south edge ubound error.' )
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = domain%y(1)%data%begin,js-1
fieldx(i, 2*js-j-1,k) = fieldx(i, 2*js-j-1,k) + fieldx(i,j,k)
end do
end do
end do
end if
end select
end if
!off north edge
js = domain%y(1)%global%end
if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
je = domain%y(1)%data%end
js = js + 1
select case(gridtype)
case(BGRID_NE)
js = js + shift
je = je + shift
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
ptr_fieldy = f_addrsy(l, 1)
do k = 1,ke
do j = js,je
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,l_size
ptr_fieldx = f_addrsx(l, 1)
do k = 1,ke
do j = js, je
fieldx(i,j,k) = -fieldx(i,j,k)
end do
end do
end do
end select
end if
end if
end if
!adjoint packing done
!--- send the buffered halo contributions back to the PEs they were received from in the forward update
buffer_pos = 0
cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
msgsize = 0
select case(gridtype)
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
tMe = update_x%recv(ind_x)%tileMe(n)
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*2
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
from_pe = update_x%recv(ind_x)%pe
do n = 1, update_x%recv(ind_x)%count
dir = update_x%recv(ind_x)%dir(n)
if(recv(dir)) then
is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = update_x%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = update_y%recv(ind_y)%pe
do n = 1, update_y%recv(ind_y)%count
dir = update_y%recv(ind_y)%dir(n)
if(recv(dir)) then
is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = update_y%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
end select
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=from_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
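! post non-blocking receives for the adjoint contributions coming back from the
! send-overlap neighbours; they are placed after buffer_recv_size and unpacked
! once mpp_sync_self(check=EVENT_RECV) confirms their arrival.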
cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
!--- compute the size of the message to be received from this neighbour
msgsize = 0
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) msgsize = msgsize + update_x%send(ind_x)%msgsize(n)
enddo
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) msgsize = msgsize + update_y%send(ind_y)%msgsize(n)
enddo
endif
select case( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = update_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
end select
cur_rank = min(rank_x, rank_y)
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
end if
if( msgsize.GT.0 )then
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=to_pe, block=.false., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
enddo
call mpp_sync_self(check=EVENT_RECV)
!--- accumulate: unpack the received contributions onto the compute-domain send regions, honouring each overlap's rotation
buffer_pos = buffer_recv_size
pos = buffer_pos
cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
select case( gridtype )
case(BGRID_NE, BGRID_SW, AGRID)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 2
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos-1)
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 2
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos-1)
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 2
fieldy(i,j,k)=fieldy(i,j,k)-buffer(pos-1)
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
end if
case( NINETY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 2
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos-1)
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 2
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos-1)
fieldx(i,j,k)=fieldx(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos-1)
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l,tMe)
ptr_fieldy = f_addrsy(l,tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 2
fieldx(i,j,k)=fieldx(i,j,k)-buffer(pos-1)
fieldy(i,j,k)=fieldy(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
end if ! if( send(dir) )
end do ! do n = 1, update_x%send(ind_x)%count
ind_x = ind_x+1
ind_y = ind_x
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
rank_y = rank_x
endif
case(CGRID_NE, CGRID_SW)
if(cur_rank == rank_x) then
to_pe = update_x%send(ind_x)%pe
do n = 1, update_x%send(ind_x)%count
dir = update_x%send(ind_x)%dir(n)
if( send(dir) ) then
tMe = update_x%send(ind_x)%tileMe(n)
is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
select case( update_x%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
case(MINUS_NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
case(NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
end select
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = update_x%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = update_y%send(ind_y)%pe
do n = 1, update_y%send(ind_y)%count
dir = update_y%send(ind_y)%dir(n)
if( send(dir) ) then
tMe = update_y%send(ind_y)%tileMe(n)
is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
select case( update_y%send(ind_y)%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
case(MINUS_NINETY)
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
case(NINETY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
fieldx(i,j,k)=fieldx(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
case(ONE_HUNDRED_EIGHTY)
if( BTEST(update_flags,SCALAR_BIT) ) then
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)+buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size ! loop over number of fields
ptr_fieldx = f_addrsx(l, tMe)
ptr_fieldy = f_addrsy(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldy(i,j,k)=fieldy(i,j,k)-buffer(pos)
end do
end do
end do
end do
end if
end select
endif
enddo
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = update_y%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
end select
cur_rank = min(rank_x, rank_y)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
buffer_pos = pos
end if
end do
call mpp_sync_self( )
return
end subroutine mpp_do_update_ad_r4_3dv
# 1573 "../mpp/include/mpp_domains_misc.inc" 2
# 1583
# 1 "../mpp/include/mpp_do_update_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_update_ad_i4_3d( f_addrs, domain, update, d_type, ke, flags)
!adjoint of the forward halo update of a 3D field: halo (data-domain) values are
!gathered, zeroed locally, and accumulated back onto the compute domains that produced them
integer(8), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: update
integer(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: ke
integer, optional, intent(in) :: flags
integer(4) :: field(update%xbegin:update%xend, update%ybegin:update%yend,ke)
pointer(ptr_field, field)
integer :: update_flags
type(overlap_type), pointer :: overPtr => NULL()
character(len=8) :: text
!equate to mpp_domains_stack
integer(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
integer, allocatable :: msg1(:), msg2(:)
logical :: send(8), recv(8), update_edge_only
integer :: to_pe, from_pe, pos, msgsize, msgsize_send
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe, dir
integer :: start, start1, start2, index, is1, ie1, js1, je1, ni, nj, total
integer :: buffer_recv_size, nlist, outunit
integer :: send_start_pos
integer :: send_msgsize(MAXLIST)
outunit = stdout()
ptr = LOC(mpp_domains_stack)
l_size = size(f_addrs,1)
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
update_edge_only = BTEST(update_flags, EDGEONLY)
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
send = recv
if(debug_message_passing) then
nlist = size(domain%list(:))
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = update%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
msg2(l) = msgsize
enddo
do m = 1, update%nsend
overPtr => update%send(m)
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(send(dir)) then
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
call mpp_send( msgsize, plen=1, to_pe=overPtr%pe, tag=COMM_TAG_1 )
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_update: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_update: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!--- pack halo (recv-overlap) regions into the buffer, zero them and send them back to their source PEs
buffer_pos = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
msgsize_send = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos + msgsize_send
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = ke,1,-1
do j = je, js, -1
do i = ie, is, -1
buffer(pos) = field(i,j,k)
field(i,j,k) = 0.
pos = pos - 1
end do
end do
end do
end do
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
to_pe = overPtr%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nrecv
buffer_recv_size = buffer_pos
!--- post non-blocking receives for the contributions returning from the send-overlap neighbours
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
msgsize_send = msgsize
from_pe = overPtr%pe
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do ! end do m = 1, update%nsend
call mpp_sync_self(check=EVENT_RECV)
buffer_pos = buffer_recv_size
!--- accumulate the received contributions onto the compute-domain (send-overlap) regions
do m = 1, update%nsend
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
msgsize = 0
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
buffer_pos = pos
end if
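! accumulate the received contributions onto the compute-domain send regions;
! each overlap is traversed in the index order selected by its stored rotation,
! matching the order the forward update used when packing it.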
do n = 1, overPtr%count
dir = overPtr%dir(n)
if( send(dir) ) then
tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
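! accumulate the received adjoint contributions into the field; the index
! order of the unpack depends on the rotation between the local tile and the
! sending tile (e.g. between tiles of a cubed-sphere mosaic).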
select case( overPtr%rotation(n) )
case(ZERO)
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = is, ie
do j = je, js, -1
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do i = ie, is, -1
do j = js, je
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,l_size ! loop over number of fields
ptr_field = f_addrs(l, tMe)
do k = 1,ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
field(i,j,k)=field(i,j,k)+buffer(pos)
end do
end do
end do
end do
end select
endif
end do ! do n = 1, overPtr%count
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
buffer_pos = pos
end if
end do ! end do m = 1, update%nsend
call mpp_sync_self()
return
end subroutine mpp_do_update_ad_i4_3d
# 1590 "../mpp/include/mpp_domains_misc.inc" 2
!!$ Disabled preprocessor scaffolding (commented out): #undef/#define/#include
!!$ blocks that would instantiate mpp_do_update_ad for the real(8),
!!$ complex(8), integer(8), real(4), complex(4) and integer(4) types.
!bnc
!********************************************************
# 1 "../mpp/include/mpp_do_redistribute.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_redistribute_r8_3D( f_in, f_out, d_comm, d_type )
integer(8), intent(in) :: f_in(:), f_out(:)
type(DomainCommunicator2D), intent(in) :: d_comm
real(8), intent(in) :: d_type
real(8) :: field_in(d_comm%domain_in%x(1)%data%begin:d_comm%domain_in%x(1)%data%end, &
d_comm%domain_in%y(1)%data%begin:d_comm%domain_in%y(1)%data%end,d_comm%ke)
pointer( ptr_field_in, field_in)
real(8) :: field_out(d_comm%domain_out%x(1)%data%begin:d_comm%domain_out%x(1)%data%end, &
d_comm%domain_out%y(1)%data%begin:d_comm%domain_out%y(1)%data%end,d_comm%ke)
pointer( ptr_field_out, field_out)
type(domain2D), pointer :: domain_in, domain_out
integer :: i, j, k, l, n, l_size
integer :: is, ie, js, je
integer :: ke
integer :: list, pos, msgsize
integer :: to_pe, from_pe
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos, wordlen, errunit
!fix ke
errunit = stderr()
l_size = size(f_out(:)) ! equal to size(f_in(:))
ke = d_comm%ke
domain_in =>d_comm%domain_in; domain_out =>d_comm%domain_out
buffer_pos = 0
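! the communication buffer aliases the shared mpp_domains_stack workspace
! through a Cray pointer; ptr is pointed at LOC(mpp_domains_stack) below.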
ptr = LOC(mpp_domains_stack)
wordlen = size(TRANSFER(buffer(1),mpp_domains_stack))
!pre-post recv
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
msgsize = d_comm%R_msize(list)*l_size
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
enddo
!send
n = d_comm%Slist_size
do list = 0,n-1
if( .NOT. d_comm%S_do_buf(list) )cycle
to_pe = d_comm%cto_pe(list)
is=d_comm%sendis(1,list); ie=d_comm%sendie(1,list)
js=d_comm%sendjs(1,list); je=d_comm%sendje(1,list)
pos = buffer_pos
do l=1,l_size ! loop over number of fields
ptr_field_in = f_in(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
buffer(pos) = field_in(i,j,k)
end do
end do
end do
end do
if( debug )write( errunit,* )'PE', pe, ' to PE ', to_pe, 'is,ie,js,je=', is, ie, js, je
msgsize = pos - buffer_pos
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = pos
end do
call mpp_sync_self(check=EVENT_RECV)
!unpack buffer
buffer_pos = 0
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
is=d_comm%recvis(1,list); ie=d_comm%recvie(1,list)
js=d_comm%recvjs(1,list); je=d_comm%recvje(1,list)
if( debug )write( errunit,* )'PE', pe, ' from PE ', from_pe, 'is,ie,js,je=', is, ie, js, je
pos = buffer_pos
do l=1,l_size ! loop over number of in/out fields
ptr_field_out = f_out(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
field_out(i,j,k) = buffer(pos)
end do
end do
end do
end do
buffer_pos = pos
end do
call mpp_sync_self()
end subroutine mpp_do_redistribute_r8_3D
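!---------------------------------------------------------------------------
! Usage sketch (illustrative only, not part of this file): the type-specific
! mpp_do_redistribute_* routines above are normally reached through the
! public mpp_redistribute interface. Field and bound names below are
! assumptions.
!
!   type(domain2D) :: domain_in, domain_out
!   real(8)        :: u_in (isd_in:ied_in,   jsd_in:jed_in,   nk)
!   real(8)        :: u_out(isd_out:ied_out, jsd_out:jed_out, nk)
!   ! move data decomposed on domain_in onto the decomposition of domain_out
!   call mpp_redistribute(domain_in, u_in, domain_out, u_out)
!---------------------------------------------------------------------------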
# 1659 "../mpp/include/mpp_domains_misc.inc" 2
# 1667
# 1 "../mpp/include/mpp_do_redistribute.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_redistribute_i8_3D( f_in, f_out, d_comm, d_type )
integer(8), intent(in) :: f_in(:), f_out(:)
type(DomainCommunicator2D), intent(in) :: d_comm
integer(8), intent(in) :: d_type
integer(8) :: field_in(d_comm%domain_in%x(1)%data%begin:d_comm%domain_in%x(1)%data%end, &
d_comm%domain_in%y(1)%data%begin:d_comm%domain_in%y(1)%data%end,d_comm%ke)
pointer( ptr_field_in, field_in)
integer(8) :: field_out(d_comm%domain_out%x(1)%data%begin:d_comm%domain_out%x(1)%data%end, &
d_comm%domain_out%y(1)%data%begin:d_comm%domain_out%y(1)%data%end,d_comm%ke)
pointer( ptr_field_out, field_out)
type(domain2D), pointer :: domain_in, domain_out
integer :: i, j, k, l, n, l_size
integer :: is, ie, js, je
integer :: ke
integer :: list, pos, msgsize
integer :: to_pe, from_pe
integer(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos, wordlen, errunit
!fix ke
errunit = stderr()
l_size = size(f_out(:)) ! equal to size(f_in(:))
ke = d_comm%ke
domain_in =>d_comm%domain_in; domain_out =>d_comm%domain_out
buffer_pos = 0
ptr = LOC(mpp_domains_stack)
wordlen = size(TRANSFER(buffer(1),mpp_domains_stack))
!pre-post recv
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
msgsize = d_comm%R_msize(list)*l_size
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
enddo
!send
n = d_comm%Slist_size
do list = 0,n-1
if( .NOT. d_comm%S_do_buf(list) )cycle
to_pe = d_comm%cto_pe(list)
is=d_comm%sendis(1,list); ie=d_comm%sendie(1,list)
js=d_comm%sendjs(1,list); je=d_comm%sendje(1,list)
pos = buffer_pos
do l=1,l_size ! loop over number of fields
ptr_field_in = f_in(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
buffer(pos) = field_in(i,j,k)
end do
end do
end do
end do
if( debug )write( errunit,* )'PE', pe, ' to PE ', to_pe, 'is,ie,js,je=', is, ie, js, je
msgsize = pos - buffer_pos
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = pos
end do
call mpp_sync_self(check=EVENT_RECV)
!unpack buffer
buffer_pos = 0
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
is=d_comm%recvis(1,list); ie=d_comm%recvie(1,list)
js=d_comm%recvjs(1,list); je=d_comm%recvje(1,list)
if( debug )write( errunit,* )'PE', pe, ' from PE ', from_pe, 'is,ie,js,je=', is, ie, js, je
pos = buffer_pos
do l=1,l_size ! loop over number of in/out fields
ptr_field_out = f_out(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
field_out(i,j,k) = buffer(pos)
end do
end do
end do
end do
buffer_pos = pos
end do
call mpp_sync_self()
end subroutine mpp_do_redistribute_i8_3D
# 1675 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_redistribute.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_redistribute_l8_3D( f_in, f_out, d_comm, d_type )
integer(8), intent(in) :: f_in(:), f_out(:)
type(DomainCommunicator2D), intent(in) :: d_comm
logical(8), intent(in) :: d_type
logical(8) :: field_in(d_comm%domain_in%x(1)%data%begin:d_comm%domain_in%x(1)%data%end, &
d_comm%domain_in%y(1)%data%begin:d_comm%domain_in%y(1)%data%end,d_comm%ke)
pointer( ptr_field_in, field_in)
logical(8) :: field_out(d_comm%domain_out%x(1)%data%begin:d_comm%domain_out%x(1)%data%end, &
d_comm%domain_out%y(1)%data%begin:d_comm%domain_out%y(1)%data%end,d_comm%ke)
pointer( ptr_field_out, field_out)
type(domain2D), pointer :: domain_in, domain_out
integer :: i, j, k, l, n, l_size
integer :: is, ie, js, je
integer :: ke
integer :: list, pos, msgsize
integer :: to_pe, from_pe
logical(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos, wordlen, errunit
!fix ke
errunit = stderr()
l_size = size(f_out(:)) ! equal to size(f_in(:))
ke = d_comm%ke
domain_in =>d_comm%domain_in; domain_out =>d_comm%domain_out
buffer_pos = 0
ptr = LOC(mpp_domains_stack)
wordlen = size(TRANSFER(buffer(1),mpp_domains_stack))
!pre-post recv
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
msgsize = d_comm%R_msize(list)*l_size
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
enddo
!send
n = d_comm%Slist_size
do list = 0,n-1
if( .NOT. d_comm%S_do_buf(list) )cycle
to_pe = d_comm%cto_pe(list)
is=d_comm%sendis(1,list); ie=d_comm%sendie(1,list)
js=d_comm%sendjs(1,list); je=d_comm%sendje(1,list)
pos = buffer_pos
do l=1,l_size ! loop over number of fields
ptr_field_in = f_in(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
buffer(pos) = field_in(i,j,k)
end do
end do
end do
end do
if( debug )write( errunit,* )'PE', pe, ' to PE ', to_pe, 'is,ie,js,je=', is, ie, js, je
msgsize = pos - buffer_pos
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = pos
end do
call mpp_sync_self(check=EVENT_RECV)
!unpack buffer
buffer_pos = 0
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
is=d_comm%recvis(1,list); ie=d_comm%recvie(1,list)
js=d_comm%recvjs(1,list); je=d_comm%recvje(1,list)
if( debug )write( errunit,* )'PE', pe, ' from PE ', from_pe, 'is,ie,js,je=', is, ie, js, je
pos = buffer_pos
do l=1,l_size ! loop over number of in/out fields
ptr_field_out = f_out(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
field_out(i,j,k) = buffer(pos)
end do
end do
end do
end do
buffer_pos = pos
end do
call mpp_sync_self()
end subroutine mpp_do_redistribute_l8_3D
# 1681 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_redistribute.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_redistribute_r4_3D( f_in, f_out, d_comm, d_type )
integer(8), intent(in) :: f_in(:), f_out(:)
type(DomainCommunicator2D), intent(in) :: d_comm
real(4), intent(in) :: d_type
real(4) :: field_in(d_comm%domain_in%x(1)%data%begin:d_comm%domain_in%x(1)%data%end, &
d_comm%domain_in%y(1)%data%begin:d_comm%domain_in%y(1)%data%end,d_comm%ke)
pointer( ptr_field_in, field_in)
real(4) :: field_out(d_comm%domain_out%x(1)%data%begin:d_comm%domain_out%x(1)%data%end, &
d_comm%domain_out%y(1)%data%begin:d_comm%domain_out%y(1)%data%end,d_comm%ke)
pointer( ptr_field_out, field_out)
type(domain2D), pointer :: domain_in, domain_out
integer :: i, j, k, l, n, l_size
integer :: is, ie, js, je
integer :: ke
integer :: list, pos, msgsize
integer :: to_pe, from_pe
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos, wordlen, errunit
!fix ke
errunit = stderr()
l_size = size(f_out(:)) ! equal to size(f_in(:))
ke = d_comm%ke
domain_in =>d_comm%domain_in; domain_out =>d_comm%domain_out
buffer_pos = 0
ptr = LOC(mpp_domains_stack)
wordlen = size(TRANSFER(buffer(1),mpp_domains_stack))
!pre-post recv
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
msgsize = d_comm%R_msize(list)*l_size
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
enddo
!send
n = d_comm%Slist_size
do list = 0,n-1
if( .NOT. d_comm%S_do_buf(list) )cycle
to_pe = d_comm%cto_pe(list)
is=d_comm%sendis(1,list); ie=d_comm%sendie(1,list)
js=d_comm%sendjs(1,list); je=d_comm%sendje(1,list)
pos = buffer_pos
do l=1,l_size ! loop over number of fields
ptr_field_in = f_in(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
buffer(pos) = field_in(i,j,k)
end do
end do
end do
end do
if( debug )write( errunit,* )'PE', pe, ' to PE ', to_pe, 'is,ie,js,je=', is, ie, js, je
msgsize = pos - buffer_pos
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = pos
end do
call mpp_sync_self(check=EVENT_RECV)
!unpack buffer
buffer_pos = 0
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
is=d_comm%recvis(1,list); ie=d_comm%recvie(1,list)
js=d_comm%recvjs(1,list); je=d_comm%recvje(1,list)
if( debug )write( errunit,* )'PE', pe, ' from PE ', from_pe, 'is,ie,js,je=', is, ie, js, je
pos = buffer_pos
do l=1,l_size ! loop over number of in/out fields
ptr_field_out = f_out(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
field_out(i,j,k) = buffer(pos)
end do
end do
end do
end do
buffer_pos = pos
end do
call mpp_sync_self()
end subroutine mpp_do_redistribute_r4_3D
# 1689 "../mpp/include/mpp_domains_misc.inc" 2
# 1698
# 1 "../mpp/include/mpp_do_redistribute.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_redistribute_i4_3D( f_in, f_out, d_comm, d_type )
integer(8), intent(in) :: f_in(:), f_out(:)
type(DomainCommunicator2D), intent(in) :: d_comm
integer(4), intent(in) :: d_type
integer(4) :: field_in(d_comm%domain_in%x(1)%data%begin:d_comm%domain_in%x(1)%data%end, &
d_comm%domain_in%y(1)%data%begin:d_comm%domain_in%y(1)%data%end,d_comm%ke)
pointer( ptr_field_in, field_in)
integer(4) :: field_out(d_comm%domain_out%x(1)%data%begin:d_comm%domain_out%x(1)%data%end, &
d_comm%domain_out%y(1)%data%begin:d_comm%domain_out%y(1)%data%end,d_comm%ke)
pointer( ptr_field_out, field_out)
type(domain2D), pointer :: domain_in, domain_out
integer :: i, j, k, l, n, l_size
integer :: is, ie, js, je
integer :: ke
integer :: list, pos, msgsize
integer :: to_pe, from_pe
integer(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos, wordlen, errunit
!fix ke
errunit = stderr()
l_size = size(f_out(:)) ! equal to size(f_in(:))
ke = d_comm%ke
domain_in =>d_comm%domain_in; domain_out =>d_comm%domain_out
buffer_pos = 0
ptr = LOC(mpp_domains_stack)
wordlen = size(TRANSFER(buffer(1),mpp_domains_stack))
!pre-post recv
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
msgsize = d_comm%R_msize(list)*l_size
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
enddo
!send
n = d_comm%Slist_size
do list = 0,n-1
if( .NOT. d_comm%S_do_buf(list) )cycle
to_pe = d_comm%cto_pe(list)
is=d_comm%sendis(1,list); ie=d_comm%sendie(1,list)
js=d_comm%sendjs(1,list); je=d_comm%sendje(1,list)
pos = buffer_pos
do l=1,l_size ! loop over number of fields
ptr_field_in = f_in(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
buffer(pos) = field_in(i,j,k)
end do
end do
end do
end do
if( debug )write( errunit,* )'PE', pe, ' to PE ', to_pe, 'is,ie,js,je=', is, ie, js, je
msgsize = pos - buffer_pos
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = pos
end do
call mpp_sync_self(check=EVENT_RECV)
!unpack buffer
buffer_pos = 0
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
is=d_comm%recvis(1,list); ie=d_comm%recvie(1,list)
js=d_comm%recvjs(1,list); je=d_comm%recvje(1,list)
if( debug )write( errunit,* )'PE', pe, ' from PE ', from_pe, 'is,ie,js,je=', is, ie, js, je
pos = buffer_pos
do l=1,l_size ! loop over number of in/out fields
ptr_field_out = f_out(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
field_out(i,j,k) = buffer(pos)
end do
end do
end do
end do
buffer_pos = pos
end do
call mpp_sync_self()
end subroutine mpp_do_redistribute_i4_3D
# 1705 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_redistribute.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_redistribute_l4_3D( f_in, f_out, d_comm, d_type )
integer(8), intent(in) :: f_in(:), f_out(:)
type(DomainCommunicator2D), intent(in) :: d_comm
logical(4), intent(in) :: d_type
logical(4) :: field_in(d_comm%domain_in%x(1)%data%begin:d_comm%domain_in%x(1)%data%end, &
d_comm%domain_in%y(1)%data%begin:d_comm%domain_in%y(1)%data%end,d_comm%ke)
pointer( ptr_field_in, field_in)
logical(4) :: field_out(d_comm%domain_out%x(1)%data%begin:d_comm%domain_out%x(1)%data%end, &
d_comm%domain_out%y(1)%data%begin:d_comm%domain_out%y(1)%data%end,d_comm%ke)
pointer( ptr_field_out, field_out)
type(domain2D), pointer :: domain_in, domain_out
integer :: i, j, k, l, n, l_size
integer :: is, ie, js, je
integer :: ke
integer :: list, pos, msgsize
integer :: to_pe, from_pe
logical(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
integer :: buffer_pos, wordlen, errunit
!fix ke
errunit = stderr()
l_size = size(f_out(:)) ! equal to size(f_in(:))
ke = d_comm%ke
domain_in =>d_comm%domain_in; domain_out =>d_comm%domain_out
buffer_pos = 0
ptr = LOC(mpp_domains_stack)
wordlen = size(TRANSFER(buffer(1),mpp_domains_stack))
!pre-post recv
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
msgsize = d_comm%R_msize(list)*l_size
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
enddo
!send
n = d_comm%Slist_size
do list = 0,n-1
if( .NOT. d_comm%S_do_buf(list) )cycle
to_pe = d_comm%cto_pe(list)
is=d_comm%sendis(1,list); ie=d_comm%sendie(1,list)
js=d_comm%sendjs(1,list); je=d_comm%sendje(1,list)
pos = buffer_pos
do l=1,l_size ! loop over number of fields
ptr_field_in = f_in(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
buffer(pos) = field_in(i,j,k)
end do
end do
end do
end do
if( debug )write( errunit,* )'PE', pe, ' to PE ', to_pe, 'is,ie,js,je=', is, ie, js, je
msgsize = pos - buffer_pos
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = pos
end do
call mpp_sync_self(check=EVENT_RECV)
!unpack buffer
buffer_pos = 0
n = d_comm%Rlist_size
do list = 0,n-1
if( .NOT. d_comm%R_do_buf(list) )cycle
from_pe = d_comm%cfrom_pe(list)
is=d_comm%recvis(1,list); ie=d_comm%recvie(1,list)
js=d_comm%recvjs(1,list); je=d_comm%recvje(1,list)
if( debug )write( errunit,* )'PE', pe, ' from PE ', from_pe, 'is,ie,js,je=', is, ie, js, je
pos = buffer_pos
do l=1,l_size ! loop over number of in/out fields
ptr_field_out = f_out(l)
do k = 1,ke
do j = js,je
do i = is,ie
pos = pos+1
field_out(i,j,k) = buffer(pos)
end do
end do
end do
end do
buffer_pos = pos
end do
call mpp_sync_self()
end subroutine mpp_do_redistribute_l4_3D
# 1711 "../mpp/include/mpp_domains_misc.inc" 2
!#undef MPP_GET_BOUNDARY_4D_
!#define MPP_GET_BOUNDARY_4D_ mpp_get_boundary_r8_4d
!#undef MPP_GET_BOUNDARY_5D_
!#define MPP_GET_BOUNDARY_5D_ mpp_get_boundary_r8_5d
!#undef MPP_GET_BOUNDARY_4D_V_
!#define MPP_GET_BOUNDARY_4D_V_ mpp_get_boundary_r8_4dv
!#undef MPP_GET_BOUNDARY_5D_V_
!#define MPP_GET_BOUNDARY_5D_V_ mpp_get_boundary_r8_5dv
# 1 "../mpp/include/mpp_get_boundary.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
! this routine is used to retrieve scalar boundary data for a symmetric domain.
subroutine mpp_get_boundary_r8_2d(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &
position, complete, tile_count)
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,:)
real(8), intent(inout), optional :: ebuffer(:), sbuffer(:), wbuffer(:), nbuffer(:)
integer, intent(in), optional :: flags, position, tile_count
logical, intent(in), optional :: complete
real(8) :: field3D(size(field,1),size(field,2),1)
real(8), allocatable, dimension(:,:) :: ebuffer2D, sbuffer2D, wbuffer2D, nbuffer2D
integer :: xcount, ycount
integer :: ntile
logical :: need_ebuffer, need_sbuffer, need_wbuffer, need_nbuffer
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrs=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrs=-9999
integer, save :: bsize(4)=0, isize=0, jsize=0, ksize=0, pos, list=0, l_size=0, upflags
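! the saved f_addrs/b_addrs arrays accumulate field and buffer addresses
! across successive calls of a group update; the actual communication is
! deferred until the call with complete=.true. on the last tile.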
integer :: buffer_size(4)
integer :: max_ntile, tile, update_position, ishift, jshift
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(8) :: d_type
type(overlapSpec), pointer :: bound => NULL()
ntile = size(domain%x(:))
if(present(flags)) then
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_: flags is a dummy optional argument")
endif
update_position = CENTER
if(present(position)) update_position = position
!--- check whether the boundary buffers are needed
need_ebuffer=.false.; need_sbuffer=.false.; need_wbuffer=.false.; need_nbuffer=.false.
if( domain%symmetry .AND. PRESENT(position) ) then
select case(position)
case(CORNER)
need_ebuffer=.true.; need_sbuffer=.true.; need_wbuffer=.true.; need_nbuffer=.true.
case(NORTH)
need_sbuffer=.true.; need_nbuffer=.true.
case(EAST)
need_ebuffer=.true.; need_wbuffer=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
if(present(ebuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D: ebuffer should not be present when north is folded')
if(.not. need_ebuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: ebuffer should not be present')
b_addrs(1, list, tile) = LOC(ebuffer)
buffer_size(1) = size(ebuffer(:))
else
b_addrs(1, list, tile) = 0
buffer_size(1) = 1
end if
if(present(sbuffer)) then
if(.not. need_sbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: sbuffer should not be present')
b_addrs(2, list, tile) = LOC(sbuffer)
buffer_size(2) = size(sbuffer(:))
else
b_addrs(2, list, tile) = 0
buffer_size(2) = 1
end if
if(present(wbuffer)) then
if(.not. need_wbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: wbuffer should not be present')
b_addrs(3, list, tile) = LOC(wbuffer)
buffer_size(3) = size(wbuffer(:))
else
b_addrs(3, list, tile) = 0
buffer_size(3) = 1
end if
if(present(nbuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D: nbuffer should not be present when north is folded')
if(.not. need_nbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: nbuffer should not be present')
b_addrs(4, list, tile) = LOC(nbuffer)
buffer_size(4) = size(nbuffer(:))
else
b_addrs(4, list, tile) = 0
buffer_size(4) = 1
end if
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ksize = 1; pos = update_position
bsize = buffer_size
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize .NE. size(field,1))
set_mismatch = set_mismatch .OR. (jsize .NE. size(field,2))
set_mismatch = set_mismatch .OR. ANY( bsize .NE. buffer_size )
set_mismatch = set_mismatch .OR. (update_position .NE. pos)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
!--- only non-center data in a symmetric domain will be retrieved.
if(update_position == CENTER .OR. (.NOT. domain%symmetry) ) return
bound => search_bound_overlap(domain, update_position)
call mpp_get_domain_shift(domain, ishift, jshift, update_position)
if(size(field,1) .NE. domain%x(1)%memory%size+ishift .OR. size(field,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D: field is not on memory domain")
if(ASSOCIATED(bound)) then
call mpp_do_get_boundary(f_addrs(1:l_size,1:ntile), domain, bound, b_addrs(:,1:l_size,1:ntile), &
bsize, ksize, d_type)
endif
l_size=0; f_addrs=-9999; bsize=0; b_addrs=-9999; isize=0; jsize=0; ksize=0
end if
return
end subroutine mpp_get_boundary_r8_2d
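!---------------------------------------------------------------------------
! Usage sketch (illustrative only): retrieving the extra boundary points of a
! CORNER-positioned field on a symmetric domain. Array names and bounds are
! assumptions.
!
!   real(8) :: psi(isd:ied+1, jsd:jed+1)
!   real(8) :: ebuf(jec-jsc+2), sbuf(iec-isc+2)
!   real(8) :: wbuf(jec-jsc+2), nbuf(iec-isc+2)
!   call mpp_get_boundary(psi, domain, ebuffer=ebuf, sbuffer=sbuf, &
!                         wbuffer=wbuf, nbuffer=nbuf, position=CORNER)
!---------------------------------------------------------------------------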
!###############################################################################################
subroutine mpp_get_boundary_r8_3d(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &
position, complete, tile_count)
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,:,:)
real(8), intent(inout), optional :: ebuffer(:,:), sbuffer(:,:), wbuffer(:,:), nbuffer(:,:)
integer, intent(in), optional :: flags, position, tile_count
logical, intent(in), optional :: complete
integer :: ntile
logical :: need_ebuffer, need_sbuffer, need_wbuffer, need_nbuffer
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrs=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrs=-9999
integer, save :: bsize(4)=0, isize=0, jsize=0, ksize=0, pos, list=0, l_size=0, upflags
integer :: buffer_size(4)
integer :: max_ntile, tile, update_position, ishift, jshift
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(8) :: d_type
type(overlapSpec), pointer :: bound => NULL()
ntile = size(domain%x(:))
if(present(flags)) then
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_: flags is a dummy optional argument")
endif
update_position = CENTER
if(present(position)) update_position = position
!--- check whether the required boundary buffers are present
need_ebuffer=.false.; need_sbuffer=.false.; need_wbuffer=.false.; need_nbuffer=.false.
if( domain%symmetry .AND. PRESENT(position) ) then
select case(position)
case(CORNER)
need_ebuffer=.true.; need_sbuffer=.true.; need_wbuffer=.true.; need_nbuffer=.true.
case(NORTH)
need_sbuffer=.true.; need_nbuffer=.true.
case(EAST)
need_ebuffer=.true.; need_wbuffer=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
if(present(ebuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D: ebuffer should not be present when north is folded')
if(.not. need_ebuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: ebuffer should not be present')
b_addrs(1, list, tile) = LOC(ebuffer)
buffer_size(1) = size(ebuffer,1)
else
b_addrs(1, list, tile) = 0
buffer_size(1) = 1
end if
if(present(sbuffer)) then
if(.not. need_sbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: sbuffer should not be present')
b_addrs(2, list, tile) = LOC(sbuffer)
buffer_size(2) = size(sbuffer,1)
else
b_addrs(2, list, tile) = 0
buffer_size(2) = 1
end if
if(present(wbuffer)) then
if(.not. need_wbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: wbuffer should not be present')
b_addrs(3, list, tile) = LOC(wbuffer)
buffer_size(3) = size(wbuffer,1)
else
b_addrs(3, list, tile) = 0
buffer_size(3) = 1
end if
if(present(nbuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D: nbuffer should not be present when north is folded')
if(.not. need_nbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: nbuffer should not be present')
b_addrs(4, list, tile) = LOC(nbuffer)
buffer_size(4) = size(nbuffer,1)
else
b_addrs(4, list, tile) = 0
buffer_size(4) = 1
end if
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ksize = size(field,3); pos = update_position
bsize = buffer_size
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize .NE. size(field,1))
set_mismatch = set_mismatch .OR. (jsize .NE. size(field,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(field,3))
set_mismatch = set_mismatch .OR. ANY( bsize .NE. buffer_size )
set_mismatch = set_mismatch .OR. (update_position .NE. pos)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
!--- only non-center data in a symmetric domain will be retrieved.
if(update_position == CENTER .OR. (.NOT. domain%symmetry) ) return
bound => search_bound_overlap(domain, update_position)
call mpp_get_domain_shift(domain, ishift, jshift, update_position)
if(size(field,1) .NE. domain%x(1)%memory%size+ishift .OR. size(field,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D: field is not on memory domain")
if(ASSOCIATED(bound)) then
call mpp_do_get_boundary(f_addrs(1:l_size,1:ntile), domain, bound, b_addrs(:,1:l_size,1:ntile), &
bsize, ksize, d_type)
endif
l_size=0; f_addrs=-9999; bsize=0; b_addrs=-9999; isize=0; jsize=0; ksize=0
end if
end subroutine mpp_get_boundary_r8_3d
!####################################################################
! vector update
subroutine mpp_get_boundary_r8_2dv(fieldx, fieldy, domain, ebufferx, sbufferx, wbufferx, nbufferx, &
ebuffery, sbuffery, wbuffery, nbuffery, flags, gridtype, &
complete, tile_count)
type(domain2D), intent(in) :: domain
real(8), intent(in) :: fieldx(:,:), fieldy(:,:)
real(8), intent(inout), optional :: ebufferx(:), sbufferx(:), wbufferx(:), nbufferx(:)
real(8), intent(inout), optional :: ebuffery(:), sbuffery(:), wbuffery(:), nbuffery(:)
integer, intent(in), optional :: flags, gridtype, tile_count
logical, intent(in), optional :: complete
integer :: ntile, update_flags
logical :: need_ebufferx, need_sbufferx, need_wbufferx, need_nbufferx
logical :: need_ebuffery, need_sbuffery, need_wbuffery, need_nbuffery
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsx=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsy=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsx=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsy=-9999
integer, save :: bsizex(4)=0, bsizey(4)=0, isize(2)=0, jsize(2)=0, ksize=0, l_size=0, list=0
integer, save :: offset_type, upflags
integer :: bufferx_size(4), buffery_size(4)
integer :: max_ntile, tile, grid_offset_type
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(8) :: d_type
type(overlapSpec), pointer :: boundx=>NULL()
type(overlapSpec), pointer :: boundy=>NULL()
integer :: position_x, position_y, ishift, jshift
ntile = size(domain%x(:))
update_flags = 0
if( PRESENT(flags) ) then
update_flags = flags
end if
!--- check whether the required boundary buffers are present
need_ebufferx=.FALSE.; need_sbufferx=.FALSE.
need_wbufferx=.FALSE.; need_nbufferx=.FALSE.
need_ebuffery=.FALSE.; need_sbuffery=.FALSE.
need_wbuffery=.FALSE.; need_nbuffery=.FALSE.
if( domain%symmetry .AND. PRESENT(gridtype) ) then
select case(gridtype)
case(BGRID_NE, BGRID_SW)
need_ebufferx=.true.; need_sbufferx=.true.; need_wbufferx=.true.; need_nbufferx=.true.
need_ebuffery=.true.; need_sbuffery=.true.; need_wbuffery=.true.; need_nbuffery=.true.
case(CGRID_NE, CGRID_SW)
need_ebufferx=.true.; need_wbufferx=.true.; need_sbuffery=.true.; need_nbuffery=.true.
case(DGRID_NE, DGRID_SW)
need_ebuffery=.true.; need_wbuffery=.true.; need_sbufferx=.true.; need_nbufferx=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(present(ebufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: ebufferx should not be present when north is folded')
if(.not. need_ebufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: ebufferx should not be present')
b_addrsx(1, list, tile) = LOC(ebufferx)
bufferx_size(1) = size(ebufferx,1)
else
b_addrsx(1, list, tile) = 0
bufferx_size(1) = 1
end if
if(present(sbufferx)) then
if(.not. need_sbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: sbufferx should not be present')
b_addrsx(2, list, tile) = LOC(sbufferx)
bufferx_size(2) = size(sbufferx,1)
else
b_addrsx(2, list, tile) = 0
bufferx_size(2) = 1
end if
if(present(wbufferx)) then
if(.not. need_wbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: wbufferx should not be present')
b_addrsx(3, list, tile) = LOC(wbufferx)
bufferx_size(3) = size(wbufferx,1)
else
b_addrsx(3, list, tile) = 0
bufferx_size(3) = 1
end if
if(present(nbufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: nbufferx should not be present when north is folded')
if(.not. need_nbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: nbufferx should not be present')
b_addrsx(4, list, tile) = LOC(nbufferx)
bufferx_size(4) = size(nbufferx,1)
else
b_addrsx(4, list, tile) = 0
bufferx_size(4) = 1
end if
if(present(ebuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: ebuffery should not be present when north is folded')
if(.not. need_ebuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: ebuffery should not be present')
b_addrsy(1, list, tile) = LOC(ebuffery)
buffery_size(1) = size(ebuffery,1)
else
b_addrsy(1, list, tile) = 0
buffery_size(1) = 1
end if
if(present(sbuffery)) then
if(.not. need_sbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: sbuffery should not be present')
b_addrsy(2, list, tile) = LOC(sbuffery)
buffery_size(2) = size(sbuffery,1)
else
b_addrsy(2, list, tile) = 0
buffery_size(2) = 1
end if
if(present(wbuffery)) then
if(.not. need_wbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: wbuffery should not be present')
b_addrsy(3, list, tile) = LOC(wbuffery)
buffery_size(3) = size(wbuffery,1)
else
b_addrsy(3, list, tile) = 0
buffery_size(3) = 1
end if
if(present(nbuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: nbuffery should not be present when north is folded')
if(.not. need_nbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: nbuffery should not be present')
b_addrsy(4, list, tile) = LOC(nbuffery)
buffery_size(4) = size(nbuffery,1)
else
b_addrsy(4, list, tile) = 0
buffery_size(4) = 1
end if
grid_offset_type = AGRID
if(present(gridtype)) grid_offset_type = gridtype
if(list == 1 .AND. tile == 1 )then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
ksize = 1; offset_type = grid_offset_type
bsizex = bufferx_size; bsizey = buffery_size; upflags = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) .NE. size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) .NE. size(fieldx,2))
set_mismatch = set_mismatch .OR. (isize(2) .NE. size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) .NE. size(fieldy,2))
set_mismatch = set_mismatch .OR. ANY( bsizex .NE. bufferx_size )
set_mismatch = set_mismatch .OR. ANY( bsizey .NE. buffery_size )
set_mismatch = set_mismatch .OR. (offset_type .NE. grid_offset_type)
set_mismatch = set_mismatch .OR. (upflags .NE. update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case (DGRID_NE, DGRID_SW)
position_x = NORTH
position_y = EAST
case default
call mpp_error(FATAL, "mpp_get_boundary.h: invalid value of grid_offset_type")
end select
boundx => search_bound_overlap(domain, position_x)
boundy => search_bound_overlap(domain, position_y)
call mpp_get_domain_shift(domain, ishift, jshift, position_x)
if(size(fieldx,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldx,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: fieldx is not on memory domain")
call mpp_get_domain_shift(domain, ishift, jshift, position_y)
if(size(fieldy,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldy,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: fieldy is not on memory domain")
if(ASSOCIATED(boundx) ) then
call mpp_do_get_boundary(f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, boundx, boundy, &
b_addrsx(:,1:l_size,1:ntile), b_addrsy(:,1:l_size,1:ntile), bsizex, &
bsizey, ksize, d_type, update_flags, grid_offset_type)
endif
l_size=0; f_addrsx=-9999; f_addrsy=-9999; bsizex=0; bsizey=0;
b_addrsx=-9999; b_addrsy=-9999; isize=0; jsize=0; ksize=0
end if
return
end subroutine mpp_get_boundary_r8_2dv
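!---------------------------------------------------------------------------
! Usage sketch (illustrative only): for a C-grid velocity pair only the
! east/west boundaries of the x-component and the south/north boundaries of
! the y-component are needed, matching the CGRID_NE case above. Names and
! bounds are assumptions.
!
!   real(8) :: u(isd:ied+1, jsd:jed), v(isd:ied, jsd:jed+1)
!   real(8) :: ebufu(jec-jsc+1), wbufu(jec-jsc+1)
!   real(8) :: sbufv(iec-isc+1), nbufv(iec-isc+1)
!   call mpp_get_boundary(u, v, domain, ebufferx=ebufu, wbufferx=wbufu, &
!                         sbuffery=sbufv, nbuffery=nbufv, gridtype=CGRID_NE)
!---------------------------------------------------------------------------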
!###############################################################################################
subroutine mpp_get_boundary_r8_3dv(fieldx, fieldy, domain, ebufferx, sbufferx, wbufferx, nbufferx, &
ebuffery, sbuffery, wbuffery, nbuffery, flags, gridtype, &
complete, tile_count)
type(domain2D), intent(in) :: domain
real(8), intent(in) :: fieldx(domain%x(1)%memory%begin:,domain%y(1)%memory%begin:,:)
real(8), intent(in) :: fieldy(domain%x(1)%memory%begin:,domain%y(1)%memory%begin:,:)
real(8), intent(inout), optional :: ebufferx(:,:), sbufferx(:,:), wbufferx(:,:), nbufferx(:,:)
real(8), intent(inout), optional :: ebuffery(:,:), sbuffery(:,:), wbuffery(:,:), nbuffery(:,:)
integer, intent(in), optional :: flags, gridtype, tile_count
logical, intent(in), optional :: complete
integer :: ntile, update_flags
logical :: need_ebufferx, need_sbufferx, need_wbufferx, need_nbufferx
logical :: need_ebuffery, need_sbuffery, need_wbuffery, need_nbuffery
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsx=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsy=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsx=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsy=-9999
integer, save :: bsizex(4)=0, bsizey(4)=0, isize(2)=0, jsize(2)=0, ksize=0, l_size=0, list=0
integer, save :: offset_type, upflags
integer :: bufferx_size(4), buffery_size(4)
integer :: max_ntile, tile, grid_offset_type
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(8) :: d_type
type(overlapSpec), pointer :: boundx=>NULL()
type(overlapSpec), pointer :: boundy=>NULL()
integer :: position_x, position_y, ishift, jshift
ntile = size(domain%x(:))
update_flags = 0
if( PRESENT(flags) ) then
update_flags = flags
end if
!--- check whether the required boundary buffers are present
need_ebufferx=.FALSE.; need_sbufferx=.FALSE.
need_wbufferx=.FALSE.; need_nbufferx=.FALSE.
need_ebuffery=.FALSE.; need_sbuffery=.FALSE.
need_wbuffery=.FALSE.; need_nbuffery=.FALSE.
if( domain%symmetry .AND. PRESENT(gridtype) ) then
select case(gridtype)
case(BGRID_NE, BGRID_SW)
need_ebufferx=.true.; need_sbufferx=.true.; need_wbufferx=.true.; need_nbufferx=.true.
need_ebuffery=.true.; need_sbuffery=.true.; need_wbuffery=.true.; need_nbuffery=.true.
case(CGRID_NE, CGRID_SW)
need_ebufferx=.true.; need_wbufferx=.true.; need_sbuffery=.true.; need_nbuffery=.true.
case(DGRID_NE, DGRID_SW)
need_ebuffery=.true.; need_wbuffery=.true.; need_sbufferx=.true.; need_nbufferx=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(present(ebufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: ebufferx should not be present when north is folded')
if(.not. need_ebufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: ebufferx should not be present')
b_addrsx(1, list, tile) = LOC(ebufferx)
bufferx_size(1) = size(ebufferx,1)
else
b_addrsx(1, list, tile) = 0
bufferx_size(1) = 1
end if
if(present(sbufferx)) then
if(.not. need_sbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: sbufferx should not be present')
b_addrsx(2, list, tile) = LOC(sbufferx)
bufferx_size(2) = size(sbufferx,1)
else
b_addrsx(2, list, tile) = 0
bufferx_size(2) = 1
end if
if(present(wbufferx)) then
if(.not. need_wbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: wbufferx should not be present')
b_addrsx(3, list, tile) = LOC(wbufferx)
bufferx_size(3) = size(wbufferx,1)
else
b_addrsx(3, list, tile) = 0
bufferx_size(3) = 1
end if
if(present(nbufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: nbufferx should not be present when north is folded')
if(.not. need_nbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: nbufferx should not be present')
b_addrsx(4, list, tile) = LOC(nbufferx)
bufferx_size(4) = size(nbufferx,1)
else
b_addrsx(4, list, tile) = 0
bufferx_size(4) = 1
end if
if(present(ebuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: ebuffery should not be present when north is folded')
if(.not. need_ebuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: ebuffery should not be present')
b_addrsy(1, list, tile) = LOC(ebuffery)
buffery_size(1) = size(ebuffery,1)
else
b_addrsy(1, list, tile) = 0
buffery_size(1) = 1
end if
if(present(sbuffery)) then
if(.not. need_sbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: sbuffery should not be present')
b_addrsy(2, list, tile) = LOC(sbuffery)
buffery_size(2) = size(sbuffery,1)
else
b_addrsy(2, list, tile) = 0
buffery_size(2) = 1
end if
if(present(wbuffery)) then
if(.not. need_wbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: wbuffery should not be present')
b_addrsy(3, list, tile) = LOC(wbuffery)
buffery_size(3) = size(wbuffery,1)
else
b_addrsy(3, list, tile) = 0
buffery_size(3) = 1
end if
if(present(nbuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: nbuffery should not be present when north is folded')
if(.not. need_nbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: nbuffery should not be present')
b_addrsy(4, list, tile) = LOC(nbuffery)
buffery_size(4) = size(nbuffery,1)
else
b_addrsy(4, list, tile) = 0
buffery_size(4) = 1
end if
grid_offset_type = AGRID
if(present(gridtype)) grid_offset_type = gridtype
if(list == 1 .AND. tile == 1 )then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
ksize = size(fieldx,3); offset_type = grid_offset_type
bsizex = bufferx_size; bsizey = buffery_size; upflags = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) .NE. size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) .NE. size(fieldx,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(fieldx,3))
set_mismatch = set_mismatch .OR. (isize(2) .NE. size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) .NE. size(fieldy,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(fieldy,3))
set_mismatch = set_mismatch .OR. ANY( bsizex .NE. bufferx_size )
set_mismatch = set_mismatch .OR. ANY( bsizey .NE. buffery_size )
set_mismatch = set_mismatch .OR. (offset_type .NE. grid_offset_type)
set_mismatch = set_mismatch .OR. (upflags .NE. update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case (DGRID_NE, DGRID_SW)
position_x = NORTH
position_y = EAST
case default
call mpp_error(FATAL, "mpp_get_boundary.h: invalid value of grid_offset_type")
end select
boundx => search_bound_overlap(domain, position_x)
boundy => search_bound_overlap(domain, position_y)
call mpp_get_domain_shift(domain, ishift, jshift, position_x)
if(size(fieldx,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldx,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: fieldx is not on memory domain")
call mpp_get_domain_shift(domain, ishift, jshift, position_y)
if(size(fieldy,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldy,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: fieldy is not on memory domain")
if(ASSOCIATED(boundx) ) then
call mpp_do_get_boundary(f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, boundx, boundy, &
b_addrsx(:,1:l_size,1:ntile), b_addrsy(:,1:l_size,1:ntile), bsizex, &
bsizey, ksize, d_type, update_flags, grid_offset_type)
endif
l_size=0; f_addrsx=-9999; f_addrsy=-9999; bsizex=0; bsizey=0;
b_addrsx=-9999; b_addrsy=-9999; isize=0; jsize=0; ksize=0
end if
end subroutine mpp_get_boundary_r8_3dv
# 1731 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_get_boundary_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
! this routine is used to retrieve scalar boundary data for a symmetric domain (adjoint version).
subroutine mpp_get_boundary_ad_r8_2d(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &
position, complete, tile_count)
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,:)
real(8), intent(inout), optional :: ebuffer(:), sbuffer(:), wbuffer(:), nbuffer(:)
integer, intent(in), optional :: flags, position, tile_count
logical, intent(in), optional :: complete
real(8) :: field3D(size(field,1),size(field,2),1)
real(8), allocatable, dimension(:,:) :: ebuffer2D, sbuffer2D, wbuffer2D, nbuffer2D
integer :: xcount, ycount
integer :: ntile
logical :: need_ebuffer, need_sbuffer, need_wbuffer, need_nbuffer
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrs=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrs=-9999
integer, save :: bsize(4)=0, isize=0, jsize=0, ksize=0, pos, list=0, l_size=0, upflags
integer :: buffer_size(4)
integer :: max_ntile, tile, update_position, ishift, jshift
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(8) :: d_type
type(overlapSpec), pointer :: bound => NULL()
ntile = size(domain%x(:))
if(present(flags)) then
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_: flags is a dummy optional argument")
endif
update_position = CENTER
if(present(position)) update_position = position
!--- check whether the boundary buffers are needed
need_ebuffer=.false.; need_sbuffer=.false.; need_wbuffer=.false.; need_nbuffer=.false.
if( domain%symmetry .AND. PRESENT(position) ) then
select case(position)
case(CORNER)
need_ebuffer=.true.; need_sbuffer=.true.; need_wbuffer=.true.; need_nbuffer=.true.
case(NORTH)
need_sbuffer=.true.; need_nbuffer=.true.
case(EAST)
need_ebuffer=.true.; need_wbuffer=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
if(present(ebuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D: ebuffer should not be present when north is folded')
if(.not. need_ebuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: ebuffer should not be present')
b_addrs(1, list, tile) = LOC(ebuffer)
buffer_size(1) = size(ebuffer(:))
else
b_addrs(1, list, tile) = 0
buffer_size(1) = 1
end if
if(present(sbuffer)) then
if(.not. need_sbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: sbuffer should not be present')
b_addrs(2, list, tile) = LOC(sbuffer)
buffer_size(2) = size(sbuffer(:))
else
b_addrs(2, list, tile) = 0
buffer_size(2) = 1
end if
if(present(wbuffer)) then
if(.not. need_wbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: wbuffer should not be present')
b_addrs(3, list, tile) = LOC(wbuffer)
buffer_size(3) = size(wbuffer(:))
else
b_addrs(3, list, tile) = 0
buffer_size(3) = 1
end if
if(present(nbuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D: nbuffer should not be present when north is folded')
if(.not. need_nbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: nbuffer should not be present')
b_addrs(4, list, tile) = LOC(nbuffer)
buffer_size(4) = size(nbuffer(:))
else
b_addrs(4, list, tile) = 0
buffer_size(4) = 1
end if
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ksize = 1; pos = update_position
bsize = buffer_size
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize .NE. size(field,1))
set_mismatch = set_mismatch .OR. (jsize .NE. size(field,2))
set_mismatch = set_mismatch .OR. ANY( bsize .NE. buffer_size )
set_mismatch = set_mismatch .OR. (update_position .NE. pos)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
!--- only non-center data in a symmetric domain will be retrieved.
if(update_position == CENTER .OR. (.NOT. domain%symmetry) ) return
bound => search_bound_overlap(domain, update_position)
call mpp_get_domain_shift(domain, ishift, jshift, update_position)
if(size(field,1) .NE. domain%x(1)%memory%size+ishift .OR. size(field,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D: field is not on memory domain")
if(ASSOCIATED(bound)) then
call mpp_do_get_boundary_ad(f_addrs(1:l_size,1:ntile), domain, bound, b_addrs(:,1:l_size,1:ntile), &
bsize, ksize, d_type)
endif
l_size=0; f_addrs=-9999; bsize=0; b_addrs=-9999; isize=0; jsize=0; ksize=0
end if
return
end subroutine mpp_get_boundary_ad_r8_2d
!###############################################################################################
subroutine mpp_get_boundary_ad_r8_3d(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &
position, complete, tile_count)
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,:,:)
real(8), intent(inout), optional :: ebuffer(:,:), sbuffer(:,:), wbuffer(:,:), nbuffer(:,:)
integer, intent(in), optional :: flags, position, tile_count
logical, intent(in), optional :: complete
integer :: ntile
logical :: need_ebuffer, need_sbuffer, need_wbuffer, need_nbuffer
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrs=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrs=-9999
integer, save :: bsize(4)=0, isize=0, jsize=0, ksize=0, pos, list=0, l_size=0, upflags
integer :: buffer_size(4)
integer :: max_ntile, tile, update_position, ishift, jshift
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(8) :: d_type
type(overlapSpec), pointer :: bound => NULL()
ntile = size(domain%x(:))
if(present(flags)) then
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_: flags is a dummy optional argument")
endif
update_position = CENTER
if(present(position)) update_position = position
!--- check whether the required boundary buffers are present
need_ebuffer=.false.; need_sbuffer=.false.; need_wbuffer=.false.; need_nbuffer=.false.
if( domain%symmetry .AND. PRESENT(position) ) then
select case(position)
case(CORNER)
need_ebuffer=.true.; need_sbuffer=.true.; need_wbuffer=.true.; need_nbuffer=.true.
case(NORTH)
need_sbuffer=.true.; need_nbuffer=.true.
case(EAST)
need_ebuffer=.true.; need_wbuffer=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
if(present(ebuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D: ebuffer should not be present when north is folded')
if(.not. need_ebuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: ebuffer should not be present')
b_addrs(1, list, tile) = LOC(ebuffer)
buffer_size(1) = size(ebuffer,1)
else
b_addrs(1, list, tile) = 0
buffer_size(1) = 1
end if
if(present(sbuffer)) then
if(.not. need_sbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: sbuffer should not be present')
b_addrs(2, list, tile) = LOC(sbuffer)
buffer_size(2) = size(sbuffer,1)
else
b_addrs(2, list, tile) = 0
buffer_size(2) = 1
end if
if(present(wbuffer)) then
if(.not. need_wbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: wbuffer should not be present')
b_addrs(3, list, tile) = LOC(wbuffer)
buffer_size(3) = size(wbuffer,1)
else
b_addrs(3, list, tile) = 0
buffer_size(3) = 1
end if
if(present(nbuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D: nbuffer should not be present when north is folded')
if(.not. need_nbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: nbuffer should not be present')
b_addrs(4, list, tile) = LOC(nbuffer)
buffer_size(4) = size(nbuffer,1)
else
b_addrs(4, list, tile) = 0
buffer_size(4) = 1
end if
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ksize = size(field,3); pos = update_position
bsize = buffer_size
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize .NE. size(field,1))
set_mismatch = set_mismatch .OR. (jsize .NE. size(field,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(field,3))
set_mismatch = set_mismatch .OR. ANY( bsize .NE. buffer_size )
set_mismatch = set_mismatch .OR. (update_position .NE. pos)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
!--- only non-center data in a symmetric domain will be retrieved.
if(update_position == CENTER .OR. (.NOT. domain%symmetry) ) return
bound => search_bound_overlap(domain, update_position)
call mpp_get_domain_shift(domain, ishift, jshift, update_position)
if(size(field,1) .NE. domain%x(1)%memory%size+ishift .OR. size(field,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D: field is not on memory domain")
if(ASSOCIATED(bound)) then
call mpp_do_get_boundary_ad(f_addrs(1:l_size,1:ntile), domain, bound, b_addrs(:,1:l_size,1:ntile), &
bsize, ksize, d_type)
endif
l_size=0; f_addrs=-9999; bsize=0; b_addrs=-9999; isize=0; jsize=0; ksize=0
end if
end subroutine mpp_get_boundary_ad_r8_3d
!####################################################################
! vector update
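! Illustrative usage for a C-grid vector pair (a sketch; u_ad, v_ad and the buffer
! names/extents are hypothetical, and the domain is assumed not to be folded):
!   call mpp_get_boundary_ad_r8_2dv(u_ad, v_ad, domain,                &
!                                   ebufferx=ebx_ad, wbufferx=wbx_ad,  &
!                                   sbuffery=sby_ad, nbuffery=nby_ad,  &
!                                   gridtype=CGRID_NE)
! with gridtype=CGRID_NE the x-component is EAST-positioned and the y-component
! NORTH-positioned, so only those four buffers are accepted (see the need_* checks below).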
subroutine mpp_get_boundary_ad_r8_2dv(fieldx, fieldy, domain, ebufferx, sbufferx, wbufferx, nbufferx, &
ebuffery, sbuffery, wbuffery, nbuffery, flags, gridtype, &
complete, tile_count)
type(domain2D), intent(in) :: domain
real(8), intent(in) :: fieldx(:,:), fieldy(:,:)
real(8), intent(inout), optional :: ebufferx(:), sbufferx(:), wbufferx(:), nbufferx(:)
real(8), intent(inout), optional :: ebuffery(:), sbuffery(:), wbuffery(:), nbuffery(:)
integer, intent(in), optional :: flags, gridtype, tile_count
logical, intent(in), optional :: complete
integer :: ntile, update_flags
logical :: need_ebufferx, need_sbufferx, need_wbufferx, need_nbufferx
logical :: need_ebuffery, need_sbuffery, need_wbuffery, need_nbuffery
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsx=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsy=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsx=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsy=-9999
integer, save :: bsizex(4)=0, bsizey(4)=0, isize(2)=0, jsize(2)=0, ksize=0, l_size=0, list=0
integer, save :: offset_type, upflags
integer :: bufferx_size(4), buffery_size(4)
integer :: max_ntile, tile, grid_offset_type
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(8) :: d_type
type(overlapSpec), pointer :: boundx=>NULL()
type(overlapSpec), pointer :: boundy=>NULL()
integer :: position_x, position_y, ishift, jshift
ntile = size(domain%x(:))
update_flags = 0
if( PRESENT(flags) ) then
update_flags = flags
end if
!--- check whether the required boundary buffers are present
need_ebufferx=.FALSE.; need_sbufferx=.FALSE.
need_wbufferx=.FALSE.; need_nbufferx=.FALSE.
need_ebuffery=.FALSE.; need_sbuffery=.FALSE.
need_wbuffery=.FALSE.; need_nbuffery=.FALSE.
if( domain%symmetry .AND. PRESENT(gridtype) ) then
select case(gridtype)
case(BGRID_NE, BGRID_SW)
need_ebufferx=.true.; need_sbufferx=.true.; need_wbufferx=.true.; need_nbufferx=.true.
need_ebuffery=.true.; need_sbuffery=.true.; need_wbuffery=.true.; need_nbuffery=.true.
case(CGRID_NE, CGRID_SW)
need_ebufferx=.true.; need_wbufferx=.true.; need_sbuffery=.true.; need_nbuffery=.true.
case(DGRID_NE, DGRID_SW)
need_ebuffery=.true.; need_wbuffery=.true.; need_sbufferx=.true.; need_nbufferx=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(present(ebufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: ebufferx should not be present when north is folded')
if(.not. need_ebufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: ebufferx should not be present')
b_addrsx(1, list, tile) = LOC(ebufferx)
bufferx_size(1) = size(ebufferx,1)
else
b_addrsx(1, list, tile) = 0
bufferx_size(1) = 1
end if
if(present(sbufferx)) then
if(.not. need_sbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: sbufferx should not be present')
b_addrsx(2, list, tile) = LOC(sbufferx)
bufferx_size(2) = size(sbufferx,1)
else
b_addrsx(2, list, tile) = 0
bufferx_size(2) = 1
end if
if(present(wbufferx)) then
if(.not. need_wbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: wbufferx should not be present')
b_addrsx(3, list, tile) = LOC(wbufferx)
bufferx_size(3) = size(wbufferx,1)
else
b_addrsx(3, list, tile) = 0
bufferx_size(3) = 1
end if
if(present(nbufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: nbufferx should not be present when north is folded')
if(.not. need_nbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: nbufferx should not be present')
b_addrsx(4, list, tile) = LOC(nbufferx)
bufferx_size(4) = size(nbufferx,1)
else
b_addrsx(4, list, tile) = 0
bufferx_size(4) = 1
end if
if(present(ebuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: ebuffery should not be present when north is folded')
if(.not. need_ebuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: ebuffery should not be present')
b_addrsy(1, list, tile) = LOC(ebuffery)
buffery_size(1) = size(ebuffery,1)
else
b_addrsy(1, list, tile) = 0
buffery_size(1) = 1
end if
if(present(sbuffery)) then
if(.not. need_sbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: sbuffery should not be present')
b_addrsy(2, list, tile) = LOC(sbuffery)
buffery_size(2) = size(sbuffery,1)
else
b_addrsy(2, list, tile) = 0
buffery_size(2) = 1
end if
if(present(wbuffery)) then
if(.not. need_wbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: wbuffery should not be present')
b_addrsy(3, list, tile) = LOC(wbuffery)
buffery_size(3) = size(wbuffery,1)
else
b_addrsy(3, list, tile) = 0
buffery_size(3) = 1
end if
if(present(nbuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: nbuffery should not be present when north is folded')
if(.not. need_nbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: nbuffery should not be present')
b_addrsy(4, list, tile) = LOC(nbuffery)
buffery_size(4) = size(nbuffery,1)
else
b_addrsy(4, list, tile) = 0
buffery_size(4) = 1
end if
grid_offset_type = AGRID
if(present(gridtype)) grid_offset_type = gridtype
if(list == 1 .AND. tile == 1 )then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
ksize = 1; offset_type = grid_offset_type
bsizex = bufferx_size; bsizey = buffery_size; upflags = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) .NE. size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) .NE. size(fieldx,2))
set_mismatch = set_mismatch .OR. (isize(2) .NE. size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) .NE. size(fieldy,2))
set_mismatch = set_mismatch .OR. ANY( bsizex .NE. bufferx_size )
set_mismatch = set_mismatch .OR. ANY( bsizey .NE. buffery_size )
set_mismatch = set_mismatch .OR. (offset_type .NE. grid_offset_type)
set_mismatch = set_mismatch .OR. (upflags .NE. update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case (DGRID_NE, DGRID_SW)
position_x = NORTH
position_y = EAST
case default
call mpp_error(FATAL, "mpp_get_boundary.h: invalid value of grid_offset_type")
end select
boundx => search_bound_overlap(domain, position_x)
boundy => search_bound_overlap(domain, position_y)
call mpp_get_domain_shift(domain, ishift, jshift, position_x)
if(size(fieldx,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldx,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: fieldx is not on memory domain")
call mpp_get_domain_shift(domain, ishift, jshift, position_y)
if(size(fieldy,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldy,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: fieldy is not on memory domain")
if(ASSOCIATED(boundx) ) then
call mpp_do_get_boundary_ad(f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, boundx, boundy, &
b_addrsx(:,1:l_size,1:ntile), b_addrsy(:,1:l_size,1:ntile), bsizex, &
bsizey, ksize, d_type, update_flags, grid_offset_type)
endif
l_size=0; f_addrsx=-9999; f_addrsy=-9999; bsizex=0; bsizey=0;
b_addrsx=-9999; b_addrsy=-9999; isize=0; jsize=0; ksize=0
end if
return
end subroutine mpp_get_boundary_ad_r8_2dv
!###############################################################################################
subroutine mpp_get_boundary_ad_r8_3dv(fieldx, fieldy, domain, ebufferx, sbufferx, wbufferx, nbufferx, &
ebuffery, sbuffery, wbuffery, nbuffery, flags, gridtype, &
complete, tile_count)
type(domain2D), intent(in) :: domain
real(8), intent(in) :: fieldx(domain%x(1)%memory%begin:,domain%y(1)%memory%begin:,:)
real(8), intent(in) :: fieldy(domain%x(1)%memory%begin:,domain%y(1)%memory%begin:,:)
real(8), intent(inout), optional :: ebufferx(:,:), sbufferx(:,:), wbufferx(:,:), nbufferx(:,:)
real(8), intent(inout), optional :: ebuffery(:,:), sbuffery(:,:), wbuffery(:,:), nbuffery(:,:)
integer, intent(in), optional :: flags, gridtype, tile_count
logical, intent(in), optional :: complete
integer :: ntile, update_flags
logical :: need_ebufferx, need_sbufferx, need_wbufferx, need_nbufferx
logical :: need_ebuffery, need_sbuffery, need_wbuffery, need_nbuffery
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsx=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsy=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsx=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsy=-9999
integer, save :: bsizex(4)=0, bsizey(4)=0, isize(2)=0, jsize(2)=0, ksize=0, l_size=0, list=0
integer, save :: offset_type, upflags
integer :: bufferx_size(4), buffery_size(4)
integer :: max_ntile, tile, grid_offset_type
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(8) :: d_type
type(overlapSpec), pointer :: boundx=>NULL()
type(overlapSpec), pointer :: boundy=>NULL()
integer :: position_x, position_y, ishift, jshift
ntile = size(domain%x(:))
update_flags = 0
if( PRESENT(flags) ) then
update_flags = flags
end if
!--- check whether the required boundary buffers are present
need_ebufferx=.FALSE.; need_sbufferx=.FALSE.
need_wbufferx=.FALSE.; need_nbufferx=.FALSE.
need_ebuffery=.FALSE.; need_sbuffery=.FALSE.
need_wbuffery=.FALSE.; need_nbuffery=.FALSE.
if( domain%symmetry .AND. PRESENT(gridtype) ) then
select case(gridtype)
case(BGRID_NE, BGRID_SW)
need_ebufferx=.true.; need_sbufferx=.true.; need_wbufferx=.true.; need_nbufferx=.true.
need_ebuffery=.true.; need_sbuffery=.true.; need_wbuffery=.true.; need_nbuffery=.true.
case(CGRID_NE, CGRID_SW)
need_ebufferx=.true.; need_wbufferx=.true.; need_sbuffery=.true.; need_nbuffery=.true.
case(DGRID_NE, DGRID_SW)
need_ebuffery=.true.; need_wbuffery=.true.; need_sbufferx=.true.; need_nbufferx=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(present(ebufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: ebufferx should not be present when north is folded')
if(.not. need_ebufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: ebufferx should not be present')
b_addrsx(1, list, tile) = LOC(ebufferx)
bufferx_size(1) = size(ebufferx,1)
else
b_addrsx(1, list, tile) = 0
bufferx_size(1) = 1
end if
if(present(sbufferx)) then
if(.not. need_sbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: sbufferx should not be present')
b_addrsx(2, list, tile) = LOC(sbufferx)
bufferx_size(2) = size(sbufferx,1)
else
b_addrsx(2, list, tile) = 0
bufferx_size(2) = 1
end if
if(present(wbufferx)) then
if(.not. need_wbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: wbufferx should not be present')
b_addrsx(3, list, tile) = LOC(wbufferx)
bufferx_size(3) = size(wbufferx,1)
else
b_addrsx(3, list, tile) = 0
bufferx_size(3) = 1
end if
if(present(nbufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: nbufferx should not be present when north is folded')
if(.not. need_nbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: nbufferx should not be present')
b_addrsx(4, list, tile) = LOC(nbufferx)
bufferx_size(4) = size(nbufferx,1)
else
b_addrsx(4, list, tile) = 0
bufferx_size(4) = 1
end if
if(present(ebuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: ebuffery should not be present when north is folded')
if(.not. need_ebuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: ebuffery should not be present')
b_addrsy(1, list, tile) = LOC(ebuffery)
buffery_size(1) = size(ebuffery,1)
else
b_addrsy(1, list, tile) = 0
buffery_size(1) = 1
end if
if(present(sbuffery)) then
if(.not. need_sbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: sbuffery should not be present')
b_addrsy(2, list, tile) = LOC(sbuffery)
buffery_size(2) = size(sbuffery,1)
else
b_addrsy(2, list, tile) = 0
buffery_size(2) = 1
end if
if(present(wbuffery)) then
if(.not. need_wbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: wbuffery should not be present')
b_addrsy(3, list, tile) = LOC(wbuffery)
buffery_size(3) = size(wbuffery,1)
else
b_addrsy(3, list, tile) = 0
buffery_size(3) = 1
end if
if(present(nbuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: nbuffery should not be present when north is folded')
if(.not. need_nbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: nbuffery should not be present')
b_addrsy(4, list, tile) = LOC(nbuffery)
buffery_size(4) = size(nbuffery,1)
else
b_addrsy(4, list, tile) = 0
buffery_size(4) = 1
end if
grid_offset_type = AGRID
if(present(gridtype)) grid_offset_type = gridtype
if(list == 1 .AND. tile == 1 )then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
ksize = size(fieldx,3); offset_type = grid_offset_type
bsizex = bufferx_size; bsizey = buffery_size; upflags = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) .NE. size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) .NE. size(fieldx,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(fieldx,3))
set_mismatch = set_mismatch .OR. (isize(2) .NE. size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) .NE. size(fieldy,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(fieldy,3))
set_mismatch = set_mismatch .OR. ANY( bsizex .NE. bufferx_size )
set_mismatch = set_mismatch .OR. ANY( bsizey .NE. buffery_size )
set_mismatch = set_mismatch .OR. (offset_type .NE. grid_offset_type)
set_mismatch = set_mismatch .OR. (upflags .NE. update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case (DGRID_NE, DGRID_SW)
position_x = NORTH
position_y = EAST
case default
call mpp_error(FATAL, "mpp_get_boundary.h: invalid value of grid_offset_type")
end select
boundx => search_bound_overlap(domain, position_x)
boundy => search_bound_overlap(domain, position_y)
call mpp_get_domain_shift(domain, ishift, jshift, position_x)
if(size(fieldx,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldx,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: fieldx is not on memory domain")
call mpp_get_domain_shift(domain, ishift, jshift, position_y)
if(size(fieldy,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldy,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: fieldy is not on memory domain")
if(ASSOCIATED(boundx) ) then
call mpp_do_get_boundary_ad(f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, boundx, boundy, &
b_addrsx(:,1:l_size,1:ntile), b_addrsy(:,1:l_size,1:ntile), bsizex, &
bsizey, ksize, d_type, update_flags, grid_offset_type)
endif
l_size=0; f_addrsx=-9999; f_addrsy=-9999; bsizex=0; bsizey=0;
b_addrsx=-9999; b_addrsy=-9999; isize=0; jsize=0; ksize=0
end if
end subroutine mpp_get_boundary_ad_r8_3dv
# 1743 "../mpp/include/mpp_domains_misc.inc" 2
!#undef MPP_GET_BOUNDARY_4D_
!#define MPP_GET_BOUNDARY_4D_ mpp_get_boundary_r4_4d
!#undef MPP_GET_BOUNDARY_5D_
!#define MPP_GET_BOUNDARY_5D_ mpp_get_boundary_r4_5d
!#undef MPP_GET_BOUNDARY_4D_V_
!#define MPP_GET_BOUNDARY_4D_V_ mpp_get_boundary_r4_4dv
!#undef MPP_GET_BOUNDARY_5D_V_
!#define MPP_GET_BOUNDARY_5D_V_ mpp_get_boundary_r4_5dv
# 1 "../mpp/include/mpp_get_boundary.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
! this routine is used to retrieve scalar boundary data for a symmetric domain.
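! Illustrative usage (a sketch; the field/buffer names and extents are hypothetical,
! and the domain is assumed not to be folded at its north edge):
!   real(4) :: p(isd:ied+1,jsd:jed+1)           ! CORNER-positioned field
!   real(4) :: ebuf(nj+1), sbuf(ni+1), wbuf(nj+1), nbuf(ni+1)
!   call mpp_get_boundary_r4_2d(p, domain, ebuffer=ebuf, sbuffer=sbuf, &
!                               wbuffer=wbuf, nbuffer=nbuf, position=CORNER)
! at position=CORNER on a symmetric domain all four boundary buffers may be requested;
! with the default CENTER position no boundary data is retrieved (see the early return below).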
subroutine mpp_get_boundary_r4_2d(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &
position, complete, tile_count)
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,:)
real(4), intent(inout), optional :: ebuffer(:), sbuffer(:), wbuffer(:), nbuffer(:)
integer, intent(in), optional :: flags, position, tile_count
logical, intent(in), optional :: complete
real(4) :: field3D(size(field,1),size(field,2),1)
real(4), allocatable, dimension(:,:) :: ebuffer2D, sbuffer2D, wbuffer2D, nbuffer2D
integer :: xcount, ycount
integer :: ntile
logical :: need_ebuffer, need_sbuffer, need_wbuffer, need_nbuffer
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrs=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrs=-9999
integer, save :: bsize(4)=0, isize=0, jsize=0, ksize=0, pos, list=0, l_size=0, upflags
integer :: buffer_size(4)
integer :: max_ntile, tile, update_position, ishift, jshift
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(4) :: d_type
type(overlapSpec), pointer :: bound => NULL()
ntile = size(domain%x(:))
if(present(flags)) then
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_: flags is a dummy optional argument")
endif
update_position = CENTER
if(present(position)) update_position = position
!--- check whether the boundary buffers are needed
need_ebuffer=.false.; need_sbuffer=.false.; need_wbuffer=.false.; need_nbuffer=.false.
if( domain%symmetry .AND. PRESENT(position) ) then
select case(position)
case(CORNER)
need_ebuffer=.true.; need_sbuffer=.true.; need_wbuffer=.true.; need_nbuffer=.true.
case(NORTH)
need_sbuffer=.true.; need_nbuffer=.true.
case(EAST)
need_ebuffer=.true.; need_wbuffer=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
if(present(ebuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D: ebuffer should not be present when north is folded')
if(.not. need_ebuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: ebuffer should not be present')
b_addrs(1, list, tile) = LOC(ebuffer)
buffer_size(1) = size(ebuffer(:))
else
b_addrs(1, list, tile) = 0
buffer_size(1) = 1
end if
if(present(sbuffer)) then
if(.not. need_sbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: sbuffer should not be present')
b_addrs(2, list, tile) = LOC(sbuffer)
buffer_size(2) = size(sbuffer(:))
else
b_addrs(2, list, tile) = 0
buffer_size(2) = 1
end if
if(present(wbuffer)) then
if(.not. need_wbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: wbuffer should not be present')
b_addrs(3, list, tile) = LOC(wbuffer)
buffer_size(3) = size(wbuffer(:))
else
b_addrs(3, list, tile) = 0
buffer_size(3) = 1
end if
if(present(nbuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D: nbuffer should not be present when north is folded')
if(.not. need_nbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: nbuffer should not be present')
b_addrs(4, list, tile) = LOC(nbuffer)
buffer_size(4) = size(nbuffer(:))
else
b_addrs(4, list, tile) = 0
buffer_size(4) = 1
end if
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ksize = 1; pos = update_position
bsize = buffer_size
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize .NE. size(field,1))
set_mismatch = set_mismatch .OR. (jsize .NE. size(field,2))
set_mismatch = set_mismatch .OR. ANY( bsize .NE. buffer_size )
set_mismatch = set_mismatch .OR. (update_position .NE. pos)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
!--- only non-center data in a symmetric domain will be retrieved.
if(update_position == CENTER .OR. (.NOT. domain%symmetry) ) return
bound => search_bound_overlap(domain, update_position)
call mpp_get_domain_shift(domain, ishift, jshift, update_position)
if(size(field,1) .NE. domain%x(1)%memory%size+ishift .OR. size(field,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D: field is not on memory domain")
if(ASSOCIATED(bound)) then
call mpp_do_get_boundary(f_addrs(1:l_size,1:ntile), domain, bound, b_addrs(:,1:l_size,1:ntile), &
bsize, ksize, d_type)
endif
l_size=0; f_addrs=-9999; bsize=0; b_addrs=-9999; isize=0; jsize=0; ksize=0
end if
return
end subroutine mpp_get_boundary_r4_2d
!###############################################################################################
subroutine mpp_get_boundary_r4_3d(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &
position, complete, tile_count)
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,:,:)
real(4), intent(inout), optional :: ebuffer(:,:), sbuffer(:,:), wbuffer(:,:), nbuffer(:,:)
integer, intent(in), optional :: flags, position, tile_count
logical, intent(in), optional :: complete
integer :: ntile
logical :: need_ebuffer, need_sbuffer, need_wbuffer, need_nbuffer
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrs=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrs=-9999
integer, save :: bsize(4)=0, isize=0, jsize=0, ksize=0, pos, list=0, l_size=0, upflags
integer :: buffer_size(4)
integer :: max_ntile, tile, update_position, ishift, jshift
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(4) :: d_type
type(overlapSpec), pointer :: bound => NULL()
ntile = size(domain%x(:))
if(present(flags)) then
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_: flags is a dummy optional argument")
endif
update_position = CENTER
if(present(position)) update_position = position
!--- check whether the required boundary buffers are present
need_ebuffer=.false.; need_sbuffer=.false.; need_wbuffer=.false.; need_nbuffer=.false.
if( domain%symmetry .AND. PRESENT(position) ) then
select case(position)
case(CORNER)
need_ebuffer=.true.; need_sbuffer=.true.; need_wbuffer=.true.; need_nbuffer=.true.
case(NORTH)
need_sbuffer=.true.; need_nbuffer=.true.
case(EAST)
need_ebuffer=.true.; need_wbuffer=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
if(present(ebuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D: ebuffer should not be present when north is folded')
if(.not. need_ebuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: ebuffer should not be present')
b_addrs(1, list, tile) = LOC(ebuffer)
buffer_size(1) = size(ebuffer,1)
else
b_addrs(1, list, tile) = 0
buffer_size(1) = 1
end if
if(present(sbuffer)) then
if(.not. need_sbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: sbuffer should not be present')
b_addrs(2, list, tile) = LOC(sbuffer)
buffer_size(2) = size(sbuffer,1)
else
b_addrs(2, list, tile) = 0
buffer_size(2) = 1
end if
if(present(wbuffer)) then
if(.not. need_wbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: wbuffer should not be present')
b_addrs(3, list, tile) = LOC(wbuffer)
buffer_size(3) = size(wbuffer,1)
else
b_addrs(3, list, tile) = 0
buffer_size(3) = 1
end if
if(present(nbuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D: nbuffer should not be present when north is folded')
if(.not. need_nbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: nbuffer should not be present')
b_addrs(4, list, tile) = LOC(nbuffer)
buffer_size(4) = size(nbuffer,1)
else
b_addrs(4, list, tile) = 0
buffer_size(4) = 1
end if
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ksize = size(field,3); pos = update_position
bsize = buffer_size
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize .NE. size(field,1))
set_mismatch = set_mismatch .OR. (jsize .NE. size(field,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(field,3))
set_mismatch = set_mismatch .OR. ANY( bsize .NE. buffer_size )
set_mismatch = set_mismatch .OR. (update_position .NE. pos)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
!--- only non-center data in a symmetric domain will be retrieved.
if(update_position == CENTER .OR. (.NOT. domain%symmetry) ) return
bound => search_bound_overlap(domain, update_position)
call mpp_get_domain_shift(domain, ishift, jshift, update_position)
if(size(field,1) .NE. domain%x(1)%memory%size+ishift .OR. size(field,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D: field is not on memory domain")
if(ASSOCIATED(bound)) then
call mpp_do_get_boundary(f_addrs(1:l_size,1:ntile), domain, bound, b_addrs(:,1:l_size,1:ntile), &
bsize, ksize, d_type)
endif
l_size=0; f_addrs=-9999; bsize=0; b_addrs=-9999; isize=0; jsize=0; ksize=0
end if
end subroutine mpp_get_boundary_r4_3d
!####################################################################
! vector update
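! Illustrative usage for a B-grid vector pair (a sketch; the names and extents are
! hypothetical, and the domain is assumed not to be folded):
!   call mpp_get_boundary_r4_2dv(u, v, domain,                               &
!                                ebufferx=ebx, sbufferx=sbx, wbufferx=wbx,   &
!                                nbufferx=nbx, ebuffery=eby, sbuffery=sby,   &
!                                wbuffery=wby, nbuffery=nby, gridtype=BGRID_NE)
! with gridtype=BGRID_NE both components are CORNER-positioned and all eight
! boundary buffers may be supplied.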
subroutine mpp_get_boundary_r4_2dv(fieldx, fieldy, domain, ebufferx, sbufferx, wbufferx, nbufferx, &
ebuffery, sbuffery, wbuffery, nbuffery, flags, gridtype, &
complete, tile_count)
type(domain2D), intent(in) :: domain
real(4), intent(in) :: fieldx(:,:), fieldy(:,:)
real(4), intent(inout), optional :: ebufferx(:), sbufferx(:), wbufferx(:), nbufferx(:)
real(4), intent(inout), optional :: ebuffery(:), sbuffery(:), wbuffery(:), nbuffery(:)
integer, intent(in), optional :: flags, gridtype, tile_count
logical, intent(in), optional :: complete
integer :: ntile, update_flags
logical :: need_ebufferx, need_sbufferx, need_wbufferx, need_nbufferx
logical :: need_ebuffery, need_sbuffery, need_wbuffery, need_nbuffery
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsx=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsy=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsx=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsy=-9999
integer, save :: bsizex(4)=0, bsizey(4)=0, isize(2)=0, jsize(2)=0, ksize=0, l_size=0, list=0
integer, save :: offset_type, upflags
integer :: bufferx_size(4), buffery_size(4)
integer :: max_ntile, tile, grid_offset_type
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(4) :: d_type
type(overlapSpec), pointer :: boundx=>NULL()
type(overlapSpec), pointer :: boundy=>NULL()
integer :: position_x, position_y, ishift, jshift
ntile = size(domain%x(:))
update_flags = 0
if( PRESENT(flags) ) then
update_flags = flags
end if
!--- check whether the required boundary buffers are present
need_ebufferx=.FALSE.; need_sbufferx=.FALSE.
need_wbufferx=.FALSE.; need_nbufferx=.FALSE.
need_ebuffery=.FALSE.; need_sbuffery=.FALSE.
need_wbuffery=.FALSE.; need_nbuffery=.FALSE.
if( domain%symmetry .AND. PRESENT(gridtype) ) then
select case(gridtype)
case(BGRID_NE, BGRID_SW)
need_ebufferx=.true.; need_sbufferx=.true.; need_wbufferx=.true.; need_nbufferx=.true.
need_ebuffery=.true.; need_sbuffery=.true.; need_wbuffery=.true.; need_nbuffery=.true.
case(CGRID_NE, CGRID_SW)
need_ebufferx=.true.; need_wbufferx=.true.; need_sbuffery=.true.; need_nbuffery=.true.
case(DGRID_NE, DGRID_SW)
need_ebuffery=.true.; need_wbuffery=.true.; need_sbufferx=.true.; need_nbufferx=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(present(ebufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: ebufferx should not be present when north is folded')
if(.not. need_ebufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: ebufferx should not be present')
b_addrsx(1, list, tile) = LOC(ebufferx)
bufferx_size(1) = size(ebufferx,1)
else
b_addrsx(1, list, tile) = 0
bufferx_size(1) = 1
end if
if(present(sbufferx)) then
if(.not. need_sbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: sbufferx should not be present')
b_addrsx(2, list, tile) = LOC(sbufferx)
bufferx_size(2) = size(sbufferx,1)
else
b_addrsx(2, list, tile) = 0
bufferx_size(2) = 1
end if
if(present(wbufferx)) then
if(.not. need_wbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: wbufferx should not be present')
b_addrsx(3, list, tile) = LOC(wbufferx)
bufferx_size(3) = size(wbufferx,1)
else
b_addrsx(3, list, tile) = 0
bufferx_size(3) = 1
end if
if(present(nbufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: nbufferx should not be present when north is folded')
if(.not. need_nbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: nbufferx should not be present')
b_addrsx(4, list, tile) = LOC(nbufferx)
bufferx_size(4) = size(nbufferx,1)
else
b_addrsx(4, list, tile) = 0
bufferx_size(4) = 1
end if
if(present(ebuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: ebuffery should not be present when north is folded')
if(.not. need_ebuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: ebuffery should not be present')
b_addrsy(1, list, tile) = LOC(ebuffery)
buffery_size(1) = size(ebuffery,1)
else
b_addrsy(1, list, tile) = 0
buffery_size(1) = 1
end if
if(present(sbuffery)) then
if(.not. need_sbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: sbuffery should not be present')
b_addrsy(2, list, tile) = LOC(sbuffery)
buffery_size(2) = size(sbuffery,1)
else
b_addrsy(2, list, tile) = 0
buffery_size(2) = 1
end if
if(present(wbuffery)) then
if(.not. need_wbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: wbuffery should not be present')
b_addrsy(3, list, tile) = LOC(wbuffery)
buffery_size(3) = size(wbuffery,1)
else
b_addrsy(3, list, tile) = 0
buffery_size(3) = 1
end if
if(present(nbuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: nbuffery should not be present when north is folded')
if(.not. need_nbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: nbuffery should not be present')
b_addrsy(4, list, tile) = LOC(nbuffery)
buffery_size(4) = size(nbuffery,1)
else
b_addrsy(4, list, tile) = 0
buffery_size(4) = 1
end if
grid_offset_type = AGRID
if(present(gridtype)) grid_offset_type = gridtype
if(list == 1 .AND. tile == 1 )then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
ksize = 1; offset_type = grid_offset_type
bsizex = bufferx_size; bsizey = buffery_size; upflags = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) .NE. size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) .NE. size(fieldx,2))
set_mismatch = set_mismatch .OR. (isize(2) .NE. size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) .NE. size(fieldy,2))
set_mismatch = set_mismatch .OR. ANY( bsizex .NE. bufferx_size )
set_mismatch = set_mismatch .OR. ANY( bsizey .NE. buffery_size )
set_mismatch = set_mismatch .OR. (offset_type .NE. grid_offset_type)
set_mismatch = set_mismatch .OR. (upflags .NE. update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case (DGRID_NE, DGRID_SW)
position_x = NORTH
position_y = EAST
case default
call mpp_error(FATAL, "mpp_get_boundary.h: invalid value of grid_offset_type")
end select
boundx => search_bound_overlap(domain, position_x)
boundy => search_bound_overlap(domain, position_y)
call mpp_get_domain_shift(domain, ishift, jshift, position_x)
if(size(fieldx,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldx,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: fieldx is not on memory domain")
call mpp_get_domain_shift(domain, ishift, jshift, position_y)
if(size(fieldy,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldy,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: fieldy is not on memory domain")
if(ASSOCIATED(boundx) ) then
call mpp_do_get_boundary(f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, boundx, boundy, &
b_addrsx(:,1:l_size,1:ntile), b_addrsy(:,1:l_size,1:ntile), bsizex, &
bsizey, ksize, d_type, update_flags, grid_offset_type)
endif
l_size=0; f_addrsx=-9999; f_addrsy=-9999; bsizex=0; bsizey=0;
b_addrsx=-9999; b_addrsy=-9999; isize=0; jsize=0; ksize=0
end if
return
end subroutine mpp_get_boundary_r4_2dv
!###############################################################################################
subroutine mpp_get_boundary_r4_3dv(fieldx, fieldy, domain, ebufferx, sbufferx, wbufferx, nbufferx, &
ebuffery, sbuffery, wbuffery, nbuffery, flags, gridtype, &
complete, tile_count)
type(domain2D), intent(in) :: domain
real(4), intent(in) :: fieldx(domain%x(1)%memory%begin:,domain%y(1)%memory%begin:,:)
real(4), intent(in) :: fieldy(domain%x(1)%memory%begin:,domain%y(1)%memory%begin:,:)
real(4), intent(inout), optional :: ebufferx(:,:), sbufferx(:,:), wbufferx(:,:), nbufferx(:,:)
real(4), intent(inout), optional :: ebuffery(:,:), sbuffery(:,:), wbuffery(:,:), nbuffery(:,:)
integer, intent(in), optional :: flags, gridtype, tile_count
logical, intent(in), optional :: complete
integer :: ntile, update_flags
logical :: need_ebufferx, need_sbufferx, need_wbufferx, need_nbufferx
logical :: need_ebuffery, need_sbuffery, need_wbuffery, need_nbuffery
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsx=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsy=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsx=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsy=-9999
integer, save :: bsizex(4)=0, bsizey(4)=0, isize(2)=0, jsize(2)=0, ksize=0, l_size=0, list=0
integer, save :: offset_type, upflags
integer :: bufferx_size(4), buffery_size(4)
integer :: max_ntile, tile, grid_offset_type
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(4) :: d_type
type(overlapSpec), pointer :: boundx=>NULL()
type(overlapSpec), pointer :: boundy=>NULL()
integer :: position_x, position_y, ishift, jshift
ntile = size(domain%x(:))
update_flags = 0
if( PRESENT(flags) ) then
update_flags = flags
end if
!--- check whether the required boundary buffers are present
need_ebufferx=.FALSE.; need_sbufferx=.FALSE.
need_wbufferx=.FALSE.; need_nbufferx=.FALSE.
need_ebuffery=.FALSE.; need_sbuffery=.FALSE.
need_wbuffery=.FALSE.; need_nbuffery=.FALSE.
if( domain%symmetry .AND. PRESENT(gridtype) ) then
select case(gridtype)
case(BGRID_NE, BGRID_SW)
need_ebufferx=.true.; need_sbufferx=.true.; need_wbufferx=.true.; need_nbufferx=.true.
need_ebuffery=.true.; need_sbuffery=.true.; need_wbuffery=.true.; need_nbuffery=.true.
case(CGRID_NE, CGRID_SW)
need_ebufferx=.true.; need_wbufferx=.true.; need_sbuffery=.true.; need_nbuffery=.true.
case(DGRID_NE, DGRID_SW)
need_ebuffery=.true.; need_wbuffery=.true.; need_sbufferx=.true.; need_nbufferx=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(present(ebufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: ebufferx should not be present when north is folded')
if(.not. need_ebufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: ebufferx should not be present')
b_addrsx(1, list, tile) = LOC(ebufferx)
bufferx_size(1) = size(ebufferx,1)
else
b_addrsx(1, list, tile) = 0
bufferx_size(1) = 1
end if
if(present(sbufferx)) then
if(.not. need_sbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: sbufferx should not be present')
b_addrsx(2, list, tile) = LOC(sbufferx)
bufferx_size(2) = size(sbufferx,1)
else
b_addrsx(2, list, tile) = 0
bufferx_size(2) = 1
end if
if(present(wbufferx)) then
if(.not. need_wbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: wbufferx should not be present')
b_addrsx(3, list, tile) = LOC(wbufferx)
bufferx_size(3) = size(wbufferx,1)
else
b_addrsx(3, list, tile) = 0
bufferx_size(3) = 1
end if
if(present(nbufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: nbufferx should not be present when north is folded')
if(.not. need_nbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: nbufferx should not be present')
b_addrsx(4, list, tile) = LOC(nbufferx)
bufferx_size(4) = size(nbufferx,1)
else
b_addrsx(4, list, tile) = 0
bufferx_size(4) = 1
end if
if(present(ebuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: ebuffery should not be present when north is folded')
if(.not. need_ebuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: ebuffery should not be present')
b_addrsy(1, list, tile) = LOC(ebuffery)
buffery_size(1) = size(ebuffery,1)
else
b_addrsy(1, list, tile) = 0
buffery_size(1) = 1
end if
if(present(sbuffery)) then
if(.not. need_sbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: sbuffery should not be present')
b_addrsy(2, list, tile) = LOC(sbuffery)
buffery_size(2) = size(sbuffery,1)
else
b_addrsy(2, list, tile) = 0
buffery_size(2) = 1
end if
if(present(wbuffery)) then
if(.not. need_wbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: wbuffery should not be present')
b_addrsy(3, list, tile) = LOC(wbuffery)
buffery_size(3) = size(wbuffery,1)
else
b_addrsy(3, list, tile) = 0
buffery_size(3) = 1
end if
if(present(nbuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: nbuffery should not be present when north is folded')
if(.not. need_nbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: nbuffery should not be present')
b_addrsy(4, list, tile) = LOC(nbuffery)
buffery_size(4) = size(nbuffery,1)
else
b_addrsy(4, list, tile) = 0
buffery_size(4) = 1
end if
grid_offset_type = AGRID
if(present(gridtype)) grid_offset_type = gridtype
if(list == 1 .AND. tile == 1 )then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
ksize = size(fieldx,3); offset_type = grid_offset_type
bsizex = bufferx_size; bsizey = buffery_size; upflags = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) .NE. size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) .NE. size(fieldx,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(fieldx,3))
set_mismatch = set_mismatch .OR. (isize(2) .NE. size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) .NE. size(fieldy,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(fieldy,3))
set_mismatch = set_mismatch .OR. ANY( bsizex .NE. bufferx_size )
set_mismatch = set_mismatch .OR. ANY( bsizey .NE. buffery_size )
set_mismatch = set_mismatch .OR. (offset_type .NE. grid_offset_type)
set_mismatch = set_mismatch .OR. (upflags .NE. update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case (DGRID_NE, DGRID_SW)
position_x = NORTH
position_y = EAST
case default
call mpp_error(FATAL, "mpp_get_boundary.h: invalid value of grid_offset_type")
end select
boundx => search_bound_overlap(domain, position_x)
boundy => search_bound_overlap(domain, position_y)
call mpp_get_domain_shift(domain, ishift, jshift, position_x)
if(size(fieldx,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldx,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: fieldx is not on memory domain")
call mpp_get_domain_shift(domain, ishift, jshift, position_y)
if(size(fieldy,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldy,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: fieldy is not on memory domain")
if(ASSOCIATED(boundx) ) then
call mpp_do_get_boundary(f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, boundx, boundy, &
b_addrsx(:,1:l_size,1:ntile), b_addrsy(:,1:l_size,1:ntile), bsizex, &
bsizey, ksize, d_type, update_flags, grid_offset_type)
endif
l_size=0; f_addrsx=-9999; f_addrsy=-9999; bsizex=0; bsizey=0;
b_addrsx=-9999; b_addrsy=-9999; isize=0; jsize=0; ksize=0
end if
end subroutine mpp_get_boundary_r4_3dv
# 1764 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_get_boundary_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
! this routine is the adjoint of the scalar boundary-data retrieval for a symmetric domain.
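! Illustrative usage (a sketch; the names and extents are hypothetical, and the
! domain is assumed not to be folded at its north edge):
!   real(4) :: v_ad(isd:ied,jsd:jed+1)          ! adjoint of a NORTH-positioned field
!   real(4) :: sbuf_ad(ni), nbuf_ad(ni)
!   call mpp_get_boundary_ad_r4_2d(v_ad, domain, sbuffer=sbuf_ad, nbuffer=nbuf_ad, &
!                                  position=NORTH)
! for position=NORTH only the south/north buffers are accepted, matching the
! need_sbuffer/need_nbuffer checks below.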
subroutine mpp_get_boundary_ad_r4_2d(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &
position, complete, tile_count)
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,:)
real(4), intent(inout), optional :: ebuffer(:), sbuffer(:), wbuffer(:), nbuffer(:)
integer, intent(in), optional :: flags, position, tile_count
logical, intent(in), optional :: complete
real(4) :: field3D(size(field,1),size(field,2),1)
real(4), allocatable, dimension(:,:) :: ebuffer2D, sbuffer2D, wbuffer2D, nbuffer2D
integer :: xcount, ycount
integer :: ntile
logical :: need_ebuffer, need_sbuffer, need_wbuffer, need_nbuffer
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrs=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrs=-9999
integer, save :: bsize(4)=0, isize=0, jsize=0, ksize=0, pos, list=0, l_size=0, upflags
integer :: buffer_size(4)
integer :: max_ntile, tile, update_position, ishift, jshift
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(4) :: d_type
type(overlapSpec), pointer :: bound => NULL()
ntile = size(domain%x(:))
if(present(flags)) then
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_: flags is a dummy optional argument")
endif
update_position = CENTER
if(present(position)) update_position = position
!--- check whether the boundary buffers are needed
need_ebuffer=.false.; need_sbuffer=.false.; need_wbuffer=.false.; need_nbuffer=.false.
if( domain%symmetry .AND. PRESENT(position) ) then
select case(position)
case(CORNER)
need_ebuffer=.true.; need_sbuffer=.true.; need_wbuffer=.true.; need_nbuffer=.true.
case(NORTH)
need_sbuffer=.true.; need_nbuffer=.true.
case(EAST)
need_ebuffer=.true.; need_wbuffer=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
if(present(ebuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D: ebuffer should not be present when north is folded')
if(.not. need_ebuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: ebuffer should not be present')
b_addrs(1, list, tile) = LOC(ebuffer)
buffer_size(1) = size(ebuffer(:))
else
b_addrs(1, list, tile) = 0
buffer_size(1) = 1
end if
if(present(sbuffer)) then
if(.not. need_sbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: sbuffer should not be present')
b_addrs(2, list, tile) = LOC(sbuffer)
buffer_size(2) = size(sbuffer(:))
else
b_addrs(2, list, tile) = 0
buffer_size(2) = 1
end if
if(present(wbuffer)) then
if(.not. need_wbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: wbuffer should not be present')
b_addrs(3, list, tile) = LOC(wbuffer)
buffer_size(3) = size(wbuffer(:))
else
b_addrs(3, list, tile) = 0
buffer_size(3) = 1
end if
if(present(nbuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D: nbuffer should not be present when north is folded')
if(.not. need_nbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: nbuffer should not be present')
b_addrs(4, list, tile) = LOC(nbuffer)
buffer_size(4) = size(nbuffer(:))
else
b_addrs(4, list, tile) = 0
buffer_size(4) = 1
end if
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ksize = 1; pos = update_position
bsize = buffer_size
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize .NE. size(field,1))
set_mismatch = set_mismatch .OR. (jsize .NE. size(field,2))
set_mismatch = set_mismatch .OR. ANY( bsize .NE. buffer_size )
set_mismatch = set_mismatch .OR. (update_position .NE. pos)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
!--- only non-center data in a symmetric domain will be retrieved.
if(update_position == CENTER .OR. (.NOT. domain%symmetry) ) return
bound => search_bound_overlap(domain, update_position)
call mpp_get_domain_shift(domain, ishift, jshift, update_position)
if(size(field,1) .NE. domain%x(1)%memory%size+ishift .OR. size(field,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D: field is not on memory domain")
if(ASSOCIATED(bound)) then
call mpp_do_get_boundary_ad(f_addrs(1:l_size,1:ntile), domain, bound, b_addrs(:,1:l_size,1:ntile), &
bsize, ksize, d_type)
endif
l_size=0; f_addrs=-9999; bsize=0; b_addrs=-9999; isize=0; jsize=0; ksize=0
end if
return
end subroutine mpp_get_boundary_ad_r4_2d
!###############################################################################################
subroutine mpp_get_boundary_ad_r4_3d(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &
position, complete, tile_count)
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,:,:)
real(4), intent(inout), optional :: ebuffer(:,:), sbuffer(:,:), wbuffer(:,:), nbuffer(:,:)
integer, intent(in), optional :: flags, position, tile_count
logical, intent(in), optional :: complete
integer :: ntile
logical :: need_ebuffer, need_sbuffer, need_wbuffer, need_nbuffer
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrs=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrs=-9999
integer, save :: bsize(4)=0, isize=0, jsize=0, ksize=0, pos, list=0, l_size=0, upflags
integer :: buffer_size(4)
integer :: max_ntile, tile, update_position, ishift, jshift
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(4) :: d_type
type(overlapSpec), pointer :: bound => NULL()
ntile = size(domain%x(:))
if(present(flags)) then
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_: flags is a dummy optional argument")
endif
update_position = CENTER
if(present(position)) update_position = position
!--- determine which boundary buffers are needed for this position
need_ebuffer=.false.; need_sbuffer=.false.; need_wbuffer=.false.; need_nbuffer=.false.
if( domain%symmetry .AND. PRESENT(position) ) then
select case(position)
case(CORNER)
need_ebuffer=.true.; need_sbuffer=.true.; need_wbuffer=.true.; need_nbuffer=.true.
case(NORTH)
need_sbuffer=.true.; need_nbuffer=.true.
case(EAST)
need_ebuffer=.true.; need_wbuffer=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrs(list, tile) = LOC(field)
if(present(ebuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D: ebuffer should not be present when north is folded')
if(.not. need_ebuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: ebuffer should not be present')
b_addrs(1, list, tile) = LOC(ebuffer)
buffer_size(1) = size(ebuffer,1)
else
b_addrs(1, list, tile) = 0
buffer_size(1) = 1
end if
if(present(sbuffer)) then
if(.not. need_sbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: sbuffer should not be present')
b_addrs(2, list, tile) = LOC(sbuffer)
buffer_size(2) = size(sbuffer,1)
else
b_addrs(2, list, tile) = 0
buffer_size(2) = 1
end if
if(present(wbuffer)) then
if(.not. need_wbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: wbuffer should not be present')
b_addrs(3, list, tile) = LOC(wbuffer)
buffer_size(3) = size(wbuffer,1)
else
b_addrs(3, list, tile) = 0
buffer_size(3) = 1
end if
if(present(nbuffer)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D: nbuffer should not be present when north is folded')
if(.not. need_nbuffer) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: nbuffer should not be present')
b_addrs(4, list, tile) = LOC(nbuffer)
buffer_size(4) = size(nbuffer,1)
else
b_addrs(4, list, tile) = 0
buffer_size(4) = 1
end if
if(list == 1 .AND. tile == 1 )then
isize=size(field,1); jsize=size(field,2); ksize = size(field,3); pos = update_position
bsize = buffer_size
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize .NE. size(field,1))
set_mismatch = set_mismatch .OR. (jsize .NE. size(field,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(field,3))
set_mismatch = set_mismatch .OR. ANY( bsize .NE. buffer_size )
set_mismatch = set_mismatch .OR. (update_position .NE. pos)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
!--- only non-center data in a symmetric domain will be retrieved.
if(update_position == CENTER .OR. (.NOT. domain%symmetry) ) return
bound => search_bound_overlap(domain, update_position)
call mpp_get_domain_shift(domain, ishift, jshift, update_position)
if(size(field,1) .NE. domain%x(1)%memory%size+ishift .OR. size(field,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D: field is not on memory domain")
if(ASSOCIATED(bound)) then
call mpp_do_get_boundary_ad(f_addrs(1:l_size,1:ntile), domain, bound, b_addrs(:,1:l_size,1:ntile), &
bsize, ksize, d_type)
endif
l_size=0; f_addrs=-9999; bsize=0; b_addrs=-9999; isize=0; jsize=0; ksize=0
end if
end subroutine mpp_get_boundary_ad_r4_3d
!####################################################################
! vector update
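! The buffers that must be supplied in the vector form follow the grid staggering:
! BGRID_NE/BGRID_SW require all four buffers for both components, CGRID_NE/CGRID_SW
! require (ebufferx, wbufferx) plus (sbuffery, nbuffery), and DGRID_NE/DGRID_SW the
! reverse pairing. A hypothetical C-grid call, assuming the generic mpp_get_boundary_ad
! binding (field and buffer names are placeholders):
!   call mpp_get_boundary_ad(u, v, domain, ebufferx=ebu, wbufferx=wbu, &
!                            sbuffery=sbv, nbuffery=nbv, gridtype=CGRID_NE)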
subroutine mpp_get_boundary_ad_r4_2dv(fieldx, fieldy, domain, ebufferx, sbufferx, wbufferx, nbufferx, &
ebuffery, sbuffery, wbuffery, nbuffery, flags, gridtype, &
complete, tile_count)
type(domain2D), intent(in) :: domain
real(4), intent(in) :: fieldx(:,:), fieldy(:,:)
real(4), intent(inout), optional :: ebufferx(:), sbufferx(:), wbufferx(:), nbufferx(:)
real(4), intent(inout), optional :: ebuffery(:), sbuffery(:), wbuffery(:), nbuffery(:)
integer, intent(in), optional :: flags, gridtype, tile_count
logical, intent(in), optional :: complete
integer :: ntile, update_flags
logical :: need_ebufferx, need_sbufferx, need_wbufferx, need_nbufferx
logical :: need_ebuffery, need_sbuffery, need_wbuffery, need_nbuffery
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsx=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsy=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsx=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsy=-9999
integer, save :: bsizex(4)=0, bsizey(4)=0, isize(2)=0, jsize(2)=0, ksize=0, l_size=0, list=0
integer, save :: offset_type, upflags
integer :: bufferx_size(4), buffery_size(4)
integer :: max_ntile, tile, grid_offset_type
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(4) :: d_type
type(overlapSpec), pointer :: boundx=>NULL()
type(overlapSpec), pointer :: boundy=>NULL()
integer :: position_x, position_y, ishift, jshift
ntile = size(domain%x(:))
update_flags = 0
if( PRESENT(flags) ) then
update_flags = flags
end if
!--- determine which boundary buffers are needed for this grid staggering
need_ebufferx=.FALSE.; need_sbufferx=.FALSE.
need_wbufferx=.FALSE.; need_nbufferx=.FALSE.
need_ebuffery=.FALSE.; need_sbuffery=.FALSE.
need_wbuffery=.FALSE.; need_nbuffery=.FALSE.
if( domain%symmetry .AND. PRESENT(gridtype) ) then
select case(gridtype)
case(BGRID_NE, BGRID_SW)
need_ebufferx=.true.; need_sbufferx=.true.; need_wbufferx=.true.; need_nbufferx=.true.
need_ebuffery=.true.; need_sbuffery=.true.; need_wbuffery=.true.; need_nbuffery=.true.
case(CGRID_NE, CGRID_SW)
need_ebufferx=.true.; need_wbufferx=.true.; need_sbuffery=.true.; need_nbuffery=.true.
case(DGRID_NE, DGRID_SW)
need_ebuffery=.true.; need_wbuffery=.true.; need_sbufferx=.true.; need_nbufferx=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(present(ebufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: ebufferx should not be present when north is folded')
if(.not. need_ebufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: ebufferx should not be present')
b_addrsx(1, list, tile) = LOC(ebufferx)
bufferx_size(1) = size(ebufferx,1)
else
b_addrsx(1, list, tile) = 0
bufferx_size(1) = 1
end if
if(present(sbufferx)) then
if(.not. need_sbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: sbufferx should not be present')
b_addrsx(2, list, tile) = LOC(sbufferx)
bufferx_size(2) = size(sbufferx,1)
else
b_addrsx(2, list, tile) = 0
bufferx_size(2) = 1
end if
if(present(wbufferx)) then
if(.not. need_wbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: wbufferx should not be present')
b_addrsx(3, list, tile) = LOC(wbufferx)
bufferx_size(3) = size(wbufferx,1)
else
b_addrsx(3, list, tile) = 0
bufferx_size(3) = 1
end if
if(present(nbufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: nbufferx should not be present when north is folded')
if(.not. need_nbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: nbufferx should not be present')
b_addrsx(4, list, tile) = LOC(nbufferx)
bufferx_size(4) = size(nbufferx,1)
else
b_addrsx(4, list, tile) = 0
bufferx_size(4) = 1
end if
if(present(ebuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: ebuffery should not be present when north is folded')
if(.not. need_ebuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: ebuffery should not be present')
b_addrsy(1, list, tile) = LOC(ebuffery)
buffery_size(1) = size(ebuffery,1)
else
b_addrsy(1, list, tile) = 0
buffery_size(1) = 1
end if
if(present(sbuffery)) then
if(.not. need_sbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: sbuffery should not be present')
b_addrsy(2, list, tile) = LOC(sbuffery)
buffery_size(2) = size(sbuffery,1)
else
b_addrsy(2, list, tile) = 0
buffery_size(2) = 1
end if
if(present(wbuffery)) then
if(.not. need_wbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: wbuffery should not be present')
b_addrsy(3, list, tile) = LOC(wbuffery)
buffery_size(3) = size(wbuffery,1)
else
b_addrsy(3, list, tile) = 0
buffery_size(3) = 1
end if
if(present(nbuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_2D_V: nbuffery should not be present when north is folded')
if(.not. need_nbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: nbuffery should not be present')
b_addrsy(4, list, tile) = LOC(nbuffery)
buffery_size(4) = size(nbuffery,1)
else
b_addrsy(4, list, tile) = 0
buffery_size(4) = 1
end if
grid_offset_type = AGRID
if(present(gridtype)) grid_offset_type = gridtype
if(list == 1 .AND. tile == 1 )then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
ksize = 1; offset_type = grid_offset_type
bsizex = bufferx_size; bsizey = buffery_size; upflags = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) .NE. size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) .NE. size(fieldx,2))
set_mismatch = set_mismatch .OR. (isize(2) .NE. size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) .NE. size(fieldy,2))
set_mismatch = set_mismatch .OR. ANY( bsizex .NE. bufferx_size )
set_mismatch = set_mismatch .OR. ANY( bsizey .NE. buffery_size )
set_mismatch = set_mismatch .OR. (offset_type .NE. grid_offset_type)
set_mismatch = set_mismatch .OR. (upflags .NE. update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_2D_V: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case (DGRID_NE, DGRID_SW)
position_x = NORTH
position_y = EAST
case default
call mpp_error(FATAL, "mpp_get_boundary.h: invalid value of grid_offset_type")
end select
boundx => search_bound_overlap(domain, position_x)
boundy => search_bound_overlap(domain, position_y)
call mpp_get_domain_shift(domain, ishift, jshift, position_x)
if(size(fieldx,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldx,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: fieldx is not on memory domain")
call mpp_get_domain_shift(domain, ishift, jshift, position_y)
if(size(fieldy,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldy,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_2D_V: fieldy is not on memory domain")
if(ASSOCIATED(boundx) ) then
call mpp_do_get_boundary_ad(f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, boundx, boundy, &
b_addrsx(:,1:l_size,1:ntile), b_addrsy(:,1:l_size,1:ntile), bsizex, &
bsizey, ksize, d_type, update_flags, grid_offset_type)
endif
l_size=0; f_addrsx=-9999; f_addrsy=-9999; bsizex=0; bsizey=0;
b_addrsx=-9999; b_addrsy=-9999; isize=0; jsize=0; ksize=0
end if
return
end subroutine mpp_get_boundary_ad_r4_2dv
!###############################################################################################
subroutine mpp_get_boundary_ad_r4_3dv(fieldx, fieldy, domain, ebufferx, sbufferx, wbufferx, nbufferx, &
ebuffery, sbuffery, wbuffery, nbuffery, flags, gridtype, &
complete, tile_count)
type(domain2D), intent(in) :: domain
real(4), intent(in) :: fieldx(domain%x(1)%memory%begin:,domain%y(1)%memory%begin:,:)
real(4), intent(in) :: fieldy(domain%x(1)%memory%begin:,domain%y(1)%memory%begin:,:)
real(4), intent(inout), optional :: ebufferx(:,:), sbufferx(:,:), wbufferx(:,:), nbufferx(:,:)
real(4), intent(inout), optional :: ebuffery(:,:), sbuffery(:,:), wbuffery(:,:), nbuffery(:,:)
integer, intent(in), optional :: flags, gridtype, tile_count
logical, intent(in), optional :: complete
integer :: ntile, update_flags
logical :: need_ebufferx, need_sbufferx, need_wbufferx, need_nbufferx
logical :: need_ebuffery, need_sbuffery, need_wbuffery, need_nbuffery
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsx=-9999
integer(8),dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save :: f_addrsy=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsx=-9999
integer(8),dimension(4,MAX_DOMAIN_FIELDS, MAX_TILES),save :: b_addrsy=-9999
integer, save :: bsizex(4)=0, bsizey(4)=0, isize(2)=0, jsize(2)=0, ksize=0, l_size=0, list=0
integer, save :: offset_type, upflags
integer :: bufferx_size(4), buffery_size(4)
integer :: max_ntile, tile, grid_offset_type
logical :: do_update, is_complete, set_mismatch
character(len=3) :: text
real(4) :: d_type
type(overlapSpec), pointer :: boundx=>NULL()
type(overlapSpec), pointer :: boundy=>NULL()
integer :: position_x, position_y, ishift, jshift
ntile = size(domain%x(:))
update_flags = 0
if( PRESENT(flags) ) then
update_flags = flags
end if
!--- determine which boundary buffers are needed for this grid staggering
need_ebufferx=.FALSE.; need_sbufferx=.FALSE.
need_wbufferx=.FALSE.; need_nbufferx=.FALSE.
need_ebuffery=.FALSE.; need_sbuffery=.FALSE.
need_wbuffery=.FALSE.; need_nbuffery=.FALSE.
if( domain%symmetry .AND. PRESENT(gridtype) ) then
select case(gridtype)
case(BGRID_NE, BGRID_SW)
need_ebufferx=.true.; need_sbufferx=.true.; need_wbufferx=.true.; need_nbufferx=.true.
need_ebuffery=.true.; need_sbuffery=.true.; need_wbuffery=.true.; need_nbuffery=.true.
case(CGRID_NE, CGRID_SW)
need_ebufferx=.true.; need_wbufferx=.true.; need_sbuffery=.true.; need_nbuffery=.true.
case(DGRID_NE, DGRID_SW)
need_ebuffery=.true.; need_wbuffery=.true.; need_sbufferx=.true.; need_nbufferx=.true.
end select
end if
tile = 1
max_ntile = domain%max_ntile_pe
is_complete = .true.
if(PRESENT(complete)) then
is_complete = complete
end if
if(max_ntile>1) then
if(ntile>MAX_TILES) then
write( text,'(i2)' ) MAX_TILES
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: MAX_TILES='//text//' is less than number of tiles on this pe.' )
endif
if(.NOT. present(tile_count) ) call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: "// &
"optional argument tile_count should be present when number of tiles on this pe is more than 1")
tile = tile_count
end if
do_update = (tile == ntile) .AND. is_complete
list = list+1
if(list > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
f_addrsx(list, tile) = LOC(fieldx)
f_addrsy(list, tile) = LOC(fieldy)
if(present(ebufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: ebufferx should not be present when north is folded')
if(.not. need_ebufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: ebufferx should not be present')
b_addrsx(1, list, tile) = LOC(ebufferx)
bufferx_size(1) = size(ebufferx,1)
else
b_addrsx(1, list, tile) = 0
bufferx_size(1) = 1
end if
if(present(sbufferx)) then
if(.not. need_sbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: sbufferx should not be present')
b_addrsx(2, list, tile) = LOC(sbufferx)
bufferx_size(2) = size(sbufferx,1)
else
b_addrsx(2, list, tile) = 0
bufferx_size(2) = 1
end if
if(present(wbufferx)) then
if(.not. need_wbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: wbufferx should not be present')
b_addrsx(3, list, tile) = LOC(wbufferx)
bufferx_size(3) = size(wbufferx,1)
else
b_addrsx(3, list, tile) = 0
bufferx_size(3) = 1
end if
if(present(nbufferx)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: nbufferx should not be present when north is folded')
if(.not. need_nbufferx) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: nbufferx should not be present')
b_addrsx(4, list, tile) = LOC(nbufferx)
bufferx_size(4) = size(nbufferx,1)
else
b_addrsx(4, list, tile) = 0
bufferx_size(4) = 1
end if
if(present(ebuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: ebuffery should not be present when north is folded')
if(.not. need_ebuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: ebuffery should not be present')
b_addrsy(1, list, tile) = LOC(ebuffery)
buffery_size(1) = size(ebuffery,1)
else
b_addrsy(1, list, tile) = 0
buffery_size(1) = 1
end if
if(present(sbuffery)) then
if(.not. need_sbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: sbuffery should not be present')
b_addrsy(2, list, tile) = LOC(sbuffery)
buffery_size(2) = size(sbuffery,1)
else
b_addrsy(2, list, tile) = 0
buffery_size(2) = 1
end if
if(present(wbuffery)) then
if(.not. need_wbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: wbuffery should not be present')
b_addrsy(3, list, tile) = LOC(wbuffery)
buffery_size(3) = size(wbuffery,1)
else
b_addrsy(3, list, tile) = 0
buffery_size(3) = 1
end if
if(present(nbuffery)) then
if(BTEST(domain%fold,NORTH)) call mpp_error(FATAL, &
'MPP_GET_BOUNDARY_3D_V: nbuffery should not be present when north is folded')
if(.not. need_nbuffery) call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: nbuffery should not be present')
b_addrsy(4, list, tile) = LOC(nbuffery)
buffery_size(4) = size(nbuffery,1)
else
b_addrsy(4, list, tile) = 0
buffery_size(4) = 1
end if
grid_offset_type = AGRID
if(present(gridtype)) grid_offset_type = gridtype
if(list == 1 .AND. tile == 1 )then
isize(1)=size(fieldx,1); jsize(1)=size(fieldx,2); isize(2)=size(fieldy,1); jsize(2)=size(fieldy,2)
ksize = size(fieldx,3); offset_type = grid_offset_type
bsizex = bufferx_size; bsizey = buffery_size; upflags = update_flags
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (isize(1) .NE. size(fieldx,1))
set_mismatch = set_mismatch .OR. (jsize(1) .NE. size(fieldx,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(fieldx,3))
set_mismatch = set_mismatch .OR. (isize(2) .NE. size(fieldy,1))
set_mismatch = set_mismatch .OR. (jsize(2) .NE. size(fieldy,2))
set_mismatch = set_mismatch .OR. (ksize .NE. size(fieldy,3))
set_mismatch = set_mismatch .OR. ANY( bsizex .NE. bufferx_size )
set_mismatch = set_mismatch .OR. ANY( bsizey .NE. buffery_size )
set_mismatch = set_mismatch .OR. (offset_type .NE. grid_offset_type)
set_mismatch = set_mismatch .OR. (upflags .NE. update_flags)
if(set_mismatch)then
write( text,'(i2)' ) list
call mpp_error(FATAL,'MPP_GET_BOUNDARY_3D_V: Incompatible field at count '//text//' for group update.' )
endif
endif
if(is_complete) then
l_size = list
list = 0
end if
if(do_update )then
select case(grid_offset_type)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case (DGRID_NE, DGRID_SW)
position_x = NORTH
position_y = EAST
case default
call mpp_error(FATAL, "mpp_get_boundary.h: invalid value of grid_offset_type")
end select
boundx => search_bound_overlap(domain, position_x)
boundy => search_bound_overlap(domain, position_y)
call mpp_get_domain_shift(domain, ishift, jshift, position_x)
if(size(fieldx,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldx,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: fieldx is not on memory domain")
call mpp_get_domain_shift(domain, ishift, jshift, position_y)
if(size(fieldy,1) .NE. domain%x(1)%memory%size+ishift .OR. size(fieldy,2) .NE. domain%y(1)%memory%size+jshift ) &
call mpp_error(FATAL, "MPP_GET_BOUNDARY_3D_V: fieldy is not on memory domain")
if(ASSOCIATED(boundx) ) then
call mpp_do_get_boundary_ad(f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, boundx, boundy, &
b_addrsx(:,1:l_size,1:ntile), b_addrsy(:,1:l_size,1:ntile), bsizex, &
bsizey, ksize, d_type, update_flags, grid_offset_type)
endif
l_size=0; f_addrsx=-9999; f_addrsy=-9999; bsizex=0; bsizey=0;
b_addrsx=-9999; b_addrsy=-9999; isize=0; jsize=0; ksize=0
end if
end subroutine mpp_get_boundary_ad_r4_3dv
# 1778 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_get_boundary.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_get_boundary_r8_3d( f_addrs, domain, bound, b_addrs, bsize, ke, d_type)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: bound
integer(8), intent(in) :: f_addrs(:,:)
integer(8), intent(in) :: b_addrs(:,:,:)
integer, intent(in) :: bsize(:), ke
real(8), intent(in) :: d_type ! creates unique interface
real(8) :: field(bound%xbegin:bound%xend, bound%ybegin:bound%yend,ke)
real(8) :: ebuffer(bsize(1), ke), sbuffer(bsize(2), ke), wbuffer(bsize(3), ke), nbuffer(bsize(4), ke)
pointer(ptr_field, field)
pointer(ptr_ebuffer, ebuffer)
pointer(ptr_sbuffer, sbuffer)
pointer(ptr_wbuffer, wbuffer)
pointer(ptr_nbuffer, nbuffer)
integer, allocatable :: msg1(:), msg2(:)
logical :: recv(4), send(4)
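! Boundary directions are indexed 1=EAST, 2=SOUTH, 3=WEST, 4=NORTH throughout this
! routine, matching the first dimension of b_addrs and the unpack case blocks below.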
integer :: nlist, buffer_pos, pos, tMe, from_pe
integer :: i, j, k, l, m, n, index, buffer_recv_size
integer :: is, ie, js, je, msgsize, l_size, num
character(len=8) :: text
integer :: outunit
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
ptr = LOC(mpp_domains_stack)
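! buffer is aliased onto the shared module work array mpp_domains_stack via the Cray
! pointer above, so packing and unpacking below reuse that stack instead of allocating
! local storage; overflow is trapped against mpp_domains_stack_size.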
outunit = stdout()
l_size = size(f_addrs,1)
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrs(1,:,1) == 0)
if( num == 0 ) then
recv(1) = .true.
else if( num == l_size ) then
recv(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of ebuffer with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrs(2,:,1) == 0)
if( num == 0 ) then
recv(2) = .true.
else if( num == l_size ) then
recv(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of sbuffer with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrs(3,:,1) == 0)
if( num == 0 ) then
recv(3) = .true.
else if( num == l_size ) then
recv(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of wbuffer with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrs(4,:,1) == 0)
if( num == 0 ) then
recv(4) = .true.
else if( num == l_size ) then
recv(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of nbuffer with null address should be 0 or l_size")
endif
send = recv
nlist = size(domain%list(:))
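! When debug_message_passing is set, the expected message sizes are exchanged first and
! any send/recv mismatch aborts before real data moves; this block only checks
! consistency and does not change the boundary exchange itself.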
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, bound%nrecv
msgsize = 0
do n = 1, bound%recv(m)%count
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = bound%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(l) = msgsize
enddo
do m = 1, bound%nsend
msgsize = 0
do n = 1, bound%send(m)%count
if(recv(bound%send(m)%dir(n))) then
is = bound%send(m)%is(n); ie = bound%send(m)%ie(n)
js = bound%send(m)%js(n); je = bound%send(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
call mpp_send( msgsize, plen=1, to_pe=bound%send(m)%pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_get_boundary: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_get_boundary: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!recv
buffer_pos = 0
do m = 1, bound%nrecv
msgsize = 0
do n = 1, bound%recv(m)%count
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_OLD: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=bound%recv(m)%pe, block=.false., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
! send
do m = 1, bound%nsend
pos = buffer_pos
do n = 1, bound%send(m)%count
if(send(bound%send(m)%dir(n))) then
is = bound%send(m)%is(n); ie = bound%send(m)%ie(n)
js = bound%send(m)%js(n); je = bound%send(m)%je(n)
tMe = bound%send(m)%tileMe(n)
select case( bound%send(m)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case (ONE_HUNDRED_EIGHTY)
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
end if ! if(send(bound%dir(n)))
end do ! do n = 1, bound%count
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
!--- maybe we do not need the following stack size check.
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_OLD: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=bound%send(m)%pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
end do
call mpp_clock_begin(wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(wait_clock)
buffer_pos = buffer_recv_size
!unpack recv: traverse the buffer in reverse order, walking buffer_pos backward from
!buffer_recv_size so each message is unpacked at its correct offset.
do m = bound%nrecv, 1, -1
do n = bound%recv(m)%count, 1, -1
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = bound%recv(m)%tileMe(n)
select case( bound%recv(m)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebuffer = b_addrs(1, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
ebuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbuffer = b_addrs(2, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
sbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbuffer = b_addrs(3, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
wbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbuffer = b_addrs(4, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
nbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
end select
end if
end do
end do
call mpp_sync_self( )
end subroutine mpp_do_get_boundary_r8_3d
subroutine mpp_do_get_boundary_r8_3dv(f_addrsx, f_addrsy, domain, boundx, boundy, b_addrsx, b_addrsy, &
bsizex, bsizey, ke, d_type, flags, gridtype)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: boundx, boundy
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
integer(8), intent(in) :: b_addrsx(:,:,:), b_addrsy(:,:,:)
integer, intent(in) :: bsizex(:), bsizey(:), ke
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: flags
integer, intent(in) :: gridtype
real(8) :: fieldx(boundx%xbegin:boundx%xend, boundx%ybegin:boundx%yend,ke)
real(8) :: fieldy(boundy%xbegin:boundy%xend, boundy%ybegin:boundy%yend,ke)
real(8) :: ebufferx(bsizex(1), ke), sbufferx(bsizex(2), ke), wbufferx(bsizex(3), ke), nbufferx(bsizex(4), ke)
real(8) :: ebuffery(bsizey(1), ke), sbuffery(bsizey(2), ke), wbuffery(bsizey(3), ke), nbuffery(bsizey(4), ke)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
pointer(ptr_ebufferx, ebufferx)
pointer(ptr_sbufferx, sbufferx)
pointer(ptr_wbufferx, wbufferx)
pointer(ptr_nbufferx, nbufferx)
pointer(ptr_ebuffery, ebuffery)
pointer(ptr_sbuffery, sbuffery)
pointer(ptr_wbuffery, wbuffery)
pointer(ptr_nbuffery, nbuffery)
integer, allocatable :: msg1(:), msg2(:)
logical :: recvx(4), sendx(4)
logical :: recvy(4), sendy(4)
integer :: nlist, buffer_pos, pos, tMe, m
integer :: is, ie, js, je, msgsize, l_size, buffer_recv_size
integer :: i, j, k, l, n, index, to_pe, from_pe
integer :: rank_x, rank_y, cur_rank, ind_x, ind_y
integer :: nsend_x, nsend_y, nrecv_x, nrecv_y, num
character(len=8) :: text
integer :: outunit, shift, midpoint
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
ptr = LOC(mpp_domains_stack)
outunit = stdout()
l_size = size(f_addrsx,1)
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrsx(1,:,1) == 0)
if( num == 0 ) then
recvx(1) = .true.
else if( num == l_size ) then
recvx(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of ebufferx with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrsx(2,:,1) == 0)
if( num == 0 ) then
recvx(2) = .true.
else if( num == l_size ) then
recvx(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of sbufferx with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrsx(3,:,1) == 0)
if( num == 0 ) then
recvx(3) = .true.
else if( num == l_size ) then
recvx(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of wbufferx with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrsx(4,:,1) == 0)
if( num == 0 ) then
recvx(4) = .true.
else if( num == l_size ) then
recvx(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of nbufferx with null address should be 0 or l_size")
endif
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrsy(1,:,1) == 0)
if( num == 0 ) then
recvy(1) = .true.
else if( num == l_size ) then
recvy(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of ebuffery with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrsy(2,:,1) == 0)
if( num == 0 ) then
recvy(2) = .true.
else if( num == l_size ) then
recvy(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of sbuffery with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrsy(3,:,1) == 0)
if( num == 0 ) then
recvy(3) = .true.
else if( num == l_size ) then
recvy(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of wbuffery with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrsy(4,:,1) == 0)
if( num == 0 ) then
recvy(4) = .true.
else if( num == l_size ) then
recvy(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of nbuffery with null address should be 0 or l_size")
endif
sendx = recvx
sendy = recvy
nlist = size(domain%list(:))
nsend_x = boundx%nsend
nsend_y = boundy%nsend
nrecv_x = boundx%nrecv
nrecv_y = boundy%nrecv
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = boundx%recv(ind_x)%pe
do n = 1, boundx%recv(ind_x)%count
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = boundy%recv(ind_y)%pe
do n = 1, boundy%recv(ind_y)%count
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
m = from_pe-mpp_root_pe()
call mpp_recv( msg1(m), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_3)
msg2(m) = msgsize
end do
cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
msgsize = 0
if(cur_rank == rank_x) then
to_pe = boundx%send(ind_x)%pe
do n = 1, boundx%send(ind_x)%count
if(sendx(boundx%send(ind_x)%dir(n))) then
is = boundx%send(ind_x)%is(n); ie = boundx%send(ind_x)%ie(n)
js = boundx%send(ind_x)%js(n); je = boundx%send(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
endif
enddo
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = boundx%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = boundy%send(ind_y)%pe
do n = 1, boundy%send(ind_y)%count
if(sendy(boundy%send(ind_y)%dir(n))) then
is = boundy%send(ind_y)%is(n); ie = boundy%send(ind_y)%ie(n)
js = boundy%send(ind_y)%js(n); je = boundy%send(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = boundy%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
call mpp_send( msgsize, plen=1, to_pe=to_pe, tag=COMM_TAG_3)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_get_boundaryV: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_get_boundary_V: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!recv
buffer_pos = 0
cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = boundx%recv(ind_x)%pe
do n = 1, boundx%recv(ind_x)%count
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = boundy%recv(ind_y)%pe
do n = 1, boundy%recv(ind_y)%count
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_V_: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_4 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
! send
cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
if(cur_rank == rank_x) then
to_pe = boundx%send(ind_x)%pe
do n = 1, boundx%send(ind_x)%count
if(sendx(boundx%send(ind_x)%dir(n))) then
is = boundx%send(ind_x)%is(n); ie = boundx%send(ind_x)%ie(n)
js = boundx%send(ind_x)%js(n); je = boundx%send(ind_x)%je(n)
tMe = boundx%send(ind_x)%tileMe(n)
select case( boundx%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case( NINETY )
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case (ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select
end if ! if(send(boundx%dir(n)))
end do !do n = 1, boundx%count
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = boundx%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = boundy%send(ind_y)%pe
do n = 1, boundy%send(ind_y)%count
if(sendy(boundy%send(ind_y)%dir(n))) then
is = boundy%send(ind_y)%is(n); ie = boundy%send(ind_y)%ie(n)
js = boundy%send(ind_y)%js(n); je = boundy%send(ind_y)%je(n)
tMe = boundy%send(ind_y)%tileMe(n)
select case( boundy%send(ind_y)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case (ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select
end if ! if(send(boundy%dir(n)))
end do ! do n = 1, boundy%count
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = boundy%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
!--- maybe we do not need the following stack size check.
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_V_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_4 )
buffer_pos = pos
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!unpack recv: traverse the buffer in reverse order, walking buffer_pos backward from
!buffer_recv_size so each message is unpacked at its correct offset.
buffer_pos = buffer_recv_size
cur_rank = get_rank_unpack(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while(ind_x >0 .OR. ind_y >0)
if(cur_rank == rank_y) then
do n = boundy%recv(ind_y)%count, 1, -1
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = boundy%recv(ind_y)%tileMe(n)
select case( boundy%recv(ind_y)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebuffery = b_addrsy(1, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
ebuffery(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbuffery = b_addrsy(2, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
sbuffery(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbuffery = b_addrsy(3, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
wbuffery(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbuffery = b_addrsy(4, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
nbuffery(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
end select
end if
end do
ind_y = ind_y-1
if(ind_y .GT. 0) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
if(cur_rank == rank_x) then
do n = boundx%recv(ind_x)%count, 1, -1
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = boundx%recv(ind_x)%tileMe(n)
select case( boundx%recv(ind_x)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebufferx = b_addrsx(1, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
ebufferx(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbufferx = b_addrsx(2, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
sbufferx(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbufferx = b_addrsx(3, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
wbufferx(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbufferx = b_addrsx(4, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
nbufferx(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
end select
end if
end do
ind_x = ind_x-1
if(ind_x .GT. 0) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
end do
!--- the domain is always symmetric here
shift = 1
tMe = 1
if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
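! At a northern fold with a B grid, the loop below steps across the global i range in
! strides of midpoint and, on the PEs whose compute domain starts at those points,
! zeroes the fold-row entry of wbufferx/wbuffery so the vector components are not left
! non-zero at the pole points.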
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift - domain%y(1)%compute%begin + 1
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
do i = is ,ie, midpoint
if( domain%x(1)%compute%begin == i )then
do l=1,l_size
ptr_wbufferx = b_addrsx(3, l, tMe)
ptr_wbuffery = b_addrsy(3, l, tMe)
do k = 1,ke
wbufferx(j,k) = 0
wbuffery(j,k) = 0
end do
end do
end if
end do
endif
endif
endif
call mpp_sync_self( )
end subroutine mpp_do_get_boundary_r8_3dv
# 1787 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_get_boundary_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_get_boundary_ad_r8_3d( f_addrs, domain, bound, b_addrs, bsize, ke, d_type)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: bound
integer(8), intent(in) :: f_addrs(:,:)
integer(8), intent(in) :: b_addrs(:,:,:)
integer, intent(in) :: bsize(:), ke
real(8), intent(in) :: d_type ! creates unique interface
real(8) :: field(bound%xbegin:bound%xend, bound%ybegin:bound%yend,ke)
real(8) :: ebuffer(bsize(1), ke), sbuffer(bsize(2), ke), wbuffer(bsize(3), ke), nbuffer(bsize(4), ke)
pointer(ptr_field, field)
pointer(ptr_ebuffer, ebuffer)
pointer(ptr_sbuffer, sbuffer)
pointer(ptr_wbuffer, wbuffer)
pointer(ptr_nbuffer, nbuffer)
integer, allocatable :: msg1(:), msg2(:)
logical :: recv(4), send(4)
integer :: nlist, buffer_pos, pos, tMe, from_pe
integer :: i, j, k, l, m, n, index, buffer_recv_size
integer :: is, ie, js, je, msgsize, l_size, num
character(len=8) :: text
integer :: outunit
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
ptr = LOC(mpp_domains_stack)
outunit = stdout()
l_size = size(f_addrs,1)
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrs(1,:,1) == 0)
if( num == 0 ) then
recv(1) = .true.
else if( num == l_size ) then
recv(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of ebuffer with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrs(2,:,1) == 0)
if( num == 0 ) then
recv(2) = .true.
else if( num == l_size ) then
recv(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of sbuffer with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrs(3,:,1) == 0)
if( num == 0 ) then
recv(3) = .true.
else if( num == l_size ) then
recv(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of wbuffer with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrs(4,:,1) == 0)
if( num == 0 ) then
recv(4) = .true.
else if( num == l_size ) then
recv(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of nbuffer with null address should be 0 or l_size")
endif
send = recv
nlist = size(domain%list(:))
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, bound%nrecv
msgsize = 0
do n = 1, bound%recv(m)%count
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = bound%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(l) = msgsize
enddo
do m = 1, bound%nsend
msgsize = 0
do n = 1, bound%send(m)%count
if(recv(bound%send(m)%dir(n))) then
is = bound%send(m)%is(n); ie = bound%send(m)%ie(n)
js = bound%send(m)%js(n); je = bound%send(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
call mpp_send( msgsize, plen=1, to_pe=bound%send(m)%pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_get_boundary: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_get_boundary: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!recv
buffer_pos = 0
do m = 1, bound%nrecv
msgsize = 0
do n = 1, bound%recv(m)%count
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_OLD: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=bound%recv(m)%pe, block=.false., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
! send
do m = 1, bound%nsend
pos = buffer_pos
do n = 1, bound%send(m)%count
if(send(bound%send(m)%dir(n))) then
is = bound%send(m)%is(n); ie = bound%send(m)%ie(n)
js = bound%send(m)%js(n); je = bound%send(m)%je(n)
tMe = bound%send(m)%tileMe(n)
select case( bound%send(m)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case (ONE_HUNDRED_EIGHTY)
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
end if ! if(send(bound%dir(n)))
end do ! do n = 1, bound%count
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
!--- maybe we do not need the following stack size check.
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_OLD: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=bound%send(m)%pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
end do
call mpp_clock_begin(wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(wait_clock)
buffer_pos = buffer_recv_size
!unpack recv
!unpack buffer in reverse order.
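!--- walking bound%recv backwards lets buffer_pos step down from buffer_recv_size, so each
!--- message is unpacked from the same offset at which it was received.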
do m = bound%nrecv, 1, -1
do n = bound%recv(m)%count, 1, -1
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = bound%recv(m)%tileMe(n)
select case( bound%recv(m)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebuffer = b_addrs(1, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
ebuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbuffer = b_addrs(2, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
sbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbuffer = b_addrs(3, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
wbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbuffer = b_addrs(4, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
nbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
end select
end if
end do
end do
call mpp_sync_self( )
end subroutine mpp_do_get_boundary_ad_r8_3d
subroutine mpp_do_get_boundary_ad_r8_3dv(f_addrsx, f_addrsy, domain, boundx, boundy, b_addrsx, b_addrsy, &
bsizex, bsizey, ke, d_type, flags, gridtype)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: boundx, boundy
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
integer(8), intent(in) :: b_addrsx(:,:,:), b_addrsy(:,:,:)
integer, intent(in) :: bsizex(:), bsizey(:), ke
real(8), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: flags
integer, intent(in) :: gridtype
real(8) :: fieldx(boundx%xbegin:boundx%xend, boundx%ybegin:boundx%yend,ke)
real(8) :: fieldy(boundy%xbegin:boundy%xend, boundy%ybegin:boundy%yend,ke)
real(8) :: ebufferx(bsizex(1), ke), sbufferx(bsizex(2), ke), wbufferx(bsizex(3), ke), nbufferx(bsizex(4), ke)
real(8) :: ebuffery(bsizey(1), ke), sbuffery(bsizey(2), ke), wbuffery(bsizey(3), ke), nbuffery(bsizey(4), ke)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
pointer(ptr_ebufferx, ebufferx)
pointer(ptr_sbufferx, sbufferx)
pointer(ptr_wbufferx, wbufferx)
pointer(ptr_nbufferx, nbufferx)
pointer(ptr_ebuffery, ebuffery)
pointer(ptr_sbuffery, sbuffery)
pointer(ptr_wbuffery, wbuffery)
pointer(ptr_nbuffery, nbuffery)
integer, allocatable :: msg1(:), msg2(:)
logical :: recvx(4), sendx(4)
logical :: recvy(4), sendy(4)
integer :: nlist, buffer_pos,buffer_pos_old, pos, pos_, tMe, m
integer :: is, ie, js, je, msgsize, l_size, buffer_recv_size, msgsize_send
integer :: i, j, k, l, n, index, to_pe, from_pe
integer :: rank_x, rank_y, cur_rank, ind_x, ind_y
integer :: nsend_x, nsend_y, nrecv_x, nrecv_y, num
character(len=8) :: text
integer :: outunit, shift, midpoint
real(8) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
ptr = LOC(mpp_domains_stack)
outunit = stdout()
l_size = size(f_addrsx,1)
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrsx(1,:,1) == 0)
if( num == 0 ) then
recvx(1) = .true.
else if( num == l_size ) then
recvx(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of ebufferx with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrsx(2,:,1) == 0)
if( num == 0 ) then
recvx(2) = .true.
else if( num == l_size ) then
recvx(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of sbufferx with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrsx(3,:,1) == 0)
if( num == 0 ) then
recvx(3) = .true.
else if( num == l_size ) then
recvx(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of wbufferx with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrsx(4,:,1) == 0)
if( num == 0 ) then
recvx(4) = .true.
else if( num == l_size ) then
recvx(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of nbufferx with null address should be 0 or l_size")
endif
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrsy(1,:,1) == 0)
if( num == 0 ) then
recvy(1) = .true.
else if( num == l_size ) then
recvy(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of ebuffery with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrsy(2,:,1) == 0)
if( num == 0 ) then
recvy(2) = .true.
else if( num == l_size ) then
recvy(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of sbuffery with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrsy(3,:,1) == 0)
if( num == 0 ) then
recvy(3) = .true.
else if( num == l_size ) then
recvy(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of wbuffery with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrsy(4,:,1) == 0)
if( num == 0 ) then
recvy(4) = .true.
else if( num == l_size ) then
recvy(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of nbuffery with null address should be 0 or l_size")
endif
sendx = recvx
sendy = recvy
nlist = size(domain%list(:))
nsend_x = boundx%nsend
nsend_y = boundy%nsend
nrecv_x = boundx%nrecv
nrecv_y = boundy%nrecv
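!--- the x- and y-component overlap lists are walked together, ordered by the rank offset
!--- of the remote PE, so both components share a single message per communicating PE.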
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = boundx%recv(ind_x)%pe
do n = 1, boundx%recv(ind_x)%count
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = boundy%recv(ind_y)%pe
do n = 1, boundy%recv(ind_y)%count
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
m = from_pe-mpp_root_pe()
call mpp_recv( msg1(m), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_3)
msg2(m) = msgsize
end do
cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
msgsize = 0
if(cur_rank == rank_x) then
to_pe = boundx%send(ind_x)%pe
do n = 1, boundx%send(ind_x)%count
if(sendx(boundx%send(ind_x)%dir(n))) then
is = boundx%send(ind_x)%is(n); ie = boundx%send(ind_x)%ie(n)
js = boundx%send(ind_x)%js(n); je = boundx%send(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
endif
enddo
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = boundx%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = boundy%send(ind_y)%pe
do n = 1, boundy%send(ind_y)%count
if(sendy(boundy%send(ind_y)%dir(n))) then
is = boundy%send(ind_y)%is(n); ie = boundy%send(ind_y)%ie(n)
js = boundy%send(ind_y)%js(n); je = boundy%send(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = boundy%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
call mpp_send( msgsize, plen=1, to_pe=to_pe, tag=COMM_TAG_3)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_get_boundaryV: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_get_boundary_V: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!--- the domain is always symmetric here
shift = 1
tMe = 1
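!--- north-fold handling for BGRID vector fields: zero the west-boundary buffer entries at
!--- the fold row on PEs whose compute domain starts at a pole point (global x begin or midpoint).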
if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift - domain%y(1)%compute%begin + 1
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
do i = is ,ie, midpoint
if( domain%x(1)%compute%begin == i )then
do l=1,l_size
ptr_wbufferx = b_addrsx(3, l, tMe)
ptr_wbuffery = b_addrsy(3, l, tMe)
do k = 1,ke
wbufferx(j,k) = 0
wbuffery(j,k) = 0
end do
end do
end if
end do
endif
endif
endif
call mpp_sync_self( )
!--- adjoint of the forward unpack: first size the stack buffer, then (in reverse order)
!--- move the boundary-buffer values into it and zero the boundary buffers.
buffer_pos = 0
cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = boundx%recv(ind_x)%pe
do n = 1, boundx%recv(ind_x)%count
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = boundy%recv(ind_y)%pe
do n = 1, boundy%recv(ind_y)%count
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
cur_rank = get_rank_unpack(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while(ind_x >0 .OR. ind_y >0)
if(cur_rank == rank_y) then
do n = boundy%recv(ind_y)%count, 1, -1
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = boundy%recv(ind_y)%tileMe(n)
select case( boundy%recv(ind_y)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebuffery = b_addrsy(1, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = ebuffery(index,k)
ebuffery(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbuffery = b_addrsy(2, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = sbuffery(index,k)
sbuffery(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbuffery = b_addrsy(3, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = wbuffery(index,k)
wbuffery(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbuffery = b_addrsy(4, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = nbuffery(index,k)
nbuffery(index,k) = 0.
index = index + 1
end do
end do
end do
end do
end select
end if
end do
ind_y = ind_y-1
if(ind_y .GT. 0) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
if(cur_rank == rank_x) then
do n = boundx%recv(ind_x)%count, 1, -1
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = boundx%recv(ind_x)%tileMe(n)
select case( boundx%recv(ind_x)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebufferx = b_addrsx(1, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = ebufferx(index,k)
ebufferx(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbufferx = b_addrsx(2, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = sbufferx(index,k)
sbufferx(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbufferx = b_addrsx(3, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = wbufferx(index,k)
wbufferx(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbufferx = b_addrsx(4, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = nbufferx(index,k)
nbufferx(index,k) = 0.
index = index + 1
end do
end do
end do
end do
end select
end if
end do
ind_x = ind_x-1
if(ind_x .GT. 0) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
end do
!--- adjoint of the forward recv: send the packed boundary contributions back to their source PEs
buffer_pos = 0
cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = boundx%recv(ind_x)%pe
do n = 1, boundx%recv(ind_x)%count
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
msgsize_send = (ie-is+1)*(je-js+1)*ke*l_size
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = boundy%recv(ind_y)%pe
do n = 1, boundy%recv(ind_y)%count
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
msgsize_send = (ie-is+1)*(je-js+1)*ke*l_size
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=from_pe, tag=COMM_TAG_4 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
!--- adjoint of the forward send: post receives for the contributions returning from the destination PEs
cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
if(cur_rank == rank_x) then
to_pe = boundx%send(ind_x)%pe
do n = 1, boundx%send(ind_x)%count
if(sendx(boundx%send(ind_x)%dir(n))) then
is = boundx%send(ind_x)%is(n); ie = boundx%send(ind_x)%ie(n)
js = boundx%send(ind_x)%js(n); je = boundx%send(ind_x)%je(n)
tMe = boundx%send(ind_x)%tileMe(n)
pos = pos + (ie-is+1)*(je-js+1)*ke*l_size
end if ! if(send(boundx%dir(n)))
end do !do n = 1, boundx%count
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = boundx%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = boundy%send(ind_y)%pe
do n = 1, boundy%send(ind_y)%count
if(sendy(boundy%send(ind_y)%dir(n))) then
is = boundy%send(ind_y)%is(n); ie = boundy%send(ind_y)%ie(n)
js = boundy%send(ind_y)%js(n); je = boundy%send(ind_y)%je(n)
tMe = boundy%send(ind_y)%tileMe(n)
pos = pos + (ie-is+1)*(je-js+1)*ke*l_size
end if ! if(send(boundy%dir(n)))
end do ! do n = 1, boundy%count
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = boundy%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
!--- maybe we do not need the following stack size check.
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_V_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=to_pe, block=.FALSE., tag=COMM_TAG_4 )
buffer_pos = pos
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!send second part---------------------------------------------------------------
buffer_pos = buffer_recv_size
cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
buffer_pos_old = buffer_pos
pos = buffer_pos
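!--- accumulate the received contributions into fieldx/fieldy; under rotation the opposite
!--- vector component is updated and, for true vectors (SCALAR_BIT not set), the sign is
!--- flipped where the rotation requires it.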
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
if(cur_rank == rank_x) then
to_pe = boundx%send(ind_x)%pe
do n = boundx%send(ind_x)%count,1,-1
if(sendx(boundx%send(ind_x)%dir(n))) then
is = boundx%send(ind_x)%is(n); ie = boundx%send(ind_x)%ie(n)
js = boundx%send(ind_x)%js(n); je = boundx%send(ind_x)%je(n)
tMe = boundx%send(ind_x)%tileMe(n)
select case( boundx%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)+ buffer(pos)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)+ buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)- buffer(pos)
end do
end do
end do
end do
end if
case( NINETY )
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)+ buffer(pos)
end do
end do
end do
end do
case (ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)+ buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)- buffer(pos)
end do
end do
end do
end do
end if
end select
end if ! if(send(boundx%dir(n)))
end do !do n = 1, boundx%count
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = boundx%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = boundy%send(ind_y)%pe
do n = boundy%send(ind_y)%count,1,-1
if(sendy(boundy%send(ind_y)%dir(n))) then
is = boundy%send(ind_y)%is(n); ie = boundy%send(ind_y)%ie(n)
js = boundy%send(ind_y)%js(n); je = boundy%send(ind_y)%je(n)
tMe = boundy%send(ind_y)%tileMe(n)
select case( boundy%send(ind_y)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)+ buffer(pos)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)+ buffer(pos)
end do
end do
end do
end do
case( NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)+ buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)- buffer(pos)
end do
end do
end do
end do
end if
case (ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)+ buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)- buffer(pos)
end do
end do
end do
end do
end if
end select
end if ! if(send(boundy%dir(n)))
end do ! do n = 1, boundy%count
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = boundy%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
buffer_pos = pos
end if
end do
call mpp_sync_self( )
end subroutine mpp_do_get_boundary_ad_r8_3dv
# 1795 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_get_boundary.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_get_boundary_r4_3d( f_addrs, domain, bound, b_addrs, bsize, ke, d_type)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: bound
integer(8), intent(in) :: f_addrs(:,:)
integer(8), intent(in) :: b_addrs(:,:,:)
integer, intent(in) :: bsize(:), ke
real(4), intent(in) :: d_type ! creates unique interface
real(4) :: field(bound%xbegin:bound%xend, bound%ybegin:bound%yend,ke)
real(4) :: ebuffer(bsize(1), ke), sbuffer(bsize(2), ke), wbuffer(bsize(3), ke), nbuffer(bsize(4), ke)
pointer(ptr_field, field)
pointer(ptr_ebuffer, ebuffer)
pointer(ptr_sbuffer, sbuffer)
pointer(ptr_wbuffer, wbuffer)
pointer(ptr_nbuffer, nbuffer)
integer, allocatable :: msg1(:), msg2(:)
logical :: recv(4), send(4)
integer :: nlist, buffer_pos, pos, tMe, from_pe
integer :: i, j, k, l, m, n, index, buffer_recv_size
integer :: is, ie, js, je, msgsize, l_size, num
character(len=8) :: text
integer :: outunit
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
ptr = LOC(mpp_domains_stack)
outunit = stdout()
l_size = size(f_addrs,1)
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrs(1,:,1) == 0)
if( num == 0 ) then
recv(1) = .true.
else if( num == l_size ) then
recv(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of ebuffer with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrs(2,:,1) == 0)
if( num == 0 ) then
recv(2) = .true.
else if( num == l_size ) then
recv(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of sbuffer with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrs(3,:,1) == 0)
if( num == 0 ) then
recv(3) = .true.
else if( num == l_size ) then
recv(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of wbuffer with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrs(4,:,1) == 0)
if( num == 0 ) then
recv(4) = .true.
else if( num == l_size ) then
recv(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of nbuffer with null address should be 0 or l_size")
endif
send = recv
nlist = size(domain%list(:))
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, bound%nrecv
msgsize = 0
do n = 1, bound%recv(m)%count
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = bound%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(l) = msgsize
enddo
do m = 1, bound%nsend
msgsize = 0
do n = 1, bound%send(m)%count
if(recv(bound%send(m)%dir(n))) then
is = bound%send(m)%is(n); ie = bound%send(m)%ie(n)
js = bound%send(m)%js(n); je = bound%send(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
call mpp_send( msgsize, plen=1, to_pe=bound%send(m)%pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_get_boundary: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_get_boundary: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!recv
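!--- post one non-blocking receive per remote PE into the mpp_domains stack buffer, sized
!--- by that PE's active boundary segments.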
buffer_pos = 0
do m = 1, bound%nrecv
msgsize = 0
do n = 1, bound%recv(m)%count
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_OLD: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=bound%recv(m)%pe, block=.false., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
! send
do m = 1, bound%nsend
pos = buffer_pos
do n = 1, bound%send(m)%count
if(send(bound%send(m)%dir(n))) then
is = bound%send(m)%is(n); ie = bound%send(m)%ie(n)
js = bound%send(m)%js(n); je = bound%send(m)%je(n)
tMe = bound%send(m)%tileMe(n)
select case( bound%send(m)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case (ONE_HUNDRED_EIGHTY)
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
end if ! if(send(bound%dir(n)))
end do ! do n = 1, bound%count
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
!--- maybe we do not need the following stack size check.
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_OLD: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=bound%send(m)%pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
end do
call mpp_clock_begin(wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(wait_clock)
buffer_pos = buffer_recv_size
!unpack recv
!unpack buffer in reverse order.
do m = bound%nrecv, 1, -1
do n = bound%recv(m)%count, 1, -1
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = bound%recv(m)%tileMe(n)
select case( bound%recv(m)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebuffer = b_addrs(1, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
ebuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbuffer = b_addrs(2, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
sbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbuffer = b_addrs(3, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
wbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbuffer = b_addrs(4, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
nbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
end select
end if
end do
end do
call mpp_sync_self( )
end subroutine mpp_do_get_boundary_r4_3d
subroutine mpp_do_get_boundary_r4_3dv(f_addrsx, f_addrsy, domain, boundx, boundy, b_addrsx, b_addrsy, &
bsizex, bsizey, ke, d_type, flags, gridtype)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: boundx, boundy
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
integer(8), intent(in) :: b_addrsx(:,:,:), b_addrsy(:,:,:)
integer, intent(in) :: bsizex(:), bsizey(:), ke
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: flags
integer, intent(in) :: gridtype
real(4) :: fieldx(boundx%xbegin:boundx%xend, boundx%ybegin:boundx%yend,ke)
real(4) :: fieldy(boundy%xbegin:boundy%xend, boundy%ybegin:boundy%yend,ke)
real(4) :: ebufferx(bsizex(1), ke), sbufferx(bsizex(2), ke), wbufferx(bsizex(3), ke), nbufferx(bsizex(4), ke)
real(4) :: ebuffery(bsizey(1), ke), sbuffery(bsizey(2), ke), wbuffery(bsizey(3), ke), nbuffery(bsizey(4), ke)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
pointer(ptr_ebufferx, ebufferx)
pointer(ptr_sbufferx, sbufferx)
pointer(ptr_wbufferx, wbufferx)
pointer(ptr_nbufferx, nbufferx)
pointer(ptr_ebuffery, ebuffery)
pointer(ptr_sbuffery, sbuffery)
pointer(ptr_wbuffery, wbuffery)
pointer(ptr_nbuffery, nbuffery)
integer, allocatable :: msg1(:), msg2(:)
logical :: recvx(4), sendx(4)
logical :: recvy(4), sendy(4)
integer :: nlist, buffer_pos, pos, tMe, m
integer :: is, ie, js, je, msgsize, l_size, buffer_recv_size
integer :: i, j, k, l, n, index, to_pe, from_pe
integer :: rank_x, rank_y, cur_rank, ind_x, ind_y
integer :: nsend_x, nsend_y, nrecv_x, nrecv_y, num
character(len=8) :: text
integer :: outunit, shift, midpoint
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
ptr = LOC(mpp_domains_stack)
outunit = stdout()
l_size = size(f_addrsx,1)
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrsx(1,:,1) == 0)
if( num == 0 ) then
recvx(1) = .true.
else if( num == l_size ) then
recvx(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of ebufferx with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrsx(2,:,1) == 0)
if( num == 0 ) then
recvx(2) = .true.
else if( num == l_size ) then
recvx(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of sbufferx with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrsx(3,:,1) == 0)
if( num == 0 ) then
recvx(3) = .true.
else if( num == l_size ) then
recvx(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of wbufferx with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrsx(4,:,1) == 0)
if( num == 0 ) then
recvx(4) = .true.
else if( num == l_size ) then
recvx(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of nbufferx with null address should be 0 or l_size")
endif
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrsy(1,:,1) == 0)
if( num == 0 ) then
recvy(1) = .true.
else if( num == l_size ) then
recvy(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of ebuffery with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrsy(2,:,1) == 0)
if( num == 0 ) then
recvy(2) = .true.
else if( num == l_size ) then
recvy(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of sbuffery with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrsy(3,:,1) == 0)
if( num == 0 ) then
recvy(3) = .true.
else if( num == l_size ) then
recvy(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of wbuffery with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrsy(4,:,1) == 0)
if( num == 0 ) then
recvy(4) = .true.
else if( num == l_size ) then
recvy(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of nbuffery with null address should be 0 or l_size")
endif
sendx = recvx
sendy = recvy
nlist = size(domain%list(:))
nsend_x = boundx%nsend
nsend_y = boundy%nsend
nrecv_x = boundx%nrecv
nrecv_y = boundy%nrecv
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = boundx%recv(ind_x)%pe
do n = 1, boundx%recv(ind_x)%count
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = boundy%recv(ind_y)%pe
do n = 1, boundy%recv(ind_y)%count
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
m = from_pe-mpp_root_pe()
call mpp_recv( msg1(m), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_3)
msg2(m) = msgsize
end do
cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
msgsize = 0
if(cur_rank == rank_x) then
to_pe = boundx%send(ind_x)%pe
do n = 1, boundx%send(ind_x)%count
if(sendx(boundx%send(ind_x)%dir(n))) then
is = boundx%send(ind_x)%is(n); ie = boundx%send(ind_x)%ie(n)
js = boundx%send(ind_x)%js(n); je = boundx%send(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
endif
enddo
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = boundx%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = boundy%send(ind_y)%pe
do n = 1, boundy%send(ind_y)%count
if(sendy(boundy%send(ind_y)%dir(n))) then
is = boundy%send(ind_y)%is(n); ie = boundy%send(ind_y)%ie(n)
js = boundy%send(ind_y)%js(n); je = boundy%send(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = boundy%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
call mpp_send( msgsize, plen=1, to_pe=to_pe, tag=COMM_TAG_3)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_get_boundaryV: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_get_boundary_V: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!recv
buffer_pos = 0
cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = boundx%recv(ind_x)%pe
do n = 1, boundx%recv(ind_x)%count
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = boundy%recv(ind_y)%pe
do n = 1, boundy%recv(ind_y)%count
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_V_: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_4 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
! send
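!--- pack fieldx/fieldy send regions; under a 90-degree rotation the opposite vector
!--- component is packed and, for true vectors (SCALAR_BIT not set), the sign is flipped
!--- where the rotation requires it.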
cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
if(cur_rank == rank_x) then
to_pe = boundx%send(ind_x)%pe
do n = 1, boundx%send(ind_x)%count
if(sendx(boundx%send(ind_x)%dir(n))) then
is = boundx%send(ind_x)%is(n); ie = boundx%send(ind_x)%ie(n)
js = boundx%send(ind_x)%js(n); je = boundx%send(ind_x)%je(n)
tMe = boundx%send(ind_x)%tileMe(n)
select case( boundx%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case( NINETY )
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case (ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select
end if ! if(send(boundx%dir(n)))
end do !do n = 1, boundx%count
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = boundx%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = boundy%send(ind_y)%pe
do n = 1, boundy%send(ind_y)%count
if(sendy(boundy%send(ind_y)%dir(n))) then
is = boundy%send(ind_y)%is(n); ie = boundy%send(ind_y)%ie(n)
js = boundy%send(ind_y)%js(n); je = boundy%send(ind_y)%je(n)
tMe = boundy%send(ind_y)%tileMe(n)
select case( boundy%send(ind_y)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case (ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select
end if ! if(send(boundy%dir(n)))
end do ! do n = 1, boundy%count
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = boundy%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
!--- maybe we do not need the following stack size check.
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_V_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_4 )
buffer_pos = pos
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!unpack recv
!unpack buffer in reverse order.
buffer_pos = buffer_recv_size
cur_rank = get_rank_unpack(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while(ind_x >0 .OR. ind_y >0)
if(cur_rank == rank_y) then
do n = boundy%recv(ind_y)%count, 1, -1
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = boundy%recv(ind_y)%tileMe(n)
select case( boundy%recv(ind_y)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebuffery = b_addrsy(1, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
ebuffery(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbuffery = b_addrsy(2, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
sbuffery(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbuffery = b_addrsy(3, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
wbuffery(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbuffery = b_addrsy(4, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
nbuffery(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
end select
end if
end do
ind_y = ind_y-1
if(ind_y .GT. 0) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
if(cur_rank == rank_x) then
do n = boundx%recv(ind_x)%count, 1, -1
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = boundx%recv(ind_x)%tileMe(n)
select case( boundx%recv(ind_x)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebufferx = b_addrsx(1, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
ebufferx(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbufferx = b_addrsx(2, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
sbufferx(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbufferx = b_addrsx(3, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
wbufferx(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbufferx = b_addrsx(4, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
nbufferx(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
end select
end if
end do
ind_x = ind_x-1
if(ind_x .GT. 0) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
end do
!--- the domain is always symmetric here
shift = 1
tMe = 1
if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift - domain%y(1)%compute%begin + 1
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
do i = is ,ie, midpoint
if( domain%x(1)%compute%begin == i )then
do l=1,l_size
ptr_wbufferx = b_addrsx(3, l, tMe)
ptr_wbuffery = b_addrsy(3, l, tMe)
do k = 1,ke
wbufferx(j,k) = 0
wbuffery(j,k) = 0
end do
end do
end if
end do
endif
endif
endif
call mpp_sync_self( )
end subroutine mpp_do_get_boundary_r4_3dv
# 1804 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_do_get_boundary_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_get_boundary_ad_r4_3d( f_addrs, domain, bound, b_addrs, bsize, ke, d_type)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: bound
integer(8), intent(in) :: f_addrs(:,:)
integer(8), intent(in) :: b_addrs(:,:,:)
integer, intent(in) :: bsize(:), ke
real(4), intent(in) :: d_type ! creates unique interface
real(4) :: field(bound%xbegin:bound%xend, bound%ybegin:bound%yend,ke)
real(4) :: ebuffer(bsize(1), ke), sbuffer(bsize(2), ke), wbuffer(bsize(3), ke), nbuffer(bsize(4), ke)
pointer(ptr_field, field)
pointer(ptr_ebuffer, ebuffer)
pointer(ptr_sbuffer, sbuffer)
pointer(ptr_wbuffer, wbuffer)
pointer(ptr_nbuffer, nbuffer)
integer, allocatable :: msg1(:), msg2(:)
logical :: recv(4), send(4)
integer :: nlist, buffer_pos, pos, tMe, from_pe
integer :: i, j, k, l, m, n, index, buffer_recv_size
integer :: is, ie, js, je, msgsize, l_size, num
character(len=8) :: text
integer :: outunit
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
ptr = LOC(mpp_domains_stack)
outunit = stdout()
l_size = size(f_addrs,1)
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrs(1,:,1) == 0)
if( num == 0 ) then
recv(1) = .true.
else if( num == l_size ) then
recv(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of ebuffer with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrs(2,:,1) == 0)
if( num == 0 ) then
recv(2) = .true.
else if( num == l_size ) then
recv(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of sbuffer with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrs(3,:,1) == 0)
if( num == 0 ) then
recv(3) = .true.
else if( num == l_size ) then
recv(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of wbuffer with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrs(4,:,1) == 0)
if( num == 0 ) then
recv(4) = .true.
else if( num == l_size ) then
recv(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary: number of nbuffer with null address should be 0 or l_size")
endif
send = recv
nlist = size(domain%list(:))
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
do m = 1, bound%nrecv
msgsize = 0
do n = 1, bound%recv(m)%count
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
from_pe = bound%recv(m)%pe
l = from_pe-mpp_root_pe()
call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
msg2(l) = msgsize
enddo
do m = 1, bound%nsend
msgsize = 0
do n = 1, bound%send(m)%count
if(recv(bound%send(m)%dir(n))) then
is = bound%send(m)%is(n); ie = bound%send(m)%ie(n)
js = bound%send(m)%js(n); je = bound%send(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
call mpp_send( msgsize, plen=1, to_pe=bound%send(m)%pe, tag=COMM_TAG_1)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_get_boundary: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_get_boundary: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!recv
buffer_pos = 0
do m = 1, bound%nrecv
msgsize = 0
do n = 1, bound%recv(m)%count
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_OLD: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=bound%recv(m)%pe, block=.false., tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
! send
do m = 1, bound%nsend
pos = buffer_pos
do n = 1, bound%send(m)%count
if(send(bound%send(m)%dir(n))) then
is = bound%send(m)%is(n); ie = bound%send(m)%ie(n)
js = bound%send(m)%js(n); je = bound%send(m)%je(n)
tMe = bound%send(m)%tileMe(n)
select case( bound%send(m)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case (ONE_HUNDRED_EIGHTY)
do l=1,l_size
ptr_field = f_addrs(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
end if ! if(send(bound%dir(n)))
end do ! do n = 1, bound%count
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
!--- maybe we do not need the following stack size check.
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_OLD: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=bound%send(m)%pe, tag=COMM_TAG_2 )
buffer_pos = pos
end if
end do
call mpp_clock_begin(wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(wait_clock)
buffer_pos = buffer_recv_size
!unpack recv
!unpack buffer in reverse order.
do m = bound%nrecv, 1, -1
do n = bound%recv(m)%count, 1, -1
if(recv(bound%recv(m)%dir(n))) then
is = bound%recv(m)%is(n); ie = bound%recv(m)%ie(n)
js = bound%recv(m)%js(n); je = bound%recv(m)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = bound%recv(m)%tileMe(n)
select case( bound%recv(m)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebuffer = b_addrs(1, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
ebuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbuffer = b_addrs(2, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
sbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbuffer = b_addrs(3, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
wbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbuffer = b_addrs(4, l, tMe)
do k = 1, ke
index = bound%recv(m)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
nbuffer(index,k) = buffer(pos)
index = index + 1
end do
end do
end do
end do
end select
end if
end do
end do
call mpp_sync_self( )
end subroutine mpp_do_get_boundary_ad_r4_3d
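!--- Adjoint of the vector boundary retrieval for 32-bit real 3-D fields: the contents of the
!--- east/south/west/north boundary buffers are sent back to the PEs they were gathered from,
!--- the buffers are zeroed, and the returning contributions are accumulated into fieldx/fieldy
!--- (with sign and component swaps for rotated contacts).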
subroutine mpp_do_get_boundary_ad_r4_3dv(f_addrsx, f_addrsy, domain, boundx, boundy, b_addrsx, b_addrsy, &
bsizex, bsizey, ke, d_type, flags, gridtype)
type(domain2D), intent(in) :: domain
type(overlapSpec), intent(in) :: boundx, boundy
integer(8), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
integer(8), intent(in) :: b_addrsx(:,:,:), b_addrsy(:,:,:)
integer, intent(in) :: bsizex(:), bsizey(:), ke
real(4), intent(in) :: d_type ! creates unique interface
integer, intent(in) :: flags
integer, intent(in) :: gridtype
real(4) :: fieldx(boundx%xbegin:boundx%xend, boundx%ybegin:boundx%yend,ke)
real(4) :: fieldy(boundy%xbegin:boundy%xend, boundy%ybegin:boundy%yend,ke)
real(4) :: ebufferx(bsizex(1), ke), sbufferx(bsizex(2), ke), wbufferx(bsizex(3), ke), nbufferx(bsizex(4), ke)
real(4) :: ebuffery(bsizey(1), ke), sbuffery(bsizey(2), ke), wbuffery(bsizey(3), ke), nbuffery(bsizey(4), ke)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
pointer(ptr_ebufferx, ebufferx)
pointer(ptr_sbufferx, sbufferx)
pointer(ptr_wbufferx, wbufferx)
pointer(ptr_nbufferx, nbufferx)
pointer(ptr_ebuffery, ebuffery)
pointer(ptr_sbuffery, sbuffery)
pointer(ptr_wbuffery, wbuffery)
pointer(ptr_nbuffery, nbuffery)
integer, allocatable :: msg1(:), msg2(:)
logical :: recvx(4), sendx(4)
logical :: recvy(4), sendy(4)
integer :: nlist, buffer_pos,buffer_pos_old, pos, pos_, tMe, m
integer :: is, ie, js, je, msgsize, l_size, buffer_recv_size, msgsize_send
integer :: i, j, k, l, n, index, to_pe, from_pe
integer :: rank_x, rank_y, cur_rank, ind_x, ind_y
integer :: nsend_x, nsend_y, nrecv_x, nrecv_y, num
character(len=8) :: text
integer :: outunit, shift, midpoint
real(4) :: buffer(size(mpp_domains_stack(:)))
pointer( ptr, buffer )
ptr = LOC(mpp_domains_stack)
outunit = stdout()
l_size = size(f_addrsx,1)
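!--- a boundary-buffer address of 0 means that buffer was not supplied; for each direction
!--- either all or none of the l_size fields may provide one, otherwise it is an error.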
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrsx(1,:,1) == 0)
if( num == 0 ) then
recvx(1) = .true.
else if( num == l_size ) then
recvx(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of ebufferx with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrsx(2,:,1) == 0)
if( num == 0 ) then
recvx(2) = .true.
else if( num == l_size ) then
recvx(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of sbufferx with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrsx(3,:,1) == 0)
if( num == 0 ) then
recvx(3) = .true.
else if( num == l_size ) then
recvx(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of wbufferx with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrsx(4,:,1) == 0)
if( num == 0 ) then
recvx(4) = .true.
else if( num == l_size ) then
recvx(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of nbufferx with null address should be 0 or l_size")
endif
!---- determine recv(1) based on b_addrs ( east boundary )
num = count(b_addrsy(1,:,1) == 0)
if( num == 0 ) then
recvy(1) = .true.
else if( num == l_size ) then
recvy(1) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of ebuffery with null address should be 0 or l_size")
endif
!---- determine recv(2) based on b_addrs ( south boundary )
num = count(b_addrsy(2,:,1) == 0)
if( num == 0 ) then
recvy(2) = .true.
else if( num == l_size ) then
recvy(2) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of sbuffery with null address should be 0 or l_size")
endif
!---- determine recv(3) based on b_addrs ( west boundary )
num = count(b_addrsy(3,:,1) == 0)
if( num == 0 ) then
recvy(3) = .true.
else if( num == l_size ) then
recvy(3) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of wbuffery with null address should be 0 or l_size")
endif
!---- determine recv(4) based on b_addrs ( north boundary )
num = count(b_addrsy(4,:,1) == 0)
if( num == 0 ) then
recvy(4) = .true.
else if( num == l_size ) then
recvy(4) = .false.
else
if( num .NE. 0 ) call mpp_error(FATAL, &
"mpp_do_get_boundary_V: number of nbuffery with null address should be 0 or l_size")
endif
sendx = recvx
sendy = recvy
nlist = size(domain%list(:))
nsend_x = boundx%nsend
nsend_y = boundy%nsend
nrecv_x = boundx%nrecv
nrecv_y = boundy%nrecv
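!--- when debug_message_passing is set, first exchange the expected message sizes with every
!--- communicating PE and abort if the send and recv sizes do not match.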
if(debug_message_passing) then
allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
msg1 = 0
msg2 = 0
cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = boundx%recv(ind_x)%pe
do n = 1, boundx%recv(ind_x)%count
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = boundy%recv(ind_y)%pe
do n = 1, boundy%recv(ind_y)%count
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
m = from_pe-mpp_root_pe()
call mpp_recv( msg1(m), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_3)
msg2(m) = msgsize
end do
cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
msgsize = 0
if(cur_rank == rank_x) then
to_pe = boundx%send(ind_x)%pe
do n = 1, boundx%send(ind_x)%count
if(sendx(boundx%send(ind_x)%dir(n))) then
is = boundx%send(ind_x)%is(n); ie = boundx%send(ind_x)%ie(n)
js = boundx%send(ind_x)%js(n); je = boundx%send(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
endif
enddo
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = boundx%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = boundy%send(ind_y)%pe
do n = 1, boundy%send(ind_y)%count
if(sendy(boundy%send(ind_y)%dir(n))) then
is = boundy%send(ind_y)%is(n); ie = boundy%send(ind_y)%ie(n)
js = boundy%send(ind_y)%js(n); je = boundy%send(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = boundy%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
call mpp_send( msgsize, plen=1, to_pe=to_pe, tag=COMM_TAG_3)
enddo
call mpp_sync_self(check=EVENT_RECV)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
call mpp_error(FATAL, "mpp_do_get_boundaryV: mismatch on send and recv size")
endif
enddo
call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_get_boundary_V: message sizes are matched between send and recv for domain " &
//trim(domain%name)
deallocate(msg1, msg2)
endif
!--- the domain is always symmetric here
shift = 1
tMe = 1
if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift - domain%y(1)%compute%begin + 1
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
do i = is ,ie, midpoint
if( domain%x(1)%compute%begin == i )then
do l=1,l_size
ptr_wbufferx = b_addrsx(3, l, tMe)
ptr_wbuffery = b_addrsy(3, l, tMe)
do k = 1,ke
wbufferx(j,k) = 0
wbuffery(j,k) = 0
end do
end do
end if
end do
endif
endif
endif
call mpp_sync_self( )
!--- adjoint of the unpack: first total the receive-buffer size, then (below) copy the
!--- boundary buffers into the stack buffer in reverse order, zeroing them as they are read.
buffer_pos = 0
cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = boundx%recv(ind_x)%pe
do n = 1, boundx%recv(ind_x)%count
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = boundy%recv(ind_y)%pe
do n = 1, boundy%recv(ind_y)%count
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
cur_rank = get_rank_unpack(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while(ind_x >0 .OR. ind_y >0)
if(cur_rank == rank_y) then
do n = boundy%recv(ind_y)%count, 1, -1
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = boundy%recv(ind_y)%tileMe(n)
select case( boundy%recv(ind_y)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebuffery = b_addrsy(1, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = ebuffery(index,k)
ebuffery(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbuffery = b_addrsy(2, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = sbuffery(index,k)
sbuffery(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbuffery = b_addrsy(3, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = wbuffery(index,k)
wbuffery(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbuffery = b_addrsy(4, l, tMe)
do k = 1, ke
index = boundy%recv(ind_y)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = nbuffery(index,k)
nbuffery(index,k) = 0.
index = index + 1
end do
end do
end do
end do
end select
end if
end do
ind_y = ind_y-1
if(ind_y .GT. 0) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
if(cur_rank == rank_x) then
do n = boundx%recv(ind_x)%count, 1, -1
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = (ie-is+1)*(je-js+1)*ke*l_size
pos = buffer_pos - msgsize
buffer_pos = pos
tMe = boundx%recv(ind_x)%tileMe(n)
select case( boundx%recv(ind_x)%dir(n) )
case ( 1 ) ! EAST
do l=1,l_size
ptr_ebufferx = b_addrsx(1, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = ebufferx(index,k)
ebufferx(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 2 ) ! SOUTH
do l=1,l_size
ptr_sbufferx = b_addrsx(2, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = sbufferx(index,k)
sbufferx(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 3 ) ! WEST
do l=1,l_size
ptr_wbufferx = b_addrsx(3, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = wbufferx(index,k)
wbufferx(index,k) = 0.
index = index + 1
end do
end do
end do
end do
case ( 4 ) ! NORTH
do l=1,l_size
ptr_nbufferx = b_addrsx(4, l, tMe)
do k = 1, ke
index = boundx%recv(ind_x)%index(n)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = nbufferx(index,k)
nbufferx(index,k) = 0.
index = index + 1
end do
end do
end do
end do
end select
end if
end do
ind_x = ind_x-1
if(ind_x .GT. 0) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
end do
!--- adjoint of the recv: send the buffered boundary data back to the PEs it was received from
buffer_pos = 0
cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
msgsize = 0
if(cur_rank == rank_x) then
from_pe = boundx%recv(ind_x)%pe
do n = 1, boundx%recv(ind_x)%count
if(recvx(boundx%recv(ind_x)%dir(n))) then
is = boundx%recv(ind_x)%is(n); ie = boundx%recv(ind_x)%ie(n)
js = boundx%recv(ind_x)%js(n); je = boundx%recv(ind_x)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
msgsize_send = (ie-is+1)*(je-js+1)*ke*l_size
end if
end do
ind_x = ind_x+1
if(ind_x .LE. nrecv_x) then
rank_x = boundx%recv(ind_x)%pe - domain%pe
if(rank_x .LE.0) rank_x = rank_x + nlist
else
rank_x = -1
endif
endif
if(cur_rank == rank_y) then
from_pe = boundy%recv(ind_y)%pe
do n = 1, boundy%recv(ind_y)%count
if(recvy(boundy%recv(ind_y)%dir(n))) then
is = boundy%recv(ind_y)%is(n); ie = boundy%recv(ind_y)%ie(n)
js = boundy%recv(ind_y)%js(n); je = boundy%recv(ind_y)%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
msgsize_send = (ie-is+1)*(je-js+1)*ke*l_size
end if
end do
ind_y = ind_y+1
if(ind_y .LE. nrecv_y) then
rank_y = boundy%recv(ind_y)%pe - domain%pe
if(rank_y .LE.0) rank_y = rank_y + nlist
else
rank_y = -1
endif
endif
cur_rank = max(rank_x, rank_y)
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=from_pe, tag=COMM_TAG_4 )
buffer_pos = buffer_pos + msgsize
end if
end do
buffer_recv_size = buffer_pos
!--- adjoint of the send: post receives for the halo contributions coming back from the boundaries
cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
if(cur_rank == rank_x) then
to_pe = boundx%send(ind_x)%pe
do n = 1, boundx%send(ind_x)%count
if(sendx(boundx%send(ind_x)%dir(n))) then
is = boundx%send(ind_x)%is(n); ie = boundx%send(ind_x)%ie(n)
js = boundx%send(ind_x)%js(n); je = boundx%send(ind_x)%je(n)
tMe = boundx%send(ind_x)%tileMe(n)
pos = pos + (ie-is+1)*(je-js+1)*ke*l_size
end if ! if(send(boundx%dir(n)))
end do !do n = 1, boundx%count
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = boundx%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = boundy%send(ind_y)%pe
do n = 1, boundy%send(ind_y)%count
if(sendy(boundy%send(ind_y)%dir(n))) then
is = boundy%send(ind_y)%is(n); ie = boundy%send(ind_y)%ie(n)
js = boundy%send(ind_y)%js(n); je = boundy%send(ind_y)%je(n)
tMe = boundy%send(ind_y)%tileMe(n)
pos = pos + (ie-is+1)*(je-js+1)*ke*l_size
end if ! if(send(boundy%dir(n)))
end do ! do n = 1, boundy%count
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = boundy%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
!--- maybe we do not need the following stack size check.
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_V_: mpp_domains_stack overflow, ' // &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=to_pe, block=.FALSE., tag=COMM_TAG_4 )
buffer_pos = pos
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!--- second part: accumulate the received contributions back into fieldx/fieldy ----------------
buffer_pos = buffer_recv_size
cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
buffer_pos_old = buffer_pos
pos = buffer_pos
do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
pos = buffer_pos
if(cur_rank == rank_x) then
to_pe = boundx%send(ind_x)%pe
do n = boundx%send(ind_x)%count,1,-1
if(sendx(boundx%send(ind_x)%dir(n))) then
is = boundx%send(ind_x)%is(n); ie = boundx%send(ind_x)%ie(n)
js = boundx%send(ind_x)%js(n); je = boundx%send(ind_x)%je(n)
tMe = boundx%send(ind_x)%tileMe(n)
select case( boundx%send(ind_x)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)+ buffer(pos)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)+ buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)- buffer(pos)
end do
end do
end do
end do
end if
case( NINETY )
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)+ buffer(pos)
end do
end do
end do
end do
case (ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)+ buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)- buffer(pos)
end do
end do
end do
end do
end if
end select
end if ! if(send(boundx%dir(n)))
end do !do n = 1, boundx%count
ind_x = ind_x+1
if(ind_x .LE. nsend_x) then
rank_x = boundx%send(ind_x)%pe - domain%pe
if(rank_x .LT.0) rank_x = rank_x + nlist
else
rank_x = nlist+1
endif
endif
if(cur_rank == rank_y) then
to_pe = boundy%send(ind_y)%pe
do n = boundy%send(ind_y)%count,1,-1
if(sendy(boundy%send(ind_y)%dir(n))) then
is = boundy%send(ind_y)%is(n); ie = boundy%send(ind_y)%ie(n)
js = boundy%send(ind_y)%js(n); je = boundy%send(ind_y)%je(n)
tMe = boundy%send(ind_y)%tileMe(n)
select case( boundy%send(ind_y)%rotation(n) )
case(ZERO)
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)+ buffer(pos)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = is, ie
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)+ buffer(pos)
end do
end do
end do
end do
case( NINETY )
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)+ buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldx = f_addrsx(l, tMe)
do k = 1, ke
do j = js, je
do i = ie, is, -1
pos = pos + 1
fieldx(i,j,k)= fieldx(i,j,k)- buffer(pos)
end do
end do
end do
end do
end if
case (ONE_HUNDRED_EIGHTY)
if( BTEST(flags,SCALAR_BIT) ) then
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)+ buffer(pos)
end do
end do
end do
end do
else
do l=1,l_size
ptr_fieldy = f_addrsy(l, tMe)
do k = 1, ke
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
fieldy(i,j,k)= fieldy(i,j,k)- buffer(pos)
end do
end do
end do
end do
end if
end select
end if ! if(send(boundy%dir(n)))
end do ! do n = 1, boundy%count
ind_y = ind_y+1
if(ind_y .LE. nsend_y) then
rank_y = boundy%send(ind_y)%pe - domain%pe
if(rank_y .LT.0) rank_y = rank_y + nlist
else
rank_y = nlist+1
endif
endif
cur_rank = min(rank_x, rank_y)
msgsize = pos - buffer_pos
if( msgsize.GT.0 )then
buffer_pos = pos
end if
end do
call mpp_sync_self( )
end subroutine mpp_do_get_boundary_ad_r4_3dv
# 1814 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_group_update.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
! -*-f90-*-
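!--- The mpp_create_group_update_* routines register fields with a group so that their halo
!--- updates can later be performed as one aggregated exchange; the 2-D and 4-D variants simply
!--- remap their arguments onto the 3-D interface through Cray pointers.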
subroutine mpp_create_group_update_r8_2d(group, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(inout) :: field(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
real(8) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_create_group_update(group, field3D, domain, flags, position, whalo, ehalo, shalo, nhalo)
return
end subroutine mpp_create_group_update_r8_2d
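!--- register a scalar field for a grouped update; the first field fixes the group's halo
!--- widths, flags, position and array sizes, and every subsequent field must match them.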
subroutine mpp_create_group_update_r8_3d(group, field, domain, flags, position, whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(inout) :: field(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
integer :: update_position, update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: update_flags, isize, jsize, ksize
integer :: nscalar
character(len=3) :: text
logical :: set_mismatch, update_edge_only
logical :: recv(8)
if(group%initialized) then
call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_3D: group is already initialized")
endif
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
update_position = CENTER
!--- when there is a NINETY or MINUS_NINETY rotation for some contact, scalar data can not be on E- or N-cell,
if(present(position)) then
update_position = position
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
call mpp_error(FATAL, 'MPP_CREATE_GROUP_UPDATE_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'can not use scalar version update_domain for data on E or N-cell' )
end if
if( domain%max_ntile_pe > 1 ) then
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE: does not support multiple tiles per processor')
endif
update_flags = XUPDATE+YUPDATE
if(present(flags)) update_flags = flags
group%nscalar = group%nscalar + 1
nscalar = group%nscalar
if( nscalar > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
isize = size(field,1); jsize=size(field,2); ksize = size(field,3)
group%addrs_s(nscalar) = LOC(field)
if( group%nscalar == 1 ) then
group%flags_s = update_flags
group%whalo_s = update_whalo
group%ehalo_s = update_ehalo
group%shalo_s = update_shalo
group%nhalo_s = update_nhalo
group%position = update_position
group%isize_s = isize
group%jsize_s = jsize
group%ksize_s = ksize
call mpp_get_memory_domain(domain, group%is_s, group%ie_s, group%js_s, group%je_s, position=position)
update_edge_only = BTEST(update_flags, EDGEONLY)
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
recv(2) = .false.
recv(4) = .false.
recv(6) = .false.
recv(8) = .false.
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
group%recv_s = recv
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (group%flags_s .NE. update_flags)
set_mismatch = set_mismatch .OR. (group%whalo_s .NE. update_whalo)
set_mismatch = set_mismatch .OR. (group%ehalo_s .NE. update_ehalo)
set_mismatch = set_mismatch .OR. (group%shalo_s .NE. update_shalo)
set_mismatch = set_mismatch .OR. (group%nhalo_s .NE. update_nhalo)
set_mismatch = set_mismatch .OR. (group%position .NE. update_position)
set_mismatch = set_mismatch .OR. (group%isize_s .NE. isize)
set_mismatch = set_mismatch .OR. (group%jsize_s .NE. jsize)
set_mismatch = set_mismatch .OR. (group%ksize_s .NE. ksize)
if(set_mismatch)then
write( text,'(i2)' ) nscalar
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
return
end subroutine mpp_create_group_update_r8_3d
subroutine mpp_create_group_update_r8_4d(group, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(inout) :: field(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_create_group_update(group, field3D, domain, flags, position, whalo, ehalo, shalo, nhalo)
return
end subroutine mpp_create_group_update_r8_4d
subroutine mpp_create_group_update_r8_2dv( group, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(inout) :: fieldx(:,:), fieldy(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),1)
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),1)
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_create_group_update(group, field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo)
return
end subroutine mpp_create_group_update_r8_2dv
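!--- register a vector (x,y) field pair for a grouped update; the grid type selects the
!--- staggering positions used for the memory-domain bounds, and as with scalars the first
!--- pair fixes the group's settings, which later pairs must match.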
subroutine mpp_create_group_update_r8_3dv( group, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: update_flags, isize_x, jsize_x, ksize_x, isize_y, jsize_y, ksize_y
integer :: nvector, update_gridtype, position_x, position_y
character(len=3) :: text
logical :: set_mismatch, update_edge_only
logical :: recv(8)
if(group%initialized) then
call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: group is already initialized")
endif
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
update_gridtype = AGRID
if(PRESENT(gridtype)) update_gridtype = gridtype
if( domain%max_ntile_pe > 1 ) then
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_V: does not support multiple tiles per processor')
endif
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
! The following test is so that SCALAR_PAIR can be used alone with the
! same default update pattern as without.
if (BTEST(update_flags,SCALAR_BIT)) then
if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
.OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
end if
group%nvector = group%nvector + 1
nvector = group%nvector
if( nvector > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
isize_x = size(fieldx,1); jsize_x = size(fieldx,2); ksize_x = size(fieldx,3)
isize_y = size(fieldy,1); jsize_y = size(fieldy,2); ksize_y = size(fieldy,3)
if(ksize_x .NE. ksize_y) call mpp_error(FATAL, &
'MPP_CREATE_GROUP_UPDATE_V: mismatch of ksize between fieldx and fieldy')
group%addrs_x(nvector) = LOC(fieldx)
group%addrs_y(nvector) = LOC(fieldy)
if( group%nvector == 1 ) then
group%flags_v = update_flags
group%whalo_v = update_whalo
group%ehalo_v = update_ehalo
group%shalo_v = update_shalo
group%nhalo_v = update_nhalo
group%gridtype = update_gridtype
group%isize_x = isize_x
group%jsize_x = jsize_x
group%isize_y = isize_y
group%jsize_y = jsize_y
group%ksize_v = ksize_x
update_edge_only = BTEST(update_flags, EDGEONLY)
group%nonsym_edge = .false.
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
recv(2) = .false.
recv(4) = .false.
recv(6) = .false.
recv(8) = .false.
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
group%recv_x = recv
group%recv_y = recv
!--- NONSYMEDGE is only for non-symmetric domain and CGRID/DGRID
if( .not. domain%symmetry .and. (update_gridtype==CGRID_NE .OR. update_gridtype==DGRID_NE)) then
group%nonsym_edge = BTEST(update_flags, NONSYMEDGE)
endif
if( group%nonsym_edge ) then
group%recv_x(2:8:2) = .false.
group%recv_y(2:8:2) = .false.
if(update_gridtype==CGRID_NE) then
group%recv_x(3) = .false.
group%recv_x(7) = .false.
group%recv_y(1) = .false.
group%recv_y(5) = .false.
else if(update_gridtype==DGRID_NE) then
group%recv_x(1) = .false.
group%recv_x(5) = .false.
group%recv_y(3) = .false.
group%recv_y(7) = .false.
endif
endif
select case(group%gridtype)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case (DGRID_NE, DGRID_SW)
position_x = NORTH
position_y = EAST
case default
call mpp_error(FATAL, "mpp_CREATE_GROUP_UPDATE_V: invalid value of gridtype")
end select
call mpp_get_memory_domain(domain, group%is_x, group%ie_x, group%js_x, group%je_x, position=position_x)
call mpp_get_memory_domain(domain, group%is_y, group%ie_y, group%js_y, group%je_y, position=position_y)
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (group%flags_v .NE. update_flags)
set_mismatch = set_mismatch .OR. (group%whalo_v .NE. update_whalo)
set_mismatch = set_mismatch .OR. (group%ehalo_v .NE. update_ehalo)
set_mismatch = set_mismatch .OR. (group%shalo_v .NE. update_shalo)
set_mismatch = set_mismatch .OR. (group%nhalo_v .NE. update_nhalo)
set_mismatch = set_mismatch .OR. (group%gridtype .NE. update_gridtype)
set_mismatch = set_mismatch .OR. (group%isize_x .NE. isize_x)
set_mismatch = set_mismatch .OR. (group%jsize_x .NE. jsize_x)
set_mismatch = set_mismatch .OR. (group%isize_y .NE. isize_y)
set_mismatch = set_mismatch .OR. (group%jsize_y .NE. jsize_y)
set_mismatch = set_mismatch .OR. (group%ksize_v .NE. ksize_x)
if(set_mismatch)then
write( text,'(i2)' ) nvector
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_V: Incompatible field at count '//text//' for group update.' )
endif
endif
return
end subroutine mpp_create_group_update_r8_3dv
subroutine mpp_create_group_update_r8_4dv( group, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
real(8) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4))
real(8) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_create_group_update(group, field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo)
return
end subroutine mpp_create_group_update_r8_4dv
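!--- perform a blocking grouped halo update: pre-post all receives, pack every registered
!--- field into the shared stack buffer, send, wait for the receives, unpack, and finally
!--- apply the folded-north corrections for vector fields.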
subroutine mpp_do_group_update_r8(group, domain, d_type)
type(mpp_group_update_type), intent(inout) :: group
type(domain2D), intent(inout) :: domain
real(8), intent(in) :: d_type
integer :: nscalar, nvector, nlist
logical :: recv_y(8)
integer :: nsend, nrecv, flags_v
integer :: msgsize
integer :: from_pe, to_pe, buffer_pos, pos
integer :: ksize, is, ie, js, je
integer :: n, l, m, i, j, k, buffer_start_pos, nk
integer :: shift, gridtype, midpoint
integer :: npack, nunpack, rotation, isd
character(len=8) :: text
real(8) :: buffer(mpp_domains_stack_size)
real(8) :: field (group%is_s:group%ie_s,group%js_s:group%je_s, group%ksize_s)
real(8) :: fieldx(group%is_x:group%ie_x,group%js_x:group%je_x, group%ksize_v)
real(8) :: fieldy(group%is_y:group%ie_y,group%js_y:group%je_y, group%ksize_v)
pointer(ptr, buffer )
pointer(ptr_field, field)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
nscalar = group%nscalar
nvector = group%nvector
nlist = size(domain%list(:))
gridtype = group%gridtype
!--- ksize_s must equal ksize_v
if(nvector > 0 .AND. nscalar > 0) then
if(group%ksize_s .NE. group%ksize_v) then
call mpp_error(FATAL, "MPP_DO_GROUP_UPDATE: ksize_s and ksize_v are not equal")
endif
ksize = group%ksize_s
else if (nscalar > 0) then
ksize = group%ksize_s
else if (nvector > 0) then
ksize = group%ksize_v
else
call mpp_error(FATAL, "MPP_DO_GROUP_UPDATE: nscalar and nvector are all 0")
endif
if(nvector > 0) recv_y = group%recv_y
ptr = LOC(mpp_domains_stack)
!--- set reset_index_s and reset_index_v to 0
group%reset_index_s = 0
group%reset_index_v = 0
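!--- the group's communication pattern is computed once, on first use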
if(.not. group%initialized) call set_group_update(group,domain)
nrecv = group%nrecv
nsend = group%nsend
!---pre-post receive.
call mpp_clock_begin(group_recv_clock)
do m = 1, nrecv
msgsize = group%recv_size(m)
from_pe = group%from_pe(m)
if( msgsize .GT. 0 )then
buffer_pos = group%buffer_pos_recv(m)
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., &
tag=COMM_TAG_1)
end if
end do
call mpp_clock_end(group_recv_clock)
flags_v = group%flags_v
npack = group%npack
call mpp_clock_begin(group_pack_clock)
!pack the data
buffer_start_pos = 0
# 1 "../mpp/include/group_update_pack.inc" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
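!--- the pack loop has two variants: one with the k loop inside each overlap region
!--- (k_loop_inside) and one with the overlap and k loops collapsed into a single OpenMP loop.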
if( group%k_loop_inside ) then
!$OMP parallel do default(none) shared(npack,group,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is,ie,js,je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do n = 1, npack
buffer_pos = group%pack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos
is = group%pack_is(n); ie = group%pack_ie(n)
js = group%pack_js(n); je = group%pack_je(n)
rotation = group%pack_rotation(n)
if( group%pack_type(n) == FIELD_S ) then
select case( rotation )
case(ZERO)
do l=1, group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
enddo
enddo
case( MINUS_NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
else if( group%pack_type(n) == FIELD_X ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case( NINETY )
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
else if( group%pack_type(n) == FIELD_Y ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
endif
enddo
else
!$OMP parallel do default(none) shared(npack,group,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is,ie,js,je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do nk = 1, npack*ksize
n = (nk-1)/ksize + 1
k = mod((nk-1), ksize) + 1
buffer_pos = group%pack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos + (k-1)*group%pack_size(n)
is = group%pack_is(n); ie = group%pack_ie(n)
js = group%pack_js(n); je = group%pack_je(n)
rotation = group%pack_rotation(n)
if( group%pack_type(n) == FIELD_S ) then
select case( rotation )
case(ZERO)
do l=1, group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
enddo
case( MINUS_NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
case( NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end select
else if( group%pack_type(n) == FIELD_X ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end if
case( NINETY )
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end if
end select ! select case( rotation(n) )
else if( group%pack_type(n) == FIELD_Y ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
case( MINUS_NINETY )
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
case( NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end if
end select ! select case( rotation(n) )
endif
enddo
endif
# 498 "../mpp/include/mpp_group_update.h" 2
call mpp_clock_end(group_pack_clock)
call mpp_clock_begin(group_send_clock)
do n = 1, nsend
msgsize = group%send_size(n)
if( msgsize .GT. 0 )then
buffer_pos = group%buffer_pos_send(n)
to_pe = group%to_pe(n)
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1)
endif
enddo
call mpp_clock_end(group_send_clock)
if(nrecv>0) then
call mpp_clock_begin(group_wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(group_wait_clock)
endif
!---unpack the buffer
nunpack = group%nunpack
call mpp_clock_begin(group_unpk_clock)
# 1 "../mpp/include/group_update_unpack.inc" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
if( group%k_loop_inside ) then
!$OMP parallel do default(none) shared(nunpack,group,nscalar,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is, ie, js, je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy, n,k )
do n = nunpack, 1, -1
buffer_pos = group%unpack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos
is = group%unpack_is(n); ie = group%unpack_ie(n)
js = group%unpack_js(n); je = group%unpack_je(n)
if( group%unpack_type(n) == FIELD_S ) then
do l=1,nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
end do
else if( group%unpack_type(n) == FIELD_X ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = buffer(pos)
end do
end do
end do
end do
else if( group%unpack_type(n) == FIELD_Y ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
end do
endif
enddo
else
!$OMP parallel do default(none) shared(nunpack,group,nscalar,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is, ie, js, je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do nk = nunpack*ksize, 1, -1
n = (nk-1)/ksize + 1
k = mod((nk-1), ksize) + 1
buffer_pos = group%unpack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos + (k-1)*group%unpack_size(n)
is = group%unpack_is(n); ie = group%unpack_ie(n)
js = group%unpack_js(n); je = group%unpack_je(n)
if( group%unpack_type(n) == FIELD_S ) then
do l=1,nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
else if( group%unpack_type(n) == FIELD_X ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = buffer(pos)
end do
end do
end do
else if( group%unpack_type(n) == FIELD_Y ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
endif
enddo
endif
# 521 "../mpp/include/mpp_group_update.h" 2
call mpp_clock_end(group_unpk_clock)
! ---northern boundary fold
shift = 0
if(domain%symmetry) shift = 1
if( nvector >0 .AND. BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags_v,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
if( .NOT. domain%symmetry ) is = is - 1
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the data in your halo coming from
! the other half may have the wrong sign
!off west edge, when update north or west direction
j = domain%y(1)%global%end+shift
if ( recv_y(7) .OR. recv_y(5) ) then
select case(gridtype)
case(BGRID_NE)
if(domain%symmetry) then
is = domain%x(1)%global%begin
else
is = domain%x(1)%global%begin - 1
end if
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north BGRID_NE west edge ubound error.' )
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = domain%x(1)%data%begin,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
isd = domain%x(1)%compute%begin - group%whalo_v
if( is.GT.isd )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north CGRID_NE west edge ubound error.' )
do l=1,nvector
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = isd,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%compute%end+group%ehalo_v
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,nvector
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,SOUTH) .OR. BTEST(domain%fold,WEST) .OR. BTEST(domain%fold,EAST) ) then
call mpp_error(FATAL, "MPP_DO_GROUP_UPDATE: this interface does not support folded_south, " // &
"folded_west of folded_east, contact developer")
endif
if(nsend>0) then
call mpp_clock_begin(group_wait_clock)
call mpp_sync_self( )
call mpp_clock_end(group_wait_clock)
endif
end subroutine mpp_do_group_update_r8
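!--- non-blocking counterpart of mpp_do_group_update: reserve space in the non-blocking
!--- stack buffer, post the receives with request handles, and pack the data for sending;
!--- the exchange is completed later by the matching complete call.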
subroutine mpp_start_group_update_r8(group, domain, d_type, reuse_buffer)
type(mpp_group_update_type), intent(inout) :: group
type(domain2D), intent(inout) :: domain
real(8), intent(in) :: d_type
logical, optional, intent(in) :: reuse_buffer
integer :: nscalar, nvector
integer :: nsend, nrecv, flags_v
integer :: msgsize, npack, rotation
integer :: from_pe, to_pe, buffer_pos, pos
integer :: ksize, is, ie, js, je
integer :: n, l, m, i, j, k, buffer_start_pos, nk
logical :: reuse_buf_pos
character(len=8) :: text
real(8) :: buffer(size(mpp_domains_stack_nonblock(:)))
real(8) :: field (group%is_s:group%ie_s,group%js_s:group%je_s, group%ksize_s)
real(8) :: fieldx(group%is_x:group%ie_x,group%js_x:group%je_x, group%ksize_v)
real(8) :: fieldy(group%is_y:group%ie_y,group%js_y:group%je_y, group%ksize_v)
pointer( ptr, buffer )
pointer(ptr_field, field)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
nscalar = group%nscalar
nvector = group%nvector
if(nscalar>0) then
ksize = group%ksize_s
else
ksize = group%ksize_v
endif
!--- set reset_index_s and reset_index_v to 0
group%reset_index_s = 0
group%reset_index_v = 0
reuse_buf_pos = .FALSE.
if (PRESENT(reuse_buffer)) reuse_buf_pos = reuse_buffer
if (.not. group%initialized) then
call set_group_update(group,domain)
endif
if (.not. reuse_buf_pos) then
group%buffer_start_pos = nonblock_group_buffer_pos
nonblock_group_buffer_pos = nonblock_group_buffer_pos + group%tot_msgsize
mpp_domains_stack_hwm = nonblock_group_buffer_pos + 1
if( mpp_domains_stack_hwm .GT. mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'set_group_update: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
else if( group%buffer_start_pos < 0 ) then
call mpp_error(FATAL, "MPP_START_GROUP_UPDATE: group%buffer_start_pos is not set")
endif
nrecv = group%nrecv
nsend = group%nsend
ptr = LOC(mpp_domains_stack_nonblock)
! Make sure we are not in the middle of the old version of the non-blocking halo update.
if(num_update>0) call mpp_error(FATAL, "MPP_START_GROUP_UPDATE: can not be called in the middle of "// &
"mpp_start_update_domains/mpp_complete_update_domains call")
num_nonblock_group_update = num_nonblock_group_update + 1
!---pre-post receive.
call mpp_clock_begin(nonblock_group_recv_clock)
do m = 1, nrecv
msgsize = group%recv_size(m)
from_pe = group%from_pe(m)
if( msgsize .GT. 0 )then
buffer_pos = group%buffer_pos_recv(m) + group%buffer_start_pos
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., &
tag=COMM_TAG_1, request=group%request_recv(m))
group%type_recv(m) = MPI_REAL8
end if
end do
call mpp_clock_end(nonblock_group_recv_clock)
flags_v = group%flags_v
!pack the data
call mpp_clock_begin(nonblock_group_pack_clock)
npack = group%npack
buffer_start_pos = group%buffer_start_pos
# 1 "../mpp/include/group_update_pack.inc" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
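! Pack the halo data to be sent into the stack buffer. When group%k_loop_inside
! is true, the OpenMP threading is over the packing segments (n) with the k loop
! inside; otherwise threading is over the combined (n,k) index (presumably to
! improve load balance when there are few segments but many levels). Rotated
! contacts (NINETY, MINUS_NINETY, ONE_HUNDRED_EIGHTY) change the traversal
! order, and for rotated contacts the vector components may be packed with a
! sign flip unless the SCALAR_BIT (SCALAR_PAIR) flag is set.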
if( group%k_loop_inside ) then
!$OMP parallel do default(none) shared(npack,group,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is,ie,js,je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do n = 1, npack
buffer_pos = group%pack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos
is = group%pack_is(n); ie = group%pack_ie(n)
js = group%pack_js(n); je = group%pack_je(n)
rotation = group%pack_rotation(n)
if( group%pack_type(n) == FIELD_S ) then
select case( rotation )
case(ZERO)
do l=1, group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
enddo
enddo
case( MINUS_NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
else if( group%pack_type(n) == FIELD_X ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case( NINETY )
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
else if( group%pack_type(n) == FIELD_Y ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
endif
enddo
else
!$OMP parallel do default(none) shared(npack,group,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is,ie,js,je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do nk = 1, npack*ksize
n = (nk-1)/ksize + 1
k = mod((nk-1), ksize) + 1
buffer_pos = group%pack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos + (k-1)*group%pack_size(n)
is = group%pack_is(n); ie = group%pack_ie(n)
js = group%pack_js(n); je = group%pack_je(n)
rotation = group%pack_rotation(n)
if( group%pack_type(n) == FIELD_S ) then
select case( rotation )
case(ZERO)
do l=1, group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
enddo
case( MINUS_NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
case( NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end select
else if( group%pack_type(n) == FIELD_X ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end if
case( NINETY )
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end if
end select ! select case( rotation(n) )
else if( group%pack_type(n) == FIELD_Y ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
case( MINUS_NINETY )
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
case( NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end if
end select ! select case( rotation(n) )
endif
enddo
endif
# 729 "../mpp/include/mpp_group_update.h" 2
call mpp_clock_end(nonblock_group_pack_clock)
call mpp_clock_begin(nonblock_group_send_clock)
do n = 1, nsend
msgsize = group%send_size(n)
if( msgsize .GT. 0 )then
buffer_pos = group%buffer_pos_send(n) + group%buffer_start_pos
to_pe = group%to_pe(n)
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1, &
request=group%request_send(n))
endif
enddo
call mpp_clock_end(nonblock_group_send_clock)
end subroutine mpp_start_group_update_r8
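! mpp_complete_group_update_r8: complete a non-blocking grouped halo update
! started by mpp_start_group_update_r8. The routine waits on the posted
! receives, unpacks the buffer into the halo regions, applies the folded-north
! vector corrections (pole zeroing for BGRID, reflection/sign fixes off the
! west and east edges) when needed, and finally waits on the outstanding sends.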
subroutine mpp_complete_group_update_r8(group, domain, d_type)
type(mpp_group_update_type), intent(inout) :: group
type(domain2D), intent(inout) :: domain
real(8), intent(in) :: d_type
integer :: nsend, nrecv, nscalar, nvector
integer :: k, buffer_pos, msgsize, pos, m, n, l
integer :: is, ie, js, je, dir, ksize, i, j
integer :: shift, gridtype, midpoint, flags_v
integer :: nunpack, rotation, buffer_start_pos, nk, isd
logical :: recv_y(8)
real(8) :: buffer(size(mpp_domains_stack_nonblock(:)))
real(8) :: field (group%is_s:group%ie_s,group%js_s:group%je_s, group%ksize_s)
real(8) :: fieldx(group%is_x:group%ie_x,group%js_x:group%je_x, group%ksize_v)
real(8) :: fieldy(group%is_y:group%ie_y,group%js_y:group%je_y, group%ksize_v)
pointer(ptr, buffer )
pointer(ptr_field, field)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
gridtype = group%gridtype
flags_v = group%flags_v
nscalar = group%nscalar
nvector = group%nvector
nrecv = group%nrecv
nsend = group%nsend
if(nscalar>0) then
ksize = group%ksize_s
else
ksize = group%ksize_v
endif
if(nvector > 0) recv_y = group%recv_y
ptr = LOC(mpp_domains_stack_nonblock)
if(num_nonblock_group_update < 1) call mpp_error(FATAL, &
'mpp_start_group_update must be called before calling mpp_complete_group_update')
num_nonblock_group_update = num_nonblock_group_update - 1
complete_group_update_on = .true.
if(nrecv>0) then
call mpp_clock_begin(nonblock_group_wait_clock)
call mpp_sync_self(check=EVENT_RECV, request=group%request_recv(1:nrecv), &
msg_size=group%recv_size(1:nrecv), msg_type=group%type_recv(1:nrecv))
call mpp_clock_end(nonblock_group_wait_clock)
endif
!---unpack the buffer
nunpack = group%nunpack
call mpp_clock_begin(nonblock_group_unpk_clock)
buffer_start_pos = group%buffer_start_pos
# 1 "../mpp/include/group_update_unpack.inc" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
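! Unpack the received halo data from the stack buffer into the field halo
! regions. Rotation and sign adjustments were already applied on the packing
! side, so the unpack is a straight copy; the two branches mirror the packing
! code's choice of OpenMP loop structure (k loop inside vs. combined (n,k)).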
if( group%k_loop_inside ) then
!$OMP parallel do default(none) shared(nunpack,group,nscalar,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is, ie, js, je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy, n,k )
do n = nunpack, 1, -1
buffer_pos = group%unpack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos
is = group%unpack_is(n); ie = group%unpack_ie(n)
js = group%unpack_js(n); je = group%unpack_je(n)
if( group%unpack_type(n) == FIELD_S ) then
do l=1,nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
end do
else if( group%unpack_type(n) == FIELD_X ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = buffer(pos)
end do
end do
end do
end do
else if( group%unpack_type(n) == FIELD_Y ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
end do
endif
enddo
else
!$OMP parallel do default(none) shared(nunpack,group,nscalar,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is, ie, js, je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do nk = nunpack*ksize, 1, -1
n = (nk-1)/ksize + 1
k = mod((nk-1), ksize) + 1
buffer_pos = group%unpack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos + (k-1)*group%unpack_size(n)
is = group%unpack_is(n); ie = group%unpack_ie(n)
js = group%unpack_js(n); je = group%unpack_je(n)
if( group%unpack_type(n) == FIELD_S ) then
do l=1,nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
else if( group%unpack_type(n) == FIELD_X ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = buffer(pos)
end do
end do
end do
else if( group%unpack_type(n) == FIELD_Y ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
endif
enddo
endif
# 797 "../mpp/include/mpp_group_update.h" 2
call mpp_clock_end(nonblock_group_unpk_clock)
! ---northern boundary fold
shift = 0
if(domain%symmetry) shift = 1
if( nvector >0 .AND. BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags_v,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
if( .NOT. domain%symmetry ) is = is - 1
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign
! off the west edge, when updating in the north or west direction
j = domain%y(1)%global%end+shift
if ( recv_y(7) .OR. recv_y(5) ) then
select case(gridtype)
case(BGRID_NE)
if(domain%symmetry) then
is = domain%x(1)%global%begin
else
is = domain%x(1)%global%begin - 1
end if
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north BGRID_NE west edge ubound error.' )
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = domain%x(1)%data%begin,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
isd = domain%x(1)%compute%begin - group%whalo_v
if( is.GT.isd)then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north CGRID_NE west edge ubound error.' )
do l=1,nvector
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = isd,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%compute%end+group%ehalo_v
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,nvector
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,SOUTH) .OR. BTEST(domain%fold,WEST) .OR. BTEST(domain%fold,EAST) ) then
call mpp_error(FATAL, "MPP_COMPLETE_GROUP_UPDATE: this interface does not support folded_south, " // &
"folded_west of folded_east, contact developer")
endif
if(nsend>0) then
call mpp_clock_begin(nonblock_group_wait_clock)
call mpp_sync_self(check=EVENT_SEND, request=group%request_send(1:nsend) )
call mpp_clock_end(nonblock_group_wait_clock)
endif
if( num_nonblock_group_update == 0) then
nonblock_group_buffer_pos = 0
endif
end subroutine mpp_complete_group_update_r8
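! The mpp_reset_group_update_field_* routines below re-associate the field
! addresses stored in an already-initialized group with new arrays of the same
! shape, so a group can be reused for different data without being rebuilt.
! The scalar (2d/3d/4d) versions reset addrs_s; the vector (2dv/3dv/4dv)
! versions reset addrs_x and addrs_y.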
subroutine mpp_reset_group_update_field_r8_2d(group, field)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(in) :: field(:,:)
group%reset_index_s = group%reset_index_s + 1
if(group%reset_index_s > group%nscalar) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_: group%reset_index_s > group%nscalar")
if(size(field,1) .NE. group%isize_s .OR. size(field,2) .NE. group%jsize_s .OR. group%ksize_s .NE. 1) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_: size of field does not match the size stored in group")
group%addrs_s(group%reset_index_s) = LOC(field)
end subroutine mpp_reset_group_update_field_r8_2d
subroutine mpp_reset_group_update_field_r8_3d(group, field)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(in) :: field(:,:,:)
group%reset_index_s = group%reset_index_s + 1
if(group%reset_index_s > group%nscalar) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_: group%reset_index_s > group%nscalar")
if(size(field,1) .NE. group%isize_s .OR. size(field,2) .NE. group%jsize_s .OR. size(field,3) .NE. group%ksize_s) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_: size of field does not match the size stored in group")
group%addrs_s(group%reset_index_s) = LOC(field)
end subroutine mpp_reset_group_update_field_r8_3d
subroutine mpp_reset_group_update_field_r8_4d(group, field)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(in) :: field(:,:,:,:)
group%reset_index_s = group%reset_index_s + 1
if(group%reset_index_s > group%nscalar) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_: group%reset_index_s > group%nscalar")
if(size(field,1) .NE. group%isize_s .OR. size(field,2) .NE. group%jsize_s .OR. &
size(field,3)*size(field,4) .NE. group%ksize_s) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_: size of field does not match the size stored in group")
group%addrs_s(group%reset_index_s) = LOC(field)
end subroutine mpp_reset_group_update_field_r8_4d
subroutine mpp_reset_group_update_field_r8_2dv(group, fieldx, fieldy)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(in) :: fieldx(:,:), fieldy(:,:)
integer :: indx
group%reset_index_v = group%reset_index_v + 1
if(group%reset_index_v > group%nvector) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_V_: group%reset_index_v > group%nvector")
if(size(fieldx,1) .NE. group%isize_x .OR. size(fieldx,2) .NE. group%jsize_x .OR. group%ksize_v .NE. 1) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_V_: size of fieldx does not match the size stored in group")
if(size(fieldy,1) .NE. group%isize_y .OR. size(fieldy,2) .NE. group%jsize_y ) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_V_: size of fieldy does not match the size stored in group")
group%addrs_x(group%reset_index_v) = LOC(fieldx)
group%addrs_y(group%reset_index_v) = LOC(fieldy)
end subroutine mpp_reset_group_update_field_r8_2dv
subroutine mpp_reset_group_update_field_r8_3dv(group, fieldx, fieldy)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(in) :: fieldx(:,:,:), fieldy(:,:,:)
integer :: indx
group%reset_index_v = group%reset_index_v + 1
if(group%reset_index_v > group%nvector) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_V_: group%reset_index_v > group%nvector")
if(size(fieldx,1) .NE. group%isize_x .OR. size(fieldx,2) .NE. group%jsize_x .OR. size(fieldx,3) .NE. group%ksize_v) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_V_: size of fieldx does not match the size stored in group")
if(size(fieldy,1) .NE. group%isize_y .OR. size(fieldy,2) .NE. group%jsize_y .OR. size(fieldy,3) .NE. group%ksize_v) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_V_: size of fieldy does not match the size stored in group")
group%addrs_x(group%reset_index_v) = LOC(fieldx)
group%addrs_y(group%reset_index_v) = LOC(fieldy)
end subroutine mpp_reset_group_update_field_r8_3dv
subroutine mpp_reset_group_update_field_r8_4dv(group, fieldx, fieldy)
type(mpp_group_update_type), intent(inout) :: group
real(8), intent(in) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
integer :: indx
group%reset_index_v = group%reset_index_v + 1
if(group%reset_index_v > group%nvector) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_V_: group%reset_index_v > group%nvector")
if(size(fieldx,1) .NE. group%isize_x .OR. size(fieldx,2) .NE. group%jsize_x .OR. &
size(fieldx,3)*size(fieldx,4) .NE. group%ksize_v) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_V_: size of fieldx does not match the size stored in group")
if(size(fieldy,1) .NE. group%isize_y .OR. size(fieldy,2) .NE. group%jsize_y .OR. &
size(fieldy,3)*size(fieldy,4) .NE. group%ksize_v) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_V_: size of fieldy does not match the size stored in group")
group%addrs_x(group%reset_index_v) = LOC(fieldx)
group%addrs_y(group%reset_index_v) = LOC(fieldy)
end subroutine mpp_reset_group_update_field_r8_4dv
# 1851 "../mpp/include/mpp_domains_misc.inc" 2
# 1 "../mpp/include/mpp_group_update.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
! -*-f90-*-
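! The routines that follow are the real(4) instantiations of the group update
! template (mpp_group_update.h) expanded above for real(8); the 2d and 4d entry
! points simply remap the data to a 3d view and call the 3d routine.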
subroutine mpp_create_group_update_r4_2d(group, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(inout) :: field(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
real(4) :: field3D(size(field,1),size(field,2),1)
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_create_group_update(group, field3D, domain, flags, position, whalo, ehalo, shalo, nhalo)
return
end subroutine mpp_create_group_update_r4_2d
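! mpp_create_group_update_r4_3d: add one real(4) scalar field to a group
! update. The first field added fixes the update flags, halo widths, position
! and array sizes for the group; every subsequent field must match those
! settings or a FATAL "Incompatible field" error is raised.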
subroutine mpp_create_group_update_r4_3d(group, field, domain, flags, position, whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(inout) :: field(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
integer :: update_position, update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: update_flags, isize, jsize, ksize
integer :: nscalar
character(len=3) :: text
logical :: set_mismatch, update_edge_only
logical :: recv(8)
if(group%initialized) then
call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_3D: group is already initialized")
endif
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
update_position = CENTER
!--- when there is NINETY or MINUS_NINETY rotation for some contact, the scalar data cannot be on E or N-cell.
if(present(position)) then
update_position = position
if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
call mpp_error(FATAL, 'MPP_CREATE_GROUP_UPDATE_3D: when there is NINETY or MINUS_NINETY rotation, ' // &
'cannot use the scalar version of update_domain for data on E or N-cell' )
end if
if( domain%max_ntile_pe > 1 ) then
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE: do not support multiple tile per processor')
endif
update_flags = XUPDATE+YUPDATE
if(present(flags)) update_flags = flags
group%nscalar = group%nscalar + 1
nscalar = group%nscalar
if( nscalar > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
isize = size(field,1); jsize=size(field,2); ksize = size(field,3)
group%addrs_s(nscalar) = LOC(field)
if( group%nscalar == 1 ) then
group%flags_s = update_flags
group%whalo_s = update_whalo
group%ehalo_s = update_ehalo
group%shalo_s = update_shalo
group%nhalo_s = update_nhalo
group%position = update_position
group%isize_s = isize
group%jsize_s = jsize
group%ksize_s = ksize
call mpp_get_memory_domain(domain, group%is_s, group%ie_s, group%js_s, group%je_s, position=position)
update_edge_only = BTEST(update_flags, EDGEONLY)
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
recv(2) = .false.
recv(4) = .false.
recv(6) = .false.
recv(8) = .false.
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
group%recv_s = recv
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (group%flags_s .NE. update_flags)
set_mismatch = set_mismatch .OR. (group%whalo_s .NE. update_whalo)
set_mismatch = set_mismatch .OR. (group%ehalo_s .NE. update_ehalo)
set_mismatch = set_mismatch .OR. (group%shalo_s .NE. update_shalo)
set_mismatch = set_mismatch .OR. (group%nhalo_s .NE. update_nhalo)
set_mismatch = set_mismatch .OR. (group%position .NE. update_position)
set_mismatch = set_mismatch .OR. (group%isize_s .NE. isize)
set_mismatch = set_mismatch .OR. (group%jsize_s .NE. jsize)
set_mismatch = set_mismatch .OR. (group%ksize_s .NE. ksize)
if(set_mismatch)then
write( text,'(i2)' ) nscalar
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_3D: Incompatible field at count '//text//' for group update.' )
endif
endif
return
end subroutine mpp_create_group_update_r4_3d
subroutine mpp_create_group_update_r4_4d(group, field, domain, flags, position, &
whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(inout) :: field(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
pointer( ptr, field3D )
ptr = LOC(field)
call mpp_create_group_update(group, field3D, domain, flags, position, whalo, ehalo, shalo, nhalo)
return
end subroutine mpp_create_group_update_r4_4d
subroutine mpp_create_group_update_r4_2dv( group, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(inout) :: fieldx(:,:), fieldy(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),1)
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),1)
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_create_group_update(group, field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo)
return
end subroutine mpp_create_group_update_r4_2dv
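! mpp_create_group_update_r4_3dv: add a real(4) vector field pair (fieldx,
! fieldy) to a group update. As with scalars, the first pair fixes the flags,
! halo widths, gridtype and sizes, and later pairs must be consistent. The
! staggering positions used for the memory-domain bounds are derived from the
! gridtype (AGRID, BGRID_NE/SW, CGRID_NE/SW, DGRID_NE/SW).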
subroutine mpp_create_group_update_r4_3dv( group, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
integer :: update_flags, isize_x, jsize_x, ksize_x, isize_y, jsize_y, ksize_y
integer :: nvector, update_gridtype, position_x, position_y
character(len=3) :: text
logical :: set_mismatch, update_edge_only
logical :: recv(8)
if(group%initialized) then
call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: group is already initialized")
endif
if(present(whalo)) then
update_whalo = whalo
if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
"optional argument whalo should not be larger than the whalo when define domain.")
else
update_whalo = domain%whalo
end if
if(present(ehalo)) then
update_ehalo = ehalo
if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
"optional argument ehalo should not be larger than the ehalo when define domain.")
else
update_ehalo = domain%ehalo
end if
if(present(shalo)) then
update_shalo = shalo
if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
"optional argument shalo should not be larger than the shalo when define domain.")
else
update_shalo = domain%shalo
end if
if(present(nhalo)) then
update_nhalo = nhalo
if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
"optional argument nhalo should not be larger than the nhalo when define domain.")
else
update_nhalo = domain%nhalo
end if
update_gridtype = AGRID
if(PRESENT(gridtype)) update_gridtype = gridtype
if( domain%max_ntile_pe > 1 ) then
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_V: do not support multiple tile per processor')
endif
update_flags = XUPDATE+YUPDATE !default
if( PRESENT(flags) )update_flags = flags
! The following test is so that SCALAR_PAIR can be used alone with the
! same default update pattern as without.
if (BTEST(update_flags,SCALAR_BIT)) then
if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
.OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
end if
group%nvector = group%nvector + 1
nvector = group%nvector
if( nvector > MAX_DOMAIN_FIELDS)then
write( text,'(i2)' ) MAX_DOMAIN_FIELDS
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
endif
isize_x = size(fieldx,1); jsize_x = size(fieldx,2); ksize_x = size(fieldx,3)
isize_y = size(fieldy,1); jsize_y = size(fieldy,2); ksize_y = size(fieldy,3)
if(ksize_x .NE. ksize_y) call mpp_error(FATAL, &
'MPP_CREATE_GROUP_UPDATE_V: mismatch of ksize between fieldx and fieldy')
group%addrs_x(nvector) = LOC(fieldx)
group%addrs_y(nvector) = LOC(fieldy)
if( group%nvector == 1 ) then
group%flags_v = update_flags
group%whalo_v = update_whalo
group%ehalo_v = update_ehalo
group%shalo_v = update_shalo
group%nhalo_v = update_nhalo
group%gridtype = update_gridtype
group%isize_x = isize_x
group%jsize_x = jsize_x
group%isize_y = isize_y
group%jsize_y = jsize_y
group%ksize_v = ksize_x
update_edge_only = BTEST(update_flags, EDGEONLY)
group%nonsym_edge = .false.
recv(1) = BTEST(update_flags,EAST)
recv(3) = BTEST(update_flags,SOUTH)
recv(5) = BTEST(update_flags,WEST)
recv(7) = BTEST(update_flags,NORTH)
if( update_edge_only ) then
recv(2) = .false.
recv(4) = .false.
recv(6) = .false.
recv(8) = .false.
if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
recv(1) = .true.
recv(3) = .true.
recv(5) = .true.
recv(7) = .true.
endif
else
recv(2) = recv(1) .AND. recv(3)
recv(4) = recv(3) .AND. recv(5)
recv(6) = recv(5) .AND. recv(7)
recv(8) = recv(7) .AND. recv(1)
endif
group%recv_x = recv
group%recv_y = recv
!--- NONSYMEDGE is only for non-symmetric domain and CGRID/DGRID
if( .not. domain%symmetry .and. (update_gridtype==CGRID_NE .OR. update_gridtype==DGRID_NE)) then
group%nonsym_edge = BTEST(update_flags, NONSYMEDGE)
endif
if( group%nonsym_edge ) then
group%recv_x(2:8:2) = .false.
group%recv_y(2:8:2) = .false.
if(update_gridtype==CGRID_NE) then
group%recv_x(3) = .false.
group%recv_x(7) = .false.
group%recv_y(1) = .false.
group%recv_y(5) = .false.
else if(update_gridtype==DGRID_NE) then
group%recv_x(1) = .false.
group%recv_x(5) = .false.
group%recv_y(3) = .false.
group%recv_y(7) = .false.
endif
endif
select case(group%gridtype)
case (AGRID)
position_x = CENTER
position_y = CENTER
case (BGRID_NE, BGRID_SW)
position_x = CORNER
position_y = CORNER
case (CGRID_NE, CGRID_SW)
position_x = EAST
position_y = NORTH
case (DGRID_NE, DGRID_SW)
position_x = NORTH
position_y = EAST
case default
call mpp_error(FATAL, "mpp_CREATE_GROUP_UPDATE_V: invalid value of gridtype")
end select
call mpp_get_memory_domain(domain, group%is_x, group%ie_x, group%js_x, group%je_x, position=position_x)
call mpp_get_memory_domain(domain, group%is_y, group%ie_y, group%js_y, group%je_y, position=position_y)
else
set_mismatch = .false.
set_mismatch = set_mismatch .OR. (group%flags_v .NE. update_flags)
set_mismatch = set_mismatch .OR. (group%whalo_v .NE. update_whalo)
set_mismatch = set_mismatch .OR. (group%ehalo_v .NE. update_ehalo)
set_mismatch = set_mismatch .OR. (group%shalo_v .NE. update_shalo)
set_mismatch = set_mismatch .OR. (group%nhalo_v .NE. update_nhalo)
set_mismatch = set_mismatch .OR. (group%gridtype .NE. update_gridtype)
set_mismatch = set_mismatch .OR. (group%isize_x .NE. isize_x)
set_mismatch = set_mismatch .OR. (group%jsize_x .NE. jsize_x)
set_mismatch = set_mismatch .OR. (group%isize_y .NE. isize_y)
set_mismatch = set_mismatch .OR. (group%jsize_y .NE. jsize_y)
set_mismatch = set_mismatch .OR. (group%ksize_v .NE. ksize_x)
if(set_mismatch)then
write( text,'(i2)' ) nvector
call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_V: Incompatible field at count '//text//' for group update.' )
endif
endif
return
end subroutine mpp_create_group_update_r4_3dv
subroutine mpp_create_group_update_r4_4dv( group, fieldx, fieldy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
real(4) :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4))
real(4) :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4))
pointer( ptrx, field3Dx )
pointer( ptry, field3Dy )
ptrx = LOC(fieldx)
ptry = LOC(fieldy)
call mpp_create_group_update(group, field3Dx, field3Dy, domain, flags, gridtype, &
whalo, ehalo, shalo, nhalo)
return
end subroutine mpp_create_group_update_r4_4dv
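! mpp_do_group_update_r4: blocking grouped halo update for real(4) fields.
! It pre-posts the receives, packs and sends the halo data, waits for the
! receives, unpacks, applies the folded-north vector corrections, and waits
! for the sends to complete before returning.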
subroutine mpp_do_group_update_r4(group, domain, d_type)
type(mpp_group_update_type), intent(inout) :: group
type(domain2D), intent(inout) :: domain
real(4), intent(in) :: d_type
integer :: nscalar, nvector, nlist
logical :: recv_y(8)
integer :: nsend, nrecv, flags_v
integer :: msgsize
integer :: from_pe, to_pe, buffer_pos, pos
integer :: ksize, is, ie, js, je
integer :: n, l, m, i, j, k, buffer_start_pos, nk
integer :: shift, gridtype, midpoint
integer :: npack, nunpack, rotation, isd
character(len=8) :: text
real(4) :: buffer(mpp_domains_stack_size)
real(4) :: field (group%is_s:group%ie_s,group%js_s:group%je_s, group%ksize_s)
real(4) :: fieldx(group%is_x:group%ie_x,group%js_x:group%je_x, group%ksize_v)
real(4) :: fieldy(group%is_y:group%ie_y,group%js_y:group%je_y, group%ksize_v)
pointer(ptr, buffer )
pointer(ptr_field, field)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
nscalar = group%nscalar
nvector = group%nvector
nlist = size(domain%list(:))
gridtype = group%gridtype
!--- ksize_s must equal ksize_v
if(nvector > 0 .AND. nscalar > 0) then
if(group%ksize_s .NE. group%ksize_v) then
call mpp_error(FATAL, "MPP_DO_GROUP_UPDATE: ksize_s and ksize_v are not equal")
endif
ksize = group%ksize_s
else if (nscalar > 0) then
ksize = group%ksize_s
else if (nvector > 0) then
ksize = group%ksize_v
else
call mpp_error(FATAL, "MPP_DO_GROUP_UPDATE: nscalar and nvector are all 0")
endif
if(nvector > 0) recv_y = group%recv_y
ptr = LOC(mpp_domains_stack)
!--- set reset_index_s and reset_index_v to 0
group%reset_index_s = 0
group%reset_index_v = 0
if(.not. group%initialized) call set_group_update(group,domain)
nrecv = group%nrecv
nsend = group%nsend
!---pre-post receive.
call mpp_clock_begin(group_recv_clock)
do m = 1, nrecv
msgsize = group%recv_size(m)
from_pe = group%from_pe(m)
if( msgsize .GT. 0 )then
buffer_pos = group%buffer_pos_recv(m)
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., &
tag=COMM_TAG_1)
end if
end do
!pack the data
call mpp_clock_end(group_recv_clock)
flags_v = group%flags_v
npack = group%npack
call mpp_clock_begin(group_pack_clock)
!pack the data
buffer_start_pos = 0
# 1 "../mpp/include/group_update_pack.inc" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
if( group%k_loop_inside ) then
!$OMP parallel do default(none) shared(npack,group,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is,ie,js,je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do n = 1, npack
buffer_pos = group%pack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos
is = group%pack_is(n); ie = group%pack_ie(n)
js = group%pack_js(n); je = group%pack_je(n)
rotation = group%pack_rotation(n)
if( group%pack_type(n) == FIELD_S ) then
select case( rotation )
case(ZERO)
do l=1, group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
enddo
enddo
case( MINUS_NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
else if( group%pack_type(n) == FIELD_X ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case( NINETY )
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
else if( group%pack_type(n) == FIELD_Y ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
endif
enddo
else
!$OMP parallel do default(none) shared(npack,group,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is,ie,js,je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do nk = 1, npack*ksize
n = (nk-1)/ksize + 1
k = mod((nk-1), ksize) + 1
buffer_pos = group%pack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos + (k-1)*group%pack_size(n)
is = group%pack_is(n); ie = group%pack_ie(n)
js = group%pack_js(n); je = group%pack_je(n)
rotation = group%pack_rotation(n)
if( group%pack_type(n) == FIELD_S ) then
select case( rotation )
case(ZERO)
do l=1, group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
enddo
case( MINUS_NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
case( NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end select
else if( group%pack_type(n) == FIELD_X ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end if
case( NINETY )
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end if
end select ! select case( rotation(n) )
else if( group%pack_type(n) == FIELD_Y ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
case( MINUS_NINETY )
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
case( NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end if
end select ! select case( rotation(n) )
endif
enddo
endif
# 498 "../mpp/include/mpp_group_update.h" 2
call mpp_clock_end(group_pack_clock)
call mpp_clock_begin(group_send_clock)
do n = 1, nsend
msgsize = group%send_size(n)
if( msgsize .GT. 0 )then
buffer_pos = group%buffer_pos_send(n)
to_pe = group%to_pe(n)
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1)
endif
enddo
call mpp_clock_end(group_send_clock)
if(nrecv>0) then
call mpp_clock_begin(group_wait_clock)
call mpp_sync_self(check=EVENT_RECV)
call mpp_clock_end(group_wait_clock)
endif
!---unpack the buffer
nunpack = group%nunpack
call mpp_clock_begin(group_unpk_clock)
# 1 "../mpp/include/group_update_unpack.inc" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
if( group%k_loop_inside ) then
!$OMP parallel do default(none) shared(nunpack,group,nscalar,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is, ie, js, je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy, n,k )
do n = nunpack, 1, -1
buffer_pos = group%unpack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos
is = group%unpack_is(n); ie = group%unpack_ie(n)
js = group%unpack_js(n); je = group%unpack_je(n)
if( group%unpack_type(n) == FIELD_S ) then
do l=1,nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
end do
else if( group%unpack_type(n) == FIELD_X ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = buffer(pos)
end do
end do
end do
end do
else if( group%unpack_type(n) == FIELD_Y ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
end do
endif
enddo
else
!$OMP parallel do default(none) shared(nunpack,group,nscalar,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is, ie, js, je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do nk = nunpack*ksize, 1, -1
n = (nk-1)/ksize + 1
k = mod((nk-1), ksize) + 1
buffer_pos = group%unpack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos + (k-1)*group%unpack_size(n)
is = group%unpack_is(n); ie = group%unpack_ie(n)
js = group%unpack_js(n); je = group%unpack_je(n)
if( group%unpack_type(n) == FIELD_S ) then
do l=1,nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
else if( group%unpack_type(n) == FIELD_X ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = buffer(pos)
end do
end do
end do
else if( group%unpack_type(n) == FIELD_Y ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
endif
enddo
endif
# 521 "../mpp/include/mpp_group_update.h" 2
call mpp_clock_end(group_unpk_clock)
! ---northern boundary fold
shift = 0
if(domain%symmetry) shift = 1
if( nvector >0 .AND. BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags_v,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
if( .NOT. domain%symmetry ) is = is - 1
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where halo data coming from the
! other half of the fold may have the wrong sign
! off the west edge, when updating in the north or west direction
j = domain%y(1)%global%end+shift
if ( recv_y(7) .OR. recv_y(5) ) then
select case(gridtype)
case(BGRID_NE)
if(domain%symmetry) then
is = domain%x(1)%global%begin
else
is = domain%x(1)%global%begin - 1
end if
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north BGRID_NE west edge ubound error.' )
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = domain%x(1)%data%begin,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
isd = domain%x(1)%compute%begin - group%whalo_v
if( is.GT.isd )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north CGRID_NE west edge ubound error.' )
do l=1,nvector
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = isd,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%compute%end+group%ehalo_v
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,nvector
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,SOUTH) .OR. BTEST(domain%fold,WEST) .OR. BTEST(domain%fold,EAST) ) then
call mpp_error(FATAL, "MPP_DO_GROUP_UPDATE: this interface does not support folded_south, " // &
"folded_west of folded_east, contact developer")
endif
if(nsend>0) then
call mpp_clock_begin(group_wait_clock)
call mpp_sync_self( )
call mpp_clock_end(group_wait_clock)
endif
end subroutine mpp_do_group_update_r4
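! mpp_start_group_update_r4: real(4) counterpart of mpp_start_group_update_r8;
! it reserves buffer space on the non-blocking stack (unless reuse_buffer is
! set), posts the receives, packs the send data and posts the sends.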
subroutine mpp_start_group_update_r4(group, domain, d_type, reuse_buffer)
type(mpp_group_update_type), intent(inout) :: group
type(domain2D), intent(inout) :: domain
real(4), intent(in) :: d_type
logical, optional, intent(in) :: reuse_buffer
integer :: nscalar, nvector
integer :: nsend, nrecv, flags_v
integer :: msgsize, npack, rotation
integer :: from_pe, to_pe, buffer_pos, pos
integer :: ksize, is, ie, js, je
integer :: n, l, m, i, j, k, buffer_start_pos, nk
logical :: reuse_buf_pos
character(len=8) :: text
real(4) :: buffer(size(mpp_domains_stack_nonblock(:)))
real(4) :: field (group%is_s:group%ie_s,group%js_s:group%je_s, group%ksize_s)
real(4) :: fieldx(group%is_x:group%ie_x,group%js_x:group%je_x, group%ksize_v)
real(4) :: fieldy(group%is_y:group%ie_y,group%js_y:group%je_y, group%ksize_v)
pointer( ptr, buffer )
pointer(ptr_field, field)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
nscalar = group%nscalar
nvector = group%nvector
if(nscalar>0) then
ksize = group%ksize_s
else
ksize = group%ksize_v
endif
!--- set reset_index_s and reset_index_v to 0
group%reset_index_s = 0
group%reset_index_v = 0
reuse_buf_pos = .FALSE.
if (PRESENT(reuse_buffer)) reuse_buf_pos = reuse_buffer
if (.not. group%initialized) then
call set_group_update(group,domain)
endif
if (.not. reuse_buf_pos) then
group%buffer_start_pos = nonblock_group_buffer_pos
nonblock_group_buffer_pos = nonblock_group_buffer_pos + group%tot_msgsize
mpp_domains_stack_hwm = nonblock_group_buffer_pos + 1
if( mpp_domains_stack_hwm .GT. mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'set_group_update: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
else if( group%buffer_start_pos < 0 ) then
call mpp_error(FATAL, "MPP_START_GROUP_UPDATE: group%buffer_start_pos is not set")
endif
nrecv = group%nrecv
nsend = group%nsend
ptr = LOC(mpp_domains_stack_nonblock)
! Make sure we are not in the middle of an old-style non-blocking halo update.
if(num_update>0) call mpp_error(FATAL, "MPP_START_GROUP_UPDATE: can not be called in the middle of "// &
"mpp_start_update_domains/mpp_complete_update_domains call")
num_nonblock_group_update = num_nonblock_group_update + 1
!---pre-post receive.
call mpp_clock_begin(nonblock_group_recv_clock)
do m = 1, nrecv
msgsize = group%recv_size(m)
from_pe = group%from_pe(m)
if( msgsize .GT. 0 )then
buffer_pos = group%buffer_pos_recv(m) + group%buffer_start_pos
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., &
tag=COMM_TAG_1, request=group%request_recv(m))
group%type_recv(m) = MPI_REAL4
end if
end do
call mpp_clock_end(nonblock_group_recv_clock)
flags_v = group%flags_v
!pack the data
call mpp_clock_begin(nonblock_group_pack_clock)
npack = group%npack
buffer_start_pos = group%buffer_start_pos
# 1 "../mpp/include/group_update_pack.inc" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
if( group%k_loop_inside ) then
!$OMP parallel do default(none) shared(npack,group,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is,ie,js,je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do n = 1, npack
buffer_pos = group%pack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos
is = group%pack_is(n); ie = group%pack_ie(n)
js = group%pack_js(n); je = group%pack_je(n)
rotation = group%pack_rotation(n)
if( group%pack_type(n) == FIELD_S ) then
select case( rotation )
case(ZERO)
do l=1, group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
enddo
enddo
case( MINUS_NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end do
end select
else if( group%pack_type(n) == FIELD_X ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
case( NINETY )
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
else if( group%pack_type(n) == FIELD_Y ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
case( MINUS_NINETY )
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
case( NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end do
end if
end select ! select case( rotation(n) )
endif
enddo
else
!$OMP parallel do default(none) shared(npack,group,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is,ie,js,je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do nk = 1, npack*ksize
n = (nk-1)/ksize + 1
k = mod((nk-1), ksize) + 1
buffer_pos = group%pack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos + (k-1)*group%pack_size(n)
is = group%pack_is(n); ie = group%pack_ie(n)
js = group%pack_js(n); je = group%pack_je(n)
rotation = group%pack_rotation(n)
if( group%pack_type(n) == FIELD_S ) then
select case( rotation )
case(ZERO)
do l=1, group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
enddo
case( MINUS_NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
case( NINETY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
do l=1,group%nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = field(i,j,k)
end do
end do
end do
end select
else if( group%pack_type(n) == FIELD_X ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
case( MINUS_NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end if
case( NINETY )
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end if
end select ! select case( rotation(n) )
else if( group%pack_type(n) == FIELD_Y ) then
select case( rotation )
case(ZERO)
do l=1, nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = js, je
do i = is, ie
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
case( MINUS_NINETY )
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = is, ie
do j = je, js, -1
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
case( NINETY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1, nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = fieldx(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do i = ie, is, -1
do j = js, je
pos = pos + 1
buffer(pos) = -fieldx(i,j,k)
end do
end do
end do
end if
case( ONE_HUNDRED_EIGHTY )
if( BTEST(group%flags_v,SCALAR_BIT) ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = fieldy(i,j,k)
end do
end do
end do
else
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = je, js, -1
do i = ie, is, -1
pos = pos + 1
buffer(pos) = -fieldy(i,j,k)
end do
end do
end do
end if
end select ! select case( rotation(n) )
endif
enddo
endif
# 729 "../mpp/include/mpp_group_update.h" 2
call mpp_clock_end(nonblock_group_pack_clock)
call mpp_clock_begin(nonblock_group_send_clock)
do n = 1, nsend
msgsize = group%send_size(n)
if( msgsize .GT. 0 )then
buffer_pos = group%buffer_pos_send(n) + group%buffer_start_pos
to_pe = group%to_pe(n)
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1, &
request=group%request_send(n))
endif
enddo
call mpp_clock_end(nonblock_group_send_clock)
end subroutine mpp_start_group_update_r4
subroutine mpp_complete_group_update_r4(group, domain, d_type)
type(mpp_group_update_type), intent(inout) :: group
type(domain2D), intent(inout) :: domain
real(4), intent(in) :: d_type
integer :: nsend, nrecv, nscalar, nvector
integer :: k, buffer_pos, msgsize, pos, m, n, l
integer :: is, ie, js, je, dir, ksize, i, j
integer :: shift, gridtype, midpoint, flags_v
integer :: nunpack, rotation, buffer_start_pos, nk, isd
logical :: recv_y(8)
real(4) :: buffer(size(mpp_domains_stack_nonblock(:)))
real(4) :: field (group%is_s:group%ie_s,group%js_s:group%je_s, group%ksize_s)
real(4) :: fieldx(group%is_x:group%ie_x,group%js_x:group%je_x, group%ksize_v)
real(4) :: fieldy(group%is_y:group%ie_y,group%js_y:group%je_y, group%ksize_v)
pointer(ptr, buffer )
pointer(ptr_field, field)
pointer(ptr_fieldx, fieldx)
pointer(ptr_fieldy, fieldy)
gridtype = group%gridtype
flags_v = group%flags_v
nscalar = group%nscalar
nvector = group%nvector
nrecv = group%nrecv
nsend = group%nsend
if(nscalar>0) then
ksize = group%ksize_s
else
ksize = group%ksize_v
endif
if(nvector > 0) recv_y = group%recv_y
ptr = LOC(mpp_domains_stack_nonblock)
if(num_nonblock_group_update < 1) call mpp_error(FATAL, &
'mpp_start_group_update must be called before calling mpp_complete_group_update')
num_nonblock_group_update = num_nonblock_group_update - 1
complete_group_update_on = .true.
if(nrecv>0) then
call mpp_clock_begin(nonblock_group_wait_clock)
call mpp_sync_self(check=EVENT_RECV, request=group%request_recv(1:nrecv), &
msg_size=group%recv_size(1:nrecv), msg_type=group%type_recv(1:nrecv))
call mpp_clock_end(nonblock_group_wait_clock)
endif
!---unpack the buffer
nunpack = group%nunpack
call mpp_clock_begin(nonblock_group_unpk_clock)
buffer_start_pos = group%buffer_start_pos
# 1 "../mpp/include/group_update_unpack.inc" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
if( group%k_loop_inside ) then
!$OMP parallel do default(none) shared(nunpack,group,nscalar,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is, ie, js, je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy, n,k )
do n = nunpack, 1, -1
buffer_pos = group%unpack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos
is = group%unpack_is(n); ie = group%unpack_ie(n)
js = group%unpack_js(n); je = group%unpack_je(n)
if( group%unpack_type(n) == FIELD_S ) then
do l=1,nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
end do
else if( group%unpack_type(n) == FIELD_X ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = buffer(pos)
end do
end do
end do
end do
else if( group%unpack_type(n) == FIELD_Y ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do k = 1, ksize
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
end do
endif
enddo
else
!$OMP parallel do default(none) shared(nunpack,group,nscalar,ptr,nvector,ksize,buffer_start_pos) &
!$OMP private(buffer_pos,pos,m,is, ie, js, je,rotation, &
!$OMP ptr_field, ptr_fieldx, ptr_fieldy,n,k)
do nk = nunpack*ksize, 1, -1
n = (nk-1)/ksize + 1
k = mod((nk-1), ksize) + 1
buffer_pos = group%unpack_buffer_pos(n) + buffer_start_pos
pos = buffer_pos + (k-1)*group%unpack_size(n)
is = group%unpack_is(n); ie = group%unpack_ie(n)
js = group%unpack_js(n); je = group%unpack_je(n)
if( group%unpack_type(n) == FIELD_S ) then
do l=1,nscalar ! loop over number of fields
ptr_field = group%addrs_s(l)
do j = js, je
do i = is, ie
pos = pos + 1
field(i,j,k) = buffer(pos)
end do
end do
end do
else if( group%unpack_type(n) == FIELD_X ) then
do l=1,nvector ! loop over number of fields
ptr_fieldx = group%addrs_x(l)
do j = js, je
do i = is, ie
pos = pos + 1
fieldx(i,j,k) = buffer(pos)
end do
end do
end do
else if( group%unpack_type(n) == FIELD_Y ) then
do l=1,nvector ! loop over number of fields
ptr_fieldy = group%addrs_y(l)
do j = js, je
do i = is, ie
pos = pos + 1
fieldy(i,j,k) = buffer(pos)
end do
end do
end do
endif
enddo
endif
# 797 "../mpp/include/mpp_group_update.h" 2
call mpp_clock_end(nonblock_group_unpk_clock)
! ---northern boundary fold
shift = 0
if(domain%symmetry) shift = 1
if( nvector >0 .AND. BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags_v,SCALAR_BIT)) )then
j = domain%y(1)%global%end+shift
if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
!poles set to 0: BGRID only
if( gridtype.EQ.BGRID_NE )then
midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
j = domain%y(1)%global%end+shift
is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
if( .NOT. domain%symmetry ) is = is - 1
do i = is ,ie, midpoint
if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
fieldx(i,j,k) = 0.
fieldy(i,j,k) = 0.
end do
end do
end if
end do
endif
! the following code block corrects an error where the data in your halo coming from
! the other half of the fold may have the wrong sign
!off west edge, when updating in the north or west direction
j = domain%y(1)%global%end+shift
if ( recv_y(7) .OR. recv_y(5) ) then
select case(gridtype)
case(BGRID_NE)
if(domain%symmetry) then
is = domain%x(1)%global%begin
else
is = domain%x(1)%global%begin - 1
end if
if( is.GT.domain%x(1)%data%begin )then
if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north BGRID_NE west edge ubound error.' )
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = domain%x(1)%data%begin,is-1
fieldx(i,j,k) = fieldx(2*is-i,j,k)
fieldy(i,j,k) = fieldy(2*is-i,j,k)
end do
end do
end do
end if
case(CGRID_NE)
is = domain%x(1)%global%begin
isd = domain%x(1)%compute%begin - group%whalo_v
if( is.GT.isd )then
if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north CGRID_NE west edge ubound error.' )
do l=1,nvector
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = isd,is-1
fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
end do
end do
end do
end if
end select
end if
!off east edge
is = domain%x(1)%global%end
if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
ie = domain%x(1)%compute%end+group%ehalo_v
is = is + 1
select case(gridtype)
case(BGRID_NE)
is = is + shift
ie = ie + shift
do l=1,nvector
ptr_fieldx = group%addrs_x(l)
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = is,ie
fieldx(i,j,k) = -fieldx(i,j,k)
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
case(CGRID_NE)
do l=1,nvector
ptr_fieldy = group%addrs_y(l)
do k = 1,ksize
do i = is, ie
fieldy(i,j,k) = -fieldy(i,j,k)
end do
end do
end do
end select
end if
end if
else if( BTEST(domain%fold,SOUTH) .OR. BTEST(domain%fold,WEST) .OR. BTEST(domain%fold,EAST) ) then
call mpp_error(FATAL, "MPP_COMPLETE_GROUP_UPDATE: this interface does not support folded_south, " // &
"folded_west of folded_east, contact developer")
endif
if(nsend>0) then
call mpp_clock_begin(nonblock_group_wait_clock)
call mpp_sync_self(check=EVENT_SEND, request=group%request_send(1:nsend) )
call mpp_clock_end(nonblock_group_wait_clock)
endif
if( num_nonblock_group_update == 0) then
nonblock_group_buffer_pos = 0
endif
end subroutine mpp_complete_group_update_r4
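! --------------------------------------------------------------------------------------
! Illustrative sketch only (not part of the library): a typical calling sequence for the
! non-blocking group halo update, assuming the generic public interfaces
! mpp_create_group_update, mpp_start_group_update and mpp_complete_group_update; the
! field and domain names are hypothetical and the exact argument lists should be checked
! against the interface blocks of this module.
!
!   type(mpp_group_update_type) :: group
!   call mpp_create_group_update(group, temp, domain)                     ! register a scalar field
!   call mpp_create_group_update(group, u, v, domain, gridtype=BGRID_NE)  ! register a vector pair
!   call mpp_start_group_update(group, domain, d_type)                    ! post receives, pack and send
!   ! ... overlap communication with interior computation here ...
!   call mpp_complete_group_update(group, domain, d_type)                 ! wait, unpack, fix folded edges
! --------------------------------------------------------------------------------------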
subroutine mpp_reset_group_update_field_r4_2d(group, field)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(in) :: field(:,:)
group%reset_index_s = group%reset_index_s + 1
if(group%reset_index_s > group%nscalar) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_: group%reset_index_s > group%nscalar")
if(size(field,1) .NE. group%isize_s .OR. size(field,2) .NE. group%jsize_s .OR. group%ksize_s .NE. 1) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_: size of field does not match the size stored in group")
group%addrs_s(group%reset_index_s) = LOC(field)
end subroutine mpp_reset_group_update_field_r4_2d
subroutine mpp_reset_group_update_field_r4_3d(group, field)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(in) :: field(:,:,:)
group%reset_index_s = group%reset_index_s + 1
if(group%reset_index_s > group%nscalar) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_: group%reset_index_s > group%nscalar")
if(size(field,1) .NE. group%isize_s .OR. size(field,2) .NE. group%jsize_s .OR. size(field,3) .NE. group%ksize_s) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_: size of field does not match the size stored in group")
group%addrs_s(group%reset_index_s) = LOC(field)
end subroutine mpp_reset_group_update_field_r4_3d
subroutine mpp_reset_group_update_field_r4_4d(group, field)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(in) :: field(:,:,:,:)
group%reset_index_s = group%reset_index_s + 1
if(group%reset_index_s > group%nscalar) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_: group%reset_index_s > group%nscalar")
if(size(field,1) .NE. group%isize_s .OR. size(field,2) .NE. group%jsize_s .OR. &
size(field,3)*size(field,4) .NE. group%ksize_s) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_: size of field does not match the size stored in group")
group%addrs_s(group%reset_index_s) = LOC(field)
end subroutine mpp_reset_group_update_field_r4_4d
subroutine mpp_reset_group_update_field_r4_2dv(group, fieldx, fieldy)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(in) :: fieldx(:,:), fieldy(:,:)
integer :: indx
group%reset_index_v = group%reset_index_v + 1
if(group%reset_index_v > group%nvector) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_V_: group%reset_index_v > group%nvector")
if(size(fieldx,1) .NE. group%isize_x .OR. size(fieldx,2) .NE. group%jsize_x .OR. group%ksize_v .NE. 1) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_V_: size of fieldx does not match the size stored in group")
if(size(fieldy,1) .NE. group%isize_y .OR. size(fieldy,2) .NE. group%jsize_y ) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_V_: size of fieldy does not match the size stored in group")
group%addrs_x(group%reset_index_v) = LOC(fieldx)
group%addrs_y(group%reset_index_v) = LOC(fieldy)
end subroutine mpp_reset_group_update_field_r4_2dv
subroutine mpp_reset_group_update_field_r4_3dv(group, fieldx, fieldy)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(in) :: fieldx(:,:,:), fieldy(:,:,:)
integer :: indx
group%reset_index_v = group%reset_index_v + 1
if(group%reset_index_v > group%nvector) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_V_: group%reset_index_v > group%nvector")
if(size(fieldx,1) .NE. group%isize_x .OR. size(fieldx,2) .NE. group%jsize_x .OR. size(fieldx,3) .NE. group%ksize_v) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_V_: size of fieldx does not match the size stored in group")
if(size(fieldy,1) .NE. group%isize_y .OR. size(fieldy,2) .NE. group%jsize_y .OR. size(fieldy,3) .NE. group%ksize_v) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_V_: size of fieldy does not match the size stored in group")
group%addrs_x(group%reset_index_v) = LOC(fieldx)
group%addrs_y(group%reset_index_v) = LOC(fieldy)
end subroutine mpp_reset_group_update_field_r4_3dv
subroutine mpp_reset_group_update_field_r4_4dv(group, fieldx, fieldy)
type(mpp_group_update_type), intent(inout) :: group
real(4), intent(in) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
integer :: indx
group%reset_index_v = group%reset_index_v + 1
if(group%reset_index_v > group%nvector) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_V_: group%reset_index_v > group%nvector")
if(size(fieldx,1) .NE. group%isize_x .OR. size(fieldx,2) .NE. group%jsize_x .OR. &
size(fieldx,3)*size(fieldx,4) .NE. group%ksize_v) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_V_: size of fieldx does not match the size stored in group")
if(size(fieldy,1) .NE. group%isize_y .OR. size(fieldy,2) .NE. group%jsize_y .OR. &
size(fieldy,3)*size(fieldy,4) .NE. group%ksize_v) &
call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_V_: size of fieldy does not match the size stored in group")
group%addrs_x(group%reset_index_v) = LOC(fieldx)
group%addrs_y(group%reset_index_v) = LOC(fieldy)
end subroutine mpp_reset_group_update_field_r4_4dv
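! --------------------------------------------------------------------------------------
! Illustrative sketch only (not part of the library): when the group buffer is reused
! (reuse_buffer=.true. in mpp_start_group_update), the mpp_reset_group_update_field
! routines above can re-point the stored field addresses at a new set of arrays with
! the same shapes before the next start/complete pair.  Names are hypothetical.
!
!   call mpp_reset_group_update_field(group, temp_new)      ! scalar field, same size as original
!   call mpp_reset_group_update_field(group, u_new, v_new)  ! vector pair, same sizes as originals
!   call mpp_start_group_update(group, domain, d_type, reuse_buffer=.true.)
!   call mpp_complete_group_update(group, domain, d_type)
! --------------------------------------------------------------------------------------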
# 1887 "../mpp/include/mpp_domains_misc.inc" 2
# 2788 "../mpp/mpp_domains.F90" 2
# 1 "../mpp/include/mpp_domains_reduce.inc" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_GLOBAL_REDUCE: get global max/min of field !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
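! Illustrative sketch only (not part of the library): mpp_global_max and mpp_global_min
! reduce a domain-decomposed field to a single global extremum and, optionally, return
! its global (i,j,k) locus, which is broadcast so every PE receives the same answer.
! The field may be dimensioned on either the compute or the data domain; the names
! below are hypothetical.
!
!   real(8) :: t(isd:ied,jsd:jed,nk), tmax
!   integer :: loc(3)
!   tmax = mpp_global_max(domain, t, locus=loc)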
# 1 "../mpp/include/mpp_global_reduce.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
function mpp_global_max_r8_2d( domain, field, locus, position )
real(8) :: mpp_global_max_r8_2d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,:)
integer, intent(out), optional :: locus(2)
integer, intent(in), optional :: position
real(8) :: field3D(size(field,1),size(field,2),1)
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_r8_2d = mpp_global_max_r8_3d( domain, field3D, locus3D, position )
locus = locus3D(1:2)
else
mpp_global_max_r8_2d = mpp_global_max_r8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_r8_2d
function mpp_global_max_r8_3d( domain, field, locus, position )
real(8) :: mpp_global_max_r8_3d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(0:,0:,:)
integer, intent(out), optional :: locus(3)
integer, intent(in), optional :: position
real(8) :: local
integer, save :: l_locus(3)
real(8), save :: g_val ! need storage class w/ global address; not sure whether fn result has required class
integer, save :: here ! need storage class w/ global address
integer :: ioff, joff, isc, iec, jsc, jec, ishift, jshift
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE: You must first call mpp_domains_init.' )
call mpp_get_compute_domain(domain, isc, iec, jsc, jec )
call mpp_get_domain_shift(domain, ishift, jshift, position)
iec = iec + ishift
jec = jec + jshift
if( size(field,1).EQ. iec-isc+1 .AND. size(field,2).EQ. jec-jsc+1 )then
!field is on compute domain
ioff = isc
joff = jsc
else if( size(field,1).EQ.domain%x(1)%memory%size+ishift .AND. size(field,2).EQ.domain%y(1)%memory%size+jshift )then
!field is on data domain
ioff = domain%x(1)%data%begin
joff = domain%y(1)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE_: incoming field array must match either compute domain or data domain.' )
end if
!get your local max/min
local = maxval(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
!find the global
g_val = local
call mpp_max( g_val, domain%list(:)%pe )
!find locus of the global max/min
if( PRESENT(locus) )then
!which PE is it on? min of all the PEs that have it
here = mpp_npes()+1
if( g_val == local )here = pe
call mpp_min( here, domain%list(:)%pe )
!find the locus here
if( pe.EQ.here )l_locus = maxloc(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
l_locus(1) = l_locus(1) + ioff
l_locus(2) = l_locus(2) + joff
call mpp_broadcast( l_locus, 3, here, domain%list(:)%pe )
locus = l_locus
end if
mpp_global_max_r8_3d = g_val
return
end function mpp_global_max_r8_3d
function mpp_global_max_r8_4d( domain, field, locus, position )
real(8) :: mpp_global_max_r8_4d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,:,:,:)
integer, intent(out), optional :: locus(4)
integer, intent(in), optional :: position
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_r8_4d = mpp_global_max_r8_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = (locus3D(3)-locus(3))/size(field,3) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_max_r8_4d = mpp_global_max_r8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_r8_4d
function mpp_global_max_r8_5d( domain, field, locus, position )
real(8) :: mpp_global_max_r8_5d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,:,:,:,:)
integer, intent(out), optional :: locus(5)
integer, intent(in), optional :: position
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_r8_5d = mpp_global_max_r8_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = modulo(locus3D(3),size(field,3)*size(field,4))
locus(5) = (locus3D(3)-locus(4))/size(field,3)/size(field,4) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_max_r8_5d = mpp_global_max_r8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_r8_5d
# 45 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_reduce.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
function mpp_global_min_r8_2d( domain, field, locus, position )
real(8) :: mpp_global_min_r8_2d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,:)
integer, intent(out), optional :: locus(2)
integer, intent(in), optional :: position
real(8) :: field3D(size(field,1),size(field,2),1)
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_r8_2d = mpp_global_min_r8_3d( domain, field3D, locus3D, position )
locus = locus3D(1:2)
else
mpp_global_min_r8_2d = mpp_global_min_r8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_r8_2d
function mpp_global_min_r8_3d( domain, field, locus, position )
real(8) :: mpp_global_min_r8_3d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(0:,0:,:)
integer, intent(out), optional :: locus(3)
integer, intent(in), optional :: position
real(8) :: local
integer, save :: l_locus(3)
real(8), save :: g_val ! need storage class w/ global address; not sure whether fn result has required class
integer, save :: here ! need storage class w/ global address
integer :: ioff, joff, isc, iec, jsc, jec, ishift, jshift
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE: You must first call mpp_domains_init.' )
call mpp_get_compute_domain(domain, isc, iec, jsc, jec )
call mpp_get_domain_shift(domain, ishift, jshift, position)
iec = iec + ishift
jec = jec + jshift
if( size(field,1).EQ. iec-isc+1 .AND. size(field,2).EQ. jec-jsc+1 )then
!field is on compute domain
ioff = isc
joff = jsc
else if( size(field,1).EQ.domain%x(1)%memory%size+ishift .AND. size(field,2).EQ.domain%y(1)%memory%size+jshift )then
!field is on data domain
ioff = domain%x(1)%data%begin
joff = domain%y(1)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE_: incoming field array must match either compute domain or data domain.' )
end if
!get your local max/min
local = minval(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
!find the global
g_val = local
call mpp_min( g_val, domain%list(:)%pe )
!find locus of the global max/min
if( PRESENT(locus) )then
!which PE is it on? min of all the PEs that have it
here = mpp_npes()+1
if( g_val == local )here = pe
call mpp_min( here, domain%list(:)%pe )
!find the locus here
if( pe.EQ.here )l_locus = minloc(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
l_locus(1) = l_locus(1) + ioff
l_locus(2) = l_locus(2) + joff
call mpp_broadcast( l_locus, 3, here, domain%list(:)%pe )
locus = l_locus
end if
mpp_global_min_r8_3d = g_val
return
end function mpp_global_min_r8_3d
function mpp_global_min_r8_4d( domain, field, locus, position )
real(8) :: mpp_global_min_r8_4d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,:,:,:)
integer, intent(out), optional :: locus(4)
integer, intent(in), optional :: position
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_r8_4d = mpp_global_min_r8_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = (locus3D(3)-locus(3))/size(field,3) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_min_r8_4d = mpp_global_min_r8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_r8_4d
function mpp_global_min_r8_5d( domain, field, locus, position )
real(8) :: mpp_global_min_r8_5d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,:,:,:,:)
integer, intent(out), optional :: locus(5)
integer, intent(in), optional :: position
real(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_r8_5d = mpp_global_min_r8_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = modulo(locus3D(3),size(field,3)*size(field,4))
locus(5) = (locus3D(3)-locus(4))/size(field,3)/size(field,4) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_min_r8_5d = mpp_global_min_r8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_r8_5d
# 63 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_reduce.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
function mpp_global_max_r4_2d( domain, field, locus, position )
real(4) :: mpp_global_max_r4_2d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,:)
integer, intent(out), optional :: locus(2)
integer, intent(in), optional :: position
real(4) :: field3D(size(field,1),size(field,2),1)
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_r4_2d = mpp_global_max_r4_3d( domain, field3D, locus3D, position )
locus = locus3D(1:2)
else
mpp_global_max_r4_2d = mpp_global_max_r4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_r4_2d
function mpp_global_max_r4_3d( domain, field, locus, position )
real(4) :: mpp_global_max_r4_3d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(0:,0:,:)
integer, intent(out), optional :: locus(3)
integer, intent(in), optional :: position
real(4) :: local
integer, save :: l_locus(3)
real(4), save :: g_val ! need storage class w/ global address; not sure whether fn result has required class
integer, save :: here ! need storage class w/ global address
integer :: ioff, joff, isc, iec, jsc, jec, ishift, jshift
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE: You must first call mpp_domains_init.' )
call mpp_get_compute_domain(domain, isc, iec, jsc, jec )
call mpp_get_domain_shift(domain, ishift, jshift, position)
iec = iec + ishift
jec = jec + jshift
if( size(field,1).EQ. iec-isc+1 .AND. size(field,2).EQ. jec-jsc+1 )then
!field is on compute domain
ioff = isc
joff = jsc
else if( size(field,1).EQ.domain%x(1)%memory%size+ishift .AND. size(field,2).EQ.domain%y(1)%memory%size+jshift )then
!field is on data domain
ioff = domain%x(1)%data%begin
joff = domain%y(1)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE_: incoming field array must match either compute domain or data domain.' )
end if
!get your local max/min
local = maxval(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
!find the global
g_val = local
call mpp_max( g_val, domain%list(:)%pe )
!find locus of the global max/min
if( PRESENT(locus) )then
!which PE is it on? min of all the PEs that have it
here = mpp_npes()+1
if( g_val == local )here = pe
call mpp_min( here, domain%list(:)%pe )
!find the locus here
if( pe.EQ.here )l_locus = maxloc(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
l_locus(1) = l_locus(1) + ioff
l_locus(2) = l_locus(2) + joff
call mpp_broadcast( l_locus, 3, here, domain%list(:)%pe )
locus = l_locus
end if
mpp_global_max_r4_3d = g_val
return
end function mpp_global_max_r4_3d
function mpp_global_max_r4_4d( domain, field, locus, position )
real(4) :: mpp_global_max_r4_4d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,:,:,:)
integer, intent(out), optional :: locus(4)
integer, intent(in), optional :: position
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_r4_4d = mpp_global_max_r4_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = (locus3D(3)-locus(3))/size(field,3) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_max_r4_4d = mpp_global_max_r4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_r4_4d
function mpp_global_max_r4_5d( domain, field, locus, position )
real(4) :: mpp_global_max_r4_5d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,:,:,:,:)
integer, intent(out), optional :: locus(5)
integer, intent(in), optional :: position
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_r4_5d = mpp_global_max_r4_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = modulo(locus3D(3),size(field,3)*size(field,4))
locus(5) = (locus3D(3)-locus(4))/size(field,3)/size(field,4) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_max_r4_5d = mpp_global_max_r4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_r4_5d
# 82 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_reduce.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
function mpp_global_min_r4_2d( domain, field, locus, position )
real(4) :: mpp_global_min_r4_2d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,:)
integer, intent(out), optional :: locus(2)
integer, intent(in), optional :: position
real(4) :: field3D(size(field,1),size(field,2),1)
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_r4_2d = mpp_global_min_r4_3d( domain, field3D, locus3D, position )
locus = locus3D(1:2)
else
mpp_global_min_r4_2d = mpp_global_min_r4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_r4_2d
function mpp_global_min_r4_3d( domain, field, locus, position )
real(4) :: mpp_global_min_r4_3d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(0:,0:,:)
integer, intent(out), optional :: locus(3)
integer, intent(in), optional :: position
real(4) :: local
integer, save :: l_locus(3)
real(4), save :: g_val ! need storage class w/ global address; not sure whether fn result has required class
integer, save :: here ! need storage class w/ global address
integer :: ioff, joff, isc, iec, jsc, jec, ishift, jshift
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE: You must first call mpp_domains_init.' )
call mpp_get_compute_domain(domain, isc, iec, jsc, jec )
call mpp_get_domain_shift(domain, ishift, jshift, position)
iec = iec + ishift
jec = jec + jshift
if( size(field,1).EQ. iec-isc+1 .AND. size(field,2).EQ. jec-jsc+1 )then
!field is on compute domain
ioff = isc
joff = jsc
else if( size(field,1).EQ.domain%x(1)%memory%size+ishift .AND. size(field,2).EQ.domain%y(1)%memory%size+jshift )then
!field is on data domain
ioff = domain%x(1)%data%begin
joff = domain%y(1)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE_: incoming field array must match either compute domain or data domain.' )
end if
!get your local max/min
local = minval(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
!find the global
g_val = local
call mpp_min( g_val, domain%list(:)%pe )
!find locus of the global max/min
if( PRESENT(locus) )then
!which PE is it on? min of all the PEs that have it
here = mpp_npes()+1
if( g_val == local )here = pe
call mpp_min( here, domain%list(:)%pe )
!find the locus here
if( pe.EQ.here )l_locus = minloc(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
l_locus(1) = l_locus(1) + ioff
l_locus(2) = l_locus(2) + joff
call mpp_broadcast( l_locus, 3, here, domain%list(:)%pe )
locus = l_locus
end if
mpp_global_min_r4_3d = g_val
return
end function mpp_global_min_r4_3d
function mpp_global_min_r4_4d( domain, field, locus, position )
real(4) :: mpp_global_min_r4_4d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,:,:,:)
integer, intent(out), optional :: locus(4)
integer, intent(in), optional :: position
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_r4_4d = mpp_global_min_r4_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = (locus3D(3)-locus(3))/size(field,3) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_min_r4_4d = mpp_global_min_r4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_r4_4d
function mpp_global_min_r4_5d( domain, field, locus, position )
real(4) :: mpp_global_min_r4_5d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,:,:,:,:)
integer, intent(out), optional :: locus(5)
integer, intent(in), optional :: position
real(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_r4_5d = mpp_global_min_r4_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = modulo(locus3D(3),size(field,3)*size(field,4))
locus(5) = (locus3D(3)-locus(4))/size(field,3)/size(field,4) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_min_r4_5d = mpp_global_min_r4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_r4_5d
# 100 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_reduce.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
function mpp_global_max_i8_2d( domain, field, locus, position )
integer(8) :: mpp_global_max_i8_2d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(:,:)
integer, intent(out), optional :: locus(2)
integer, intent(in), optional :: position
integer(8) :: field3D(size(field,1),size(field,2),1)
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_i8_2d = mpp_global_max_i8_3d( domain, field3D, locus3D, position )
locus = locus3D(1:2)
else
mpp_global_max_i8_2d = mpp_global_max_i8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_i8_2d
function mpp_global_max_i8_3d( domain, field, locus, position )
integer(8) :: mpp_global_max_i8_3d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(0:,0:,:)
integer, intent(out), optional :: locus(3)
integer, intent(in), optional :: position
integer(8) :: local
integer, save :: l_locus(3)
integer(8), save :: g_val ! need storage class w/ global address; not sure whether fn result has required class
integer, save :: here ! need storage class w/ global address
integer :: ioff, joff, isc, iec, jsc, jec, ishift, jshift
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE: You must first call mpp_domains_init.' )
call mpp_get_compute_domain(domain, isc, iec, jsc, jec )
call mpp_get_domain_shift(domain, ishift, jshift, position)
iec = iec + ishift
jec = jec + jshift
if( size(field,1).EQ. iec-isc+1 .AND. size(field,2).EQ. jec-jsc+1 )then
!field is on compute domain
ioff = isc
joff = jsc
else if( size(field,1).EQ.domain%x(1)%memory%size+ishift .AND. size(field,2).EQ.domain%y(1)%memory%size+jshift )then
!field is on data domain
ioff = domain%x(1)%data%begin
joff = domain%y(1)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE_: incoming field array must match either compute domain or data domain.' )
end if
!get your local max/min
local = maxval(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
!find the global
g_val = local
call mpp_max( g_val, domain%list(:)%pe )
!find locus of the global max/min
if( PRESENT(locus) )then
!which PE is it on? min of all the PEs that have it
here = mpp_npes()+1
if( g_val == local )here = pe
call mpp_min( here, domain%list(:)%pe )
!find the locus here
if( pe.EQ.here )l_locus = maxloc(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
l_locus(1) = l_locus(1) + ioff
l_locus(2) = l_locus(2) + joff
call mpp_broadcast( l_locus, 3, here, domain%list(:)%pe )
locus = l_locus
end if
mpp_global_max_i8_3d = g_val
return
end function mpp_global_max_i8_3d
function mpp_global_max_i8_4d( domain, field, locus, position )
integer(8) :: mpp_global_max_i8_4d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(:,:,:,:)
integer, intent(out), optional :: locus(4)
integer, intent(in), optional :: position
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_i8_4d = mpp_global_max_i8_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = (locus3D(3)-locus(3))/size(field,3) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_max_i8_4d = mpp_global_max_i8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_i8_4d
function mpp_global_max_i8_5d( domain, field, locus, position )
integer(8) :: mpp_global_max_i8_5d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(:,:,:,:,:)
integer, intent(out), optional :: locus(5)
integer, intent(in), optional :: position
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_i8_5d = mpp_global_max_i8_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = modulo(locus3D(3),size(field,3)*size(field,4))
locus(5) = (locus3D(3)-locus(4))/size(field,3)/size(field,4) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_max_i8_5d = mpp_global_max_i8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_i8_5d
# 120 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_reduce.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
function mpp_global_min_i8_2d( domain, field, locus, position )
integer(8) :: mpp_global_min_i8_2d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(:,:)
integer, intent(out), optional :: locus(2)
integer, intent(in), optional :: position
integer(8) :: field3D(size(field,1),size(field,2),1)
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_i8_2d = mpp_global_min_i8_3d( domain, field3D, locus3D, position )
locus = locus3D(1:2)
else
mpp_global_min_i8_2d = mpp_global_min_i8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_i8_2d
function mpp_global_min_i8_3d( domain, field, locus, position )
integer(8) :: mpp_global_min_i8_3d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(0:,0:,:)
integer, intent(out), optional :: locus(3)
integer, intent(in), optional :: position
integer(8) :: local
integer, save :: l_locus(3)
integer(8), save :: g_val ! need storage class w/ global address; not sure whether fn result has required class
integer, save :: here ! need storage class w/ global address
integer :: ioff, joff, isc, iec, jsc, jec, ishift, jshift
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE: You must first call mpp_domains_init.' )
call mpp_get_compute_domain(domain, isc, iec, jsc, jec )
call mpp_get_domain_shift(domain, ishift, jshift, position)
iec = iec + ishift
jec = jec + jshift
if( size(field,1).EQ. iec-isc+1 .AND. size(field,2).EQ. jec-jsc+1 )then
!field is on compute domain
ioff = isc
joff = jsc
else if( size(field,1).EQ.domain%x(1)%memory%size+ishift .AND. size(field,2).EQ.domain%y(1)%memory%size+jshift )then
!field is on data domain
ioff = domain%x(1)%data%begin
joff = domain%y(1)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE_: incoming field array must match either compute domain or data domain.' )
end if
!get your local max/min
local = minval(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
!find the global
g_val = local
call mpp_min( g_val, domain%list(:)%pe )
!find locus of the global max/min
if( PRESENT(locus) )then
!which PE is it on? min of all the PEs that have it
here = mpp_npes()+1
if( g_val == local )here = pe
call mpp_min( here, domain%list(:)%pe )
!find the locus here
if( pe.EQ.here )l_locus = minloc(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
l_locus(1) = l_locus(1) + ioff
l_locus(2) = l_locus(2) + joff
call mpp_broadcast( l_locus, 3, here, domain%list(:)%pe )
locus = l_locus
end if
mpp_global_min_i8_3d = g_val
return
end function mpp_global_min_i8_3d
function mpp_global_min_i8_4d( domain, field, locus, position )
integer(8) :: mpp_global_min_i8_4d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(:,:,:,:)
integer, intent(out), optional :: locus(4)
integer, intent(in), optional :: position
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_i8_4d = mpp_global_min_i8_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = (locus3D(3)-locus(3))/size(field,3) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_min_i8_4d = mpp_global_min_i8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_i8_4d
function mpp_global_min_i8_5d( domain, field, locus, position )
integer(8) :: mpp_global_min_i8_5d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(:,:,:,:,:)
integer, intent(out), optional :: locus(5)
integer, intent(in), optional :: position
integer(8) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_i8_5d = mpp_global_min_i8_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
!unflatten the combined third index into (k,l,m): recover k first, then
!split the remaining 1-based block index into l and m
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = (locus3D(3)-locus(3))/size(field,3) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
locus(5) = (locus(4)-1)/size(field,4) + 1
locus(4) = modulo(locus(4)-1,size(field,4)) + 1
else
mpp_global_min_i8_5d = mpp_global_min_i8_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_i8_5d
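!
! Worked example of the locus unflattening used by the 4d/5d wrappers above
! (illustrative): with size(field,3)=3 and a flattened third index
! locus3D(3)=7, the 4d wrapper gives
!   locus(3) = modulo(7,3) = 1
!   locus(4) = (7-1)/3 + 1 = 3
! i.e. element 7 of the combined dimension is (k,l)=(1,3); for locus3D(3)=6
! the modulo yields 0, so the correction branch restores (k,l)=(3,2).
!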
# 138 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_reduce.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_max_i4_2d( domain, field, locus, position )
integer(4) :: mpp_global_max_i4_2d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(:,:)
integer, intent(out), optional :: locus(2)
integer, intent(in), optional :: position
integer(4) :: field3D(size(field,1),size(field,2),1)
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_i4_2d = mpp_global_max_i4_3d( domain, field3D, locus3D, position )
locus = locus3D(1:2)
else
mpp_global_max_i4_2d = mpp_global_max_i4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_i4_2d
function mpp_global_max_i4_3d( domain, field, locus, position )
integer(4) :: mpp_global_max_i4_3d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(0:,0:,:)
integer, intent(out), optional :: locus(3)
integer, intent(in), optional :: position
integer(4) :: local
integer, save :: l_locus(3)
integer(4), save :: g_val ! need storage class w/ global address; not sure whether fn result has required class
integer, save :: here ! need storage class w/ global address
integer :: ioff, joff, isc, iec, jsc, jec, ishift, jshift
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE: You must first call mpp_domains_init.' )
call mpp_get_compute_domain(domain, isc, iec, jsc, jec )
call mpp_get_domain_shift(domain, ishift, jshift, position)
iec = iec + ishift
jec = jec + jshift
if( size(field,1).EQ. iec-isc+1 .AND. size(field,2).EQ. jec-jsc+1 )then
!field is on compute domain
ioff = isc
joff = jsc
else if( size(field,1).EQ.domain%x(1)%memory%size+ishift .AND. size(field,2).EQ.domain%y(1)%memory%size+jshift )then
!field is on data domain
ioff = domain%x(1)%data%begin
joff = domain%y(1)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE_: incoming field array must match either compute domain or data domain.' )
end if
!get your local max/min
local = maxval(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
!find the global
g_val = local
call mpp_max( g_val, domain%list(:)%pe )
!find locus of the global max/min
if( PRESENT(locus) )then
!which PE is it on? min of all the PEs that have it
here = mpp_npes()+1
if( g_val == local )here = pe
call mpp_min( here, domain%list(:)%pe )
!find the locus here
if( pe.EQ.here )l_locus = maxloc(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
l_locus(1) = l_locus(1) + ioff
l_locus(2) = l_locus(2) + joff
call mpp_broadcast( l_locus, 3, here, domain%list(:)%pe )
locus = l_locus
end if
mpp_global_max_i4_3d = g_val
return
end function mpp_global_max_i4_3d
function mpp_global_max_i4_4d( domain, field, locus, position )
integer(4) :: mpp_global_max_i4_4d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(:,:,:,:)
integer, intent(out), optional :: locus(4)
integer, intent(in), optional :: position
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_i4_4d = mpp_global_max_i4_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = (locus3D(3)-locus(3))/size(field,3) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_max_i4_4d = mpp_global_max_i4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_i4_4d
function mpp_global_max_i4_5d( domain, field, locus, position )
integer(4) :: mpp_global_max_i4_5d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(:,:,:,:,:)
integer, intent(out), optional :: locus(5)
integer, intent(in), optional :: position
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_max_i4_5d = mpp_global_max_i4_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
!unflatten the combined third index into (k,l,m): recover k first, then
!split the remaining 1-based block index into l and m
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = (locus3D(3)-locus(3))/size(field,3) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
locus(5) = (locus(4)-1)/size(field,4) + 1
locus(4) = modulo(locus(4)-1,size(field,4)) + 1
else
mpp_global_max_i4_5d = mpp_global_max_i4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_max_i4_5d
# 157 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_reduce.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_min_i4_2d( domain, field, locus, position )
integer(4) :: mpp_global_min_i4_2d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(:,:)
integer, intent(out), optional :: locus(2)
integer, intent(in), optional :: position
integer(4) :: field3D(size(field,1),size(field,2),1)
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_i4_2d = mpp_global_min_i4_3d( domain, field3D, locus3D, position )
locus = locus3D(1:2)
else
mpp_global_min_i4_2d = mpp_global_min_i4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_i4_2d
function mpp_global_min_i4_3d( domain, field, locus, position )
integer(4) :: mpp_global_min_i4_3d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(0:,0:,:)
integer, intent(out), optional :: locus(3)
integer, intent(in), optional :: position
integer(4) :: local
integer, save :: l_locus(3)
integer(4), save :: g_val ! need storage class w/ global address; not sure whether fn result has required class
integer, save :: here ! need storage class w/ global address
integer :: ioff, joff, isc, iec, jsc, jec, ishift, jshift
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE: You must first call mpp_domains_init.' )
call mpp_get_compute_domain(domain, isc, iec, jsc, jec )
call mpp_get_domain_shift(domain, ishift, jshift, position)
iec = iec + ishift
jec = jec + jshift
if( size(field,1).EQ. iec-isc+1 .AND. size(field,2).EQ. jec-jsc+1 )then
!field is on compute domain
ioff = isc
joff = jsc
else if( size(field,1).EQ.domain%x(1)%memory%size+ishift .AND. size(field,2).EQ.domain%y(1)%memory%size+jshift )then
!field is on data domain
ioff = domain%x(1)%data%begin
joff = domain%y(1)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_REDUCE_: incoming field array must match either compute domain or data domain.' )
end if
!get your local max/min
local = minval(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
!find the global
g_val = local
call mpp_min( g_val, domain%list(:)%pe )
!find locus of the global max/min
if( PRESENT(locus) )then
!which PE is it on? min of all the PEs that have it
here = mpp_npes()+1
if( g_val == local )here = pe
call mpp_min( here, domain%list(:)%pe )
!find the locus here
if( pe.EQ.here )l_locus = minloc(field(isc-ioff: iec-ioff, jsc-joff: jec-joff,:))
l_locus(1) = l_locus(1) + ioff
l_locus(2) = l_locus(2) + joff
call mpp_broadcast( l_locus, 3, here, domain%list(:)%pe )
locus = l_locus
end if
mpp_global_min_i4_3d = g_val
return
end function mpp_global_min_i4_3d
function mpp_global_min_i4_4d( domain, field, locus, position )
integer(4) :: mpp_global_min_i4_4d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(:,:,:,:)
integer, intent(out), optional :: locus(4)
integer, intent(in), optional :: position
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_i4_4d = mpp_global_min_i4_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = (locus3D(3)-locus(3))/size(field,3) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
else
mpp_global_min_i4_4d = mpp_global_min_i4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_i4_4d
function mpp_global_min_i4_5d( domain, field, locus, position )
integer(4) :: mpp_global_min_i4_5d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(:,:,:,:,:)
integer, intent(out), optional :: locus(5)
integer, intent(in), optional :: position
integer(4) :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4)*size(field,5))
integer :: locus3D(3)
pointer( ptr, field3D )
ptr = LOC(field)
if( PRESENT(locus) )then
mpp_global_min_i4_5d = mpp_global_min_i4_3d( domain, field3D, locus3D, position )
locus(1:2) = locus3D(1:2)
!unflatten the combined third index into (k,l,m): recover k first, then
!split the remaining 1-based block index into l and m
locus(3) = modulo(locus3D(3),size(field,3))
locus(4) = (locus3D(3)-locus(3))/size(field,3) + 1
if( locus(3).EQ.0 )then
locus(3) = size(field,3)
locus(4) = locus(4) - 1
end if
locus(5) = (locus(4)-1)/size(field,4) + 1
locus(4) = modulo(locus(4)-1,size(field,4)) + 1
else
mpp_global_min_i4_5d = mpp_global_min_i4_3d( domain, field3D, position = position )
end if
return
end function mpp_global_min_i4_5d
# 175 "../mpp/include/mpp_domains_reduce.inc" 2
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_GLOBAL_SUM: global sum of field !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
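!
! Usage sketch (comments only; assumes the generic interface mpp_global_sum
! that wraps the kind/rank specific functions below, and a field `a`
! dimensioned on this domain):
!
!   real(8) :: total
!   !default: local sum followed by mpp_sum; fast, but not bitwise
!   !reproducible across PE counts
!   total = mpp_global_sum( domain, a )
!   !decomposition-independent alternatives, at extra cost
!   total = mpp_global_sum( domain, a, flags=BITWISE_EXACT_SUM )
!   total = mpp_global_sum( domain, a, flags=BITWISE_EFP_SUM )
!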
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_r8_2d( domain, field, flags, position, tile_count, overflow_check)
real(8) :: mpp_global_sum_r8_2d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(8), dimension(:,:), allocatable :: field2D
real(8), dimension(:,:), allocatable :: global2D
real(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_r8_2d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_r8_2d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_r8_2d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_r8_2d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
!this is bitwise across different PE counts using EFP sum
if( ntile > 1 ) then
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: multiple tiles per pe are not supported for BITWISE_EFP_SUM')
endif
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ) )
end do
end do
!--- using efp sum.
if(efp_sum_overflow_check) then
mpp_global_sum_r8_2d = mpp_reproducing_sum(field2D, overflow_check=.true.)
else
mpp_global_sum_r8_2d = mpp_reproducing_sum(field2D, overflow_check=overflow_check)
endif
# 141
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ) )
if(tile == ntile) then
mpp_global_sum_r8_2d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_r8_2d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_r8_2d
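!
! Summary of the three branches above (the same logic is repeated in every
! kind/rank instance of this include file):
!   BITWISE_EXACT_SUM     : gather the field into a global array with
!                           mpp_global_field and sum it in a fixed index
!                           order, so the result does not depend on the
!                           domain decomposition.
!   BITWISE_EFP_SUM       : reproducible sum via mpp_reproducing_sum
!                           (extended fixed-point accumulation); only one
!                           tile per PE is supported.
!   NON_BITWISE_EXACT_SUM : sum the local compute domain and combine with
!                           mpp_sum; fastest, but floating-point addition is
!                           not associative, so the last bits can change
!                           with the PE count.
!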
# 189 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_r8_3d( domain, field, flags, position, tile_count, overflow_check)
real(8) :: mpp_global_sum_r8_3d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,: ,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(8), dimension(:,:), allocatable :: field2D
real(8), dimension(:,:), allocatable :: global2D
real(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_r8_3d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_r8_3d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_r8_3d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_r8_3d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
!this is bitwise across different PE counts using EFP sum
if( ntile > 1 ) then
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: multiple tiles per pe are not supported for BITWISE_EFP_SUM')
endif
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:) )
end do
end do
!--- using efp sum.
if(efp_sum_overflow_check) then
mpp_global_sum_r8_3d = mpp_reproducing_sum(field2D, overflow_check=.true.)
else
mpp_global_sum_r8_3d = mpp_reproducing_sum(field2D, overflow_check=overflow_check)
endif
# 141
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:) )
if(tile == ntile) then
mpp_global_sum_r8_3d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_r8_3d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_r8_3d
# 197 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_r8_4d( domain, field, flags, position, tile_count, overflow_check)
real(8) :: mpp_global_sum_r8_4d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,: ,:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(8), dimension(:,:), allocatable :: field2D
real(8), dimension(:,:), allocatable :: global2D
real(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_r8_4d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_r8_4d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_r8_4d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_r8_4d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
!this is bitwise across different PE counts using EFP sum
if( ntile > 1 ) then
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: multiple tiles per pe are not supported for BITWISE_EFP_SUM')
endif
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:) )
end do
end do
!--- using efp sum.
if(efp_sum_overflow_check) then
mpp_global_sum_r8_4d = mpp_reproducing_sum(field2D, overflow_check=.true.)
else
mpp_global_sum_r8_4d = mpp_reproducing_sum(field2D, overflow_check=overflow_check)
endif
# 141
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:) )
if(tile == ntile) then
mpp_global_sum_r8_4d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_r8_4d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_r8_4d
# 205 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_r8_5d( domain, field, flags, position, tile_count, overflow_check)
real(8) :: mpp_global_sum_r8_5d
type(domain2D), intent(in) :: domain
real(8), intent(in) :: field(:,: ,:,:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(8), dimension(:,:), allocatable :: field2D
real(8), dimension(:,:), allocatable :: global2D
real(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_r8_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_r8_5d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_r8_5d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_r8_5d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
!this is bitwise across different PE counts using EFP sum
if( ntile > 1 ) then
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: multiple tiles per pe are not supported for BITWISE_EFP_SUM')
endif
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:,:) )
end do
end do
!--- using efp sum.
if(efp_sum_overflow_check) then
mpp_global_sum_r8_5d = mpp_reproducing_sum(field2D, overflow_check=.true.)
else
mpp_global_sum_r8_5d = mpp_reproducing_sum(field2D, overflow_check=overflow_check)
endif
# 141
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:,:) )
if(tile == ntile) then
mpp_global_sum_r8_5d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_r8_5d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_r8_5d
# 213 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_r4_2d( domain, field, flags, position, tile_count, overflow_check)
real(4) :: mpp_global_sum_r4_2d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(4), dimension(:,:), allocatable :: field2D
real(4), dimension(:,:), allocatable :: global2D
real(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_r4_2d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_r4_2d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_r4_2d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_r4_2d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
!this is bitwise across different PE counts using EFP sum
if( ntile > 1 ) then
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: multiple tiles per pe are not supported for BITWISE_EFP_SUM')
endif
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ) )
end do
end do
!--- using efp sum.
if(efp_sum_overflow_check) then
mpp_global_sum_r4_2d = mpp_reproducing_sum(field2D, overflow_check=.true.)
else
mpp_global_sum_r4_2d = mpp_reproducing_sum(field2D, overflow_check=overflow_check)
endif
# 141
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ) )
if(tile == ntile) then
mpp_global_sum_r4_2d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_r4_2d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_r4_2d
# 222 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_r4_3d( domain, field, flags, position, tile_count, overflow_check)
real(4) :: mpp_global_sum_r4_3d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,: ,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(4), dimension(:,:), allocatable :: field2D
real(4), dimension(:,:), allocatable :: global2D
real(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_r4_3d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_r4_3d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_r4_3d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_r4_3d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
!this is bitwise across different PE counts using EFP sum
if( ntile > 1 ) then
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: multiple tiles per pe are not supported for BITWISE_EFP_SUM')
endif
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:) )
end do
end do
!--- using efp sum.
if(efp_sum_overflow_check) then
mpp_global_sum_r4_3d = mpp_reproducing_sum(field2D, overflow_check=.true.)
else
mpp_global_sum_r4_3d = mpp_reproducing_sum(field2D, overflow_check=overflow_check)
endif
# 141
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:) )
if(tile == ntile) then
mpp_global_sum_r4_3d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_r4_3d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_r4_3d
# 230 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_r4_4d( domain, field, flags, position, tile_count, overflow_check)
real(4) :: mpp_global_sum_r4_4d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,: ,:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(4), dimension(:,:), allocatable :: field2D
real(4), dimension(:,:), allocatable :: global2D
real(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_r4_4d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_r4_4d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_r4_4d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_r4_4d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
!this is bitwise across different PE counts using EFP sum
if( ntile > 1 ) then
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: multiple tiles per pe are not supported for BITWISE_EFP_SUM')
endif
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:) )
end do
end do
!--- using efp sum.
if(efp_sum_overflow_check) then
mpp_global_sum_r4_4d = mpp_reproducing_sum(field2D, overflow_check=.true.)
else
mpp_global_sum_r4_4d = mpp_reproducing_sum(field2D, overflow_check=overflow_check)
endif
# 141
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:) )
if(tile == ntile) then
mpp_global_sum_r4_4d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_r4_4d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_r4_4d
# 238 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_r4_5d( domain, field, flags, position, tile_count, overflow_check)
real(4) :: mpp_global_sum_r4_5d
type(domain2D), intent(in) :: domain
real(4), intent(in) :: field(:,: ,:,:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(4), dimension(:,:), allocatable :: field2D
real(4), dimension(:,:), allocatable :: global2D
real(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_r4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_r4_5d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_r4_5d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_r4_5d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
!this is bitwise across different PE counts using EFP sum
if( ntile > 1 ) then
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: multiple tiles per pe are not supported for BITWISE_EFP_SUM')
endif
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:,:) )
end do
end do
!--- using efp sum.
if(efp_sum_overflow_check) then
mpp_global_sum_r4_5d = mpp_reproducing_sum(field2D, overflow_check=.true.)
else
mpp_global_sum_r4_5d = mpp_reproducing_sum(field2D, overflow_check=overflow_check)
endif
# 141
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:,:) )
if(tile == ntile) then
mpp_global_sum_r4_5d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_r4_5d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_r4_5d
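! A minimal usage sketch (illustrative names; assumes the domain and a compute-domain
! field have been set up elsewhere, e.g. via mpp_define_domains):
!
!   real(4) :: a(isc:iec, jsc:jec, nz, nt, ne)
!   real(4) :: s_fast, s_repro
!   s_fast  = mpp_global_sum( domain, a )                          ! default, not bitwise reproducible
!   s_repro = mpp_global_sum( domain, a, flags=BITWISE_EXACT_SUM ) ! reproducible across PE counts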
# 246 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_i8_2d( domain, field, flags, position, tile_count, overflow_check)
integer(8) :: mpp_global_sum_i8_2d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(8), dimension(:,:), allocatable :: field2D
integer(8), dimension(:,:), allocatable :: global2D
integer(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i8_2d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i8_2d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_i8_2d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_i8_2d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real fields, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ) )
if(tile == ntile) then
mpp_global_sum_i8_2d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_i8_2d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_i8_2d
# 257 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_i8_3d( domain, field, flags, position, tile_count, overflow_check)
integer(8) :: mpp_global_sum_i8_3d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(:,: ,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(8), dimension(:,:), allocatable :: field2D
integer(8), dimension(:,:), allocatable :: global2D
integer(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i8_3d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i8_3d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_i8_3d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_i8_3d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real fields, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:) )
if(tile == ntile) then
mpp_global_sum_i8_3d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_i8_3d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_i8_3d
# 265 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_i8_4d( domain, field, flags, position, tile_count, overflow_check)
integer(8) :: mpp_global_sum_i8_4d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(:,: ,:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(8), dimension(:,:), allocatable :: field2D
integer(8), dimension(:,:), allocatable :: global2D
integer(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i8_4d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i8_4d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_i8_4d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_i8_4d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real fields, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:) )
if(tile == ntile) then
mpp_global_sum_i8_4d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_i8_4d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_i8_4d
# 273 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_i8_5d( domain, field, flags, position, tile_count, overflow_check)
integer(8) :: mpp_global_sum_i8_5d
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: field(:,: ,:,:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(8), dimension(:,:), allocatable :: field2D
integer(8), dimension(:,:), allocatable :: global2D
integer(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i8_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i8_5d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_i8_5d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_i8_5d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real fields, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:,:) )
if(tile == ntile) then
mpp_global_sum_i8_5d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_i8_5d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_i8_5d
# 281 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_i4_2d( domain, field, flags, position, tile_count, overflow_check)
integer(4) :: mpp_global_sum_i4_2d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(4), dimension(:,:), allocatable :: field2D
integer(4), dimension(:,:), allocatable :: global2D
integer(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_2d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_2d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_i4_2d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_i4_2d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real fields, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ) )
if(tile == ntile) then
mpp_global_sum_i4_2d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_i4_2d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_i4_2d
# 290 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_i4_3d( domain, field, flags, position, tile_count, overflow_check)
integer(4) :: mpp_global_sum_i4_3d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(:,: ,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(4), dimension(:,:), allocatable :: field2D
integer(4), dimension(:,:), allocatable :: global2D
integer(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_3d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_3d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_i4_3d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_i4_3d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real fields, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:) )
if(tile == ntile) then
mpp_global_sum_i4_3d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_i4_3d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_i4_3d
# 298 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_i4_4d( domain, field, flags, position, tile_count, overflow_check)
integer(4) :: mpp_global_sum_i4_4d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(:,: ,:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(4), dimension(:,:), allocatable :: field2D
integer(4), dimension(:,:), allocatable :: global2D
integer(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_4d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_4d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_i4_4d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_i4_4d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real fields, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:) )
if(tile == ntile) then
mpp_global_sum_i4_4d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_i4_4d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_i4_4d
# 306 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
function mpp_global_sum_i4_5d( domain, field, flags, position, tile_count, overflow_check)
integer(4) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: field(:,: ,:,:,: )
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(4), dimension(:,:), allocatable :: field2D
integer(4), dimension(:,:), allocatable :: global2D
integer(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
do j = jsc, jec
do i = isc, iec
field2D(i,j) = sum( field(i+ioff:i+ioff,j+joff:j+joff ,:,:,:) )
end do
end do
allocate( global2D( gxsize+ishift, gysize+jshift ) )
global2D = 0.
!call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
if ( present( tile_count ) ) then
call mpp_global_field( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field( domain, field2D, global2D, position=position )
endif
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
mygsum(tile) = sum(global2D(1:gxsize+ioffset,1:gysize+joffset))
deallocate(global2D, field2d)
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = mygsum(tile)
else if( nlist == 1) then
mpp_global_sum_i4_5d = sum(mygsum(1:ntile))
else ! need to sum by the order of tile_count
! first fill the global sum on current pe.
do n = 1, ntile
gsum(domain%tile_id(n)) = mygsum(n)
end do
!--- send the data to other pe if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_send( mygsum(1), plen=ntile, to_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
!--- receive data from root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
call mpp_recv( nbrgsum(1), glen=size(domain%list(m)%x(:)), from_pe=domain%list(m)%pe, tag=COMM_TAG_1)
do n = 1, size(domain%list(m)%x(:))
gsum(domain%list(m)%tile_id(n)) = nbrgsum(n)
end do
end if
end do
mpp_global_sum_i4_5d = sum(gsum(1:domain%ntiles))
end if
end if
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real fields, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(tile) = sum( field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:,:) )
if(tile == ntile) then
mpp_global_sum_i4_5d = sum(mygsum(1:ntile))
call mpp_sum( mpp_global_sum_i4_5d, domain%list(:)%pe )
end if
end if
return
end function mpp_global_sum_i4_5d
# 314 "../mpp/include/mpp_domains_reduce.inc" 2
!gag
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_GLOBAL_SUM_TL: global sum of forward and tangent-linear fields !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
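! A minimal usage sketch (illustrative names; assumes the generic interface
! mpp_global_sum_tl declared earlier in this module): the tangent-linear wrapper simply
! returns the global sums of the forward and tangent-linear fields through its output
! arguments,
!
!   call mpp_global_sum_tl( domain, field, field_tl, gsum, gsum_tl, flags=BITWISE_EXACT_SUM )
!
! which is equivalent to two separate mpp_global_sum calls on field and field_tl.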
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_tl_r8_2d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
real(8), intent(inout) :: field(:,: )
real(8), intent(inout) :: field_tl(:,: )
real(8), intent(inout) :: gsum
real(8), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_r8_2d
# 330 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_tl_r8_3d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
real(8), intent(inout) :: field(:,: ,: )
real(8), intent(inout) :: field_tl(:,: ,: )
real(8), intent(inout) :: gsum
real(8), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_r8_3d
# 338 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_tl_r8_4d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
real(8), intent(inout) :: field(:,: ,:,: )
real(8), intent(inout) :: field_tl(:,: ,:,: )
real(8), intent(inout) :: gsum
real(8), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_r8_4d
# 346 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_tl_r8_5d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
real(8), intent(inout) :: field(:,: ,:,:,: )
real(8), intent(inout) :: field_tl(:,: ,:,:,: )
real(8), intent(inout) :: gsum
real(8), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_r8_5d
# 354 "../mpp/include/mpp_domains_reduce.inc" 2
# 387
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_tl_r4_2d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
real(4), intent(inout) :: field(:,: )
real(4), intent(inout) :: field_tl(:,: )
real(4), intent(inout) :: gsum
real(4), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_r4_2d
# 397 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_tl_r4_3d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
real(4), intent(inout) :: field(:,: ,: )
real(4), intent(inout) :: field_tl(:,: ,: )
real(4), intent(inout) :: gsum
real(4), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_r4_3d
# 405 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_tl_r4_4d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
real(4), intent(inout) :: field(:,: ,:,: )
real(4), intent(inout) :: field_tl(:,: ,:,: )
real(4), intent(inout) :: gsum
real(4), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_r4_4d
# 413 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_tl_r4_5d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
real(4), intent(inout) :: field(:,: ,:,:,: )
real(4), intent(inout) :: field_tl(:,: ,:,:,: )
real(4), intent(inout) :: gsum
real(4), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_r4_5d
# 421 "../mpp/include/mpp_domains_reduce.inc" 2
# 455
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_tl_i8_2d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
integer(8), intent(inout) :: field(:,: )
integer(8), intent(inout) :: field_tl(:,: )
integer(8), intent(inout) :: gsum
integer(8), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_i8_2d
# 465 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_tl_i8_3d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
integer(8), intent(inout) :: field(:,: ,: )
integer(8), intent(inout) :: field_tl(:,: ,: )
integer(8), intent(inout) :: gsum
integer(8), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_i8_3d
# 473 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_tl_i8_4d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
integer(8), intent(inout) :: field(:,: ,:,: )
integer(8), intent(inout) :: field_tl(:,: ,:,: )
integer(8), intent(inout) :: gsum
integer(8), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_i8_4d
# 481 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_tl_i8_5d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
integer(8), intent(inout) :: field(:,: ,:,:,: )
integer(8), intent(inout) :: field_tl(:,: ,:,:,: )
integer(8), intent(inout) :: gsum
integer(8), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_i8_5d
# 489 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_tl_i4_2d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
integer(4), intent(inout) :: field(:,: )
integer(4), intent(inout) :: field_tl(:,: )
integer(4), intent(inout) :: gsum
integer(4), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_i4_2d
# 498 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_tl_i4_3d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
integer(4), intent(inout) :: field(:,: ,: )
integer(4), intent(inout) :: field_tl(:,: ,: )
integer(4), intent(inout) :: gsum
integer(4), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_i4_3d
# 506 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_tl_i4_4d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
integer(4), intent(inout) :: field(:,: ,:,: )
integer(4), intent(inout) :: field_tl(:,: ,:,: )
integer(4), intent(inout) :: gsum
integer(4), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_i4_4d
# 514 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_tl.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_tl_i4_5d( domain, field, field_tl, gsum, gsum_tl, flags, position, tile_count )
type(domain2D), intent(in) :: domain
integer(4), intent(inout) :: field(:,: ,:,:,: )
integer(4), intent(inout) :: field_tl(:,: ,:,:,: )
integer(4), intent(inout) :: gsum
integer(4), intent(inout) :: gsum_tl
integer, intent(in), optional :: position
integer, intent(in), optional :: flags
integer, intent(in), optional :: tile_count
gsum = mpp_global_sum(domain, field, flags, position, tile_count )
gsum_tl = mpp_global_sum(domain, field_tl, flags, position, tile_count )
return
end subroutine mpp_global_sum_tl_i4_5d
# 522 "../mpp/include/mpp_domains_reduce.inc" 2
!gag
!bnc
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_GLOBAL_SUM_AD: global adjoint sum of field !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
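! The adjoint of a global sum is, conceptually, a broadcast: the forward map
! gsum = sum(field) has a transpose that spreads the scalar adjoint gsum_
! back over the points of the field owned by each PE, which is what the
! routines below do when they overwrite field.  A minimal call sketch,
! assuming the generic interface mpp_global_sum_ad resolves to these
! specifics and using hypothetical caller-side names q_ad and sum_ad:
!
!   real(8) :: q_ad(isc:iec,jsc:jec)   ! adjoint of the summed field (overwritten)
!   real(8) :: sum_ad                  ! adjoint of the global sum (input)
!   call mpp_global_sum_ad( domain, q_ad, sum_ad, flags=BITWISE_EXACT_SUM )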
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_ad_r8_2d( domain, field, gsum_, flags, position, tile_count, overflow_check)
real(8) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
real(8), intent(inout) :: field(:,: )
real(8), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(8), dimension(:,:), allocatable :: field2D
real(8), dimension(:,:), allocatable :: global2D
real(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
!--- send the adjoint sum to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
!--- receive the data if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
! first fill the global sum on current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real number, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_r8_2d
# 538 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_ad_r8_3d( domain, field, gsum_, flags, position, tile_count, overflow_check)
real(8) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
real(8), intent(inout) :: field(:,: ,: )
real(8), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(8), dimension(:,:), allocatable :: field2D
real(8), dimension(:,:), allocatable :: global2D
real(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
!--- send the adjoint sum to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
!--- receive the data if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
! first fill the global sum on current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real number, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_r8_3d
# 546 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_ad_r8_4d( domain, field, gsum_, flags, position, tile_count, overflow_check)
real(8) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
real(8), intent(inout) :: field(:,: ,:,: )
real(8), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(8), dimension(:,:), allocatable :: field2D
real(8), dimension(:,:), allocatable :: global2D
real(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
!--- send the adjoint sum to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
!--- receive the data if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
! first fill the global sum on current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real number, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_r8_4d
# 554 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_ad_r8_5d( domain, field, gsum_, flags, position, tile_count, overflow_check)
real(8) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
real(8), intent(inout) :: field(:,: ,:,:,: )
real(8), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(8), dimension(:,:), allocatable :: field2D
real(8), dimension(:,:), allocatable :: global2D
real(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
!--- send the adjoint sum to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
!--- receive the data if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
! first fill the global sum on current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:,:,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real number, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_r8_5d
# 562 "../mpp/include/mpp_domains_reduce.inc" 2
# 595
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_ad_r4_2d( domain, field, gsum_, flags, position, tile_count, overflow_check)
real(4) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
real(4), intent(inout) :: field(:,: )
real(4), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(4), dimension(:,:), allocatable :: field2D
real(4), dimension(:,:), allocatable :: global2D
real(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
!--- send the adjoint sum to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
!--- receive the data if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
! first fill the global sum on current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real number, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_r4_2d
# 605 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_ad_r4_3d( domain, field, gsum_, flags, position, tile_count, overflow_check)
real(4) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
real(4), intent(inout) :: field(:,: ,: )
real(4), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(4), dimension(:,:), allocatable :: field2D
real(4), dimension(:,:), allocatable :: global2D
real(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
!--- send the adjoint sum to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
!--- receive the data if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
! first fill the global sum on current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real number, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_r4_3d
# 613 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_ad_r4_4d( domain, field, gsum_, flags, position, tile_count, overflow_check)
real(4) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
real(4), intent(inout) :: field(:,: ,:,: )
real(4), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(4), dimension(:,:), allocatable :: field2D
real(4), dimension(:,:), allocatable :: global2D
real(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
!--- send the adjoint sum to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
!--- receive the data if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
! first fill the global sum on current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real number, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_r4_4d
# 621 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_ad_r4_5d( domain, field, gsum_, flags, position, tile_count, overflow_check)
real(4) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
real(4), intent(inout) :: field(:,: ,:,:,: )
real(4), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
real(4), dimension(:,:), allocatable :: field2D
real(4), dimension(:,:), allocatable :: global2D
real(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
!--- send the adjoint sum to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
!--- receive the data if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
! first fill the global sum on current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:,:,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real number, contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_r4_5d
# 629 "../mpp/include/mpp_domains_reduce.inc" 2
# 663
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_sum_ad_i8_2d( domain, field, gsum_, flags, position, tile_count, overflow_check)
integer(8) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
integer(8), intent(inout) :: field(:,: )
integer(8), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(8), dimension(:,:), allocatable :: field2D
integer(8), dimension(:,:), allocatable :: global2D
integer(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
         'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
          !--- send data to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
          !--- receive the data from other pes if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
          ! fill the adjoint sums for the tiles on the current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
     call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real numbers; contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_i8_2d
# 673 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_ad_i8_3d( domain, field, gsum_, flags, position, tile_count, overflow_check)
integer(8) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
integer(8), intent(inout) :: field(:,: ,: )
integer(8), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(8), dimension(:,:), allocatable :: field2D
integer(8), dimension(:,:), allocatable :: global2D
integer(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
    if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
         'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
          !--- send data to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
          !--- receive the data from other pes if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
          ! fill the adjoint sums for the tiles on the current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
     call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real numbers; contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_i8_3d
# 681 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_ad_i8_4d( domain, field, gsum_, flags, position, tile_count, overflow_check)
integer(8) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
integer(8), intent(inout) :: field(:,: ,:,: )
integer(8), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(8), dimension(:,:), allocatable :: field2D
integer(8), dimension(:,:), allocatable :: global2D
integer(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
    if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
         'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
          !--- send data to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
          !--- receive the data from other pes if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
          ! fill the adjoint sums for the tiles on the current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
     call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real numbers; contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_i8_4d
# 689 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_ad_i8_5d( domain, field, gsum_, flags, position, tile_count, overflow_check)
integer(8) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
integer(8), intent(inout) :: field(:,: ,:,:,: )
integer(8), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(8), dimension(:,:), allocatable :: field2D
integer(8), dimension(:,:), allocatable :: global2D
integer(8), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
    if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
         'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
          !--- send data to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
          !--- receive the data from other pes if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
          ! fill the adjoint sums for the tiles on the current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:,:,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
     call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real numbers; contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_i8_5d
# 697 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_ad_i4_2d( domain, field, gsum_, flags, position, tile_count, overflow_check)
integer(4) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
integer(4), intent(inout) :: field(:,: )
integer(4), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(4), dimension(:,:), allocatable :: field2D
integer(4), dimension(:,:), allocatable :: global2D
integer(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
    if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
         'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
          !--- send data to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
          !--- receive the data from other pes if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
          ! fill the adjoint sums for the tiles on the current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
     call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real numbers; contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_i4_2d
# 706 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_ad_i4_3d( domain, field, gsum_, flags, position, tile_count, overflow_check)
integer(4) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
integer(4), intent(inout) :: field(:,: ,: )
integer(4), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(4), dimension(:,:), allocatable :: field2D
integer(4), dimension(:,:), allocatable :: global2D
integer(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
    if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
         'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
          !--- send data to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
          !--- receive the data from other pes if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
          ! fill the adjoint sums for the tiles on the current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
     call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real numbers; contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_i4_3d
# 714 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_ad_i4_4d( domain, field, gsum_, flags, position, tile_count, overflow_check)
integer(4) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
integer(4), intent(inout) :: field(:,: ,:,: )
integer(4), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(4), dimension(:,:), allocatable :: field2D
integer(4), dimension(:,:), allocatable :: global2D
integer(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
    if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
         'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
          !--- send data to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
          !--- receive the data from other pes if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
          ! fill the adjoint sums for the tiles on the current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
     call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real numbers; contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_i4_4d
# 722 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_sum_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_sum_ad_i4_5d( domain, field, gsum_, flags, position, tile_count, overflow_check)
integer(4) :: mpp_global_sum_i4_5d
type(domain2D), intent(in) :: domain
integer(4), intent(inout) :: field(:,: ,:,:,: )
integer(4), intent(in) :: gsum_
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical, intent(in), optional :: overflow_check
integer(4), dimension(:,:), allocatable :: field2D
integer(4), dimension(:,:), allocatable :: global2D
integer(4), dimension(MAX_TILES), save :: gsum, nbrgsum, mygsum
integer :: i,j, ioff,joff, isc, iec, jsc, jec, is, ie, js, je, ishift, jshift, ioffset, joffset
integer :: gxsize, gysize
integer :: global_flag, tile, ntile, nlist, n, list, m
    if( domain%max_ntile_pe > MAX_TILES ) call mpp_error(FATAL, "MPP_GLOBAL_SUM: number of tiles exceeds MAX_TILES")
ntile = size(domain%x(:))
nlist = size(domain%list(:))
tile = 1
if(present(tile_count)) tile = tile_count
global_flag = NON_BITWISE_EXACT_SUM
if(present(flags)) global_flag = flags
call mpp_get_domain_shift(domain, ishift, jshift, position)
if( size(field,1).EQ.domain%x(tile)%compute%size+ishift .AND. size(field,2).EQ.domain%y(tile)%compute%size+jshift )then
!field is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(field,1).EQ.domain%x(tile)%memory%size+ishift .AND. size(field,2).EQ.domain%y(tile)%memory%size+jshift )then
!field is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: incoming field array must match either compute domain or data domain.' )
end if
if(domain%ntiles > MAX_TILES) call mpp_error( FATAL, &
         'MPP_GLOBAL_SUM_: number of tiles on this mosaic is greater than MAX_TILES')
call mpp_get_compute_domain( domain, is, ie, js, je, tile_count = tile_count )
isc = is; iec = ie + ishift; jsc = js; jec = je + jshift
call mpp_get_global_domain(domain, xsize = gxsize, ysize = gysize )
mpp_global_sum_i4_5d = 0
if( global_flag == BITWISE_EXACT_SUM )then
!this is bitwise exact across different PE counts.
allocate( field2D (isc:iec,jsc:jec) )
allocate( global2D( gxsize+ishift, gysize+jshift ) )
field2D = 0.
ioffset = domain%x(tile)%goffset*ishift; joffset = domain%y(tile)%goffset*jshift
if( tile == ntile) then
if(domain%ntiles == 1 ) then
mpp_global_sum_i4_5d = gsum_
mygsum(tile) = mpp_global_sum_i4_5d
else if( nlist == 1) then
mpp_global_sum_i4_5d = gsum_
mygsum(1:ntile) = mpp_global_sum_i4_5d
else ! need to sum by the order of tile_count
mpp_global_sum_i4_5d = gsum_
gsum(1:domain%ntiles) = mpp_global_sum_i4_5d
          !--- send data to the root_pe of each tile
do list = 1, nlist - 1
m = mod( domain%pos+nlist-list, nlist )
if( domain%list(m)%pe == domain%list(m)%tile_root_pe ) then
do n = 1, size(domain%list(m)%x(:))
nbrgsum(n) = gsum(domain%list(m)%tile_id(n))
end do
call mpp_send( nbrgsum(1), plen=size(domain%list(m)%x(:)), to_pe=domain%list(m)%pe, tag=COMM_TAG_1)
end if
end do
          !--- receive the data from other pes if the current pe is the root pe of any tile
if( mpp_domain_is_tile_root_pe(domain) ) then
do list = 1, nlist - 1
m = mod( domain%pos+list, nlist )
call mpp_recv( mygsum(1), glen=ntile, from_pe=domain%list(m)%pe, tag=COMM_TAG_1 )
end do
end if
call mpp_sync_self()
          ! fill the adjoint sums for the tiles on the current pe.
do n = 1, ntile
mygsum(n) = gsum(domain%tile_id(n))
end do
end if
end if
global2D(1:gxsize+ioffset,1:gysize+joffset) = mygsum(tile)
if ( present( tile_count ) ) then
call mpp_global_field_ad( domain, field2D, global2D, position=position, tile_count=tile_count )
else
call mpp_global_field_ad( domain, field2D, global2D, position=position )
endif
do j = jsc, jec
do i = isc, iec
field(i+ioff:i+ioff,j+joff:j+joff ,:,:,:) = field2D(i,j)
end do
end do
deallocate(global2D, field2d)
else if ( global_flag == BITWISE_EFP_SUM )then
# 139
     call mpp_error( FATAL, 'MPP_GLOBAL_SUM_: BITWISE_EFP_SUM is only implemented for real numbers; contact developer')
else !this is not bitwise-exact across different PE counts
ioffset = domain%x(tile)%loffset*ishift; joffset = domain%y(tile)%loffset*jshift
mygsum(1:ntile) = 0
if(tile == ntile) then
mpp_global_sum_i4_5d = gsum_
call mpp_sum_ad( mpp_global_sum_i4_5d, domain%list(:)%pe )
mygsum(1:ntile) = mpp_global_sum_i4_5d
end if
field(is+ioff:ie+ioff+ioffset, js+joff:je+joff+joffset ,:,:,:) = mygsum(tile)
end if
return
end subroutine mpp_global_sum_ad_i4_5d
# 730 "../mpp/include/mpp_domains_reduce.inc" 2
!bnc
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! MPP_GLOBAL_FIELD: get global field from domain field !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
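! Illustrative usage sketch (not part of the generated interfaces below):
! gather a decomposed 2-D real(8) field onto every PE through the generic
! mpp_global_field interface. The names example_gather_field, local_field
! and global_field are hypothetical placeholders; local_field is assumed to
! be dimensioned to the compute (or data) domain of `domain`.
    subroutine example_gather_field( domain, local_field )
      type(domain2D), intent(in) :: domain
      real(8),        intent(in) :: local_field(:,:)   ! decomposed field on this PE
      real(8), allocatable       :: global_field(:,:)  ! assembled field, identical on all PEs
      integer :: gxsize, gysize

      call mpp_get_global_domain( domain, xsize = gxsize, ysize = gysize )
      allocate( global_field(gxsize, gysize) )
      ! every PE receives the full global field (default cell-centre position)
      call mpp_global_field( domain, local_field, global_field )
      deallocate( global_field )
    end subroutine example_gather_field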
# 1 "../mpp/include/mpp_global_field.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_field2D_r8_2d( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
real(8), intent(in) :: local(:,:)
real(8), intent(out) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(8), intent(in), optional :: default_data
real(8) :: local3D (size( local,1),size( local,2),1)
real(8) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r8_2d
subroutine mpp_global_field2D_r8_3d( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
real(8), intent(in) :: local(:,:,:)
real(8), intent(out) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(8), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
integer :: isize, jsize
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
  ! The alltoallw method requires that local and global be contiguous.
  ! We presume that `local` is contiguous if it matches the data domain;
  ! `global` is presumed to always be contiguous.
  ! Ideally we would use the Fortran 2008 intrinsic IS_CONTIGUOUS() to validate
  ! contiguity, but it is not yet supported in many compilers.
  ! Also worth noting that many of the nD->3D conversions also assume
  ! contiguity, so there may be other issues here.
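  ! A minimal sketch (left as a comment, not executed) of the dispatch we
  ! would prefer once IS_CONTIGUOUS is widely supported by compilers:
  !   if( IS_CONTIGUOUS(local) .AND. IS_CONTIGUOUS(global) .AND. use_alltoallw ) then
  !      call mpp_do_global_field_a2a(domain, local, global, tile, ishift, jshift, flags, default_data)
  !   else
  !      call mpp_do_global_field(domain, local, global, tile, ishift, jshift, flags, default_data)
  !   end if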
isize = domain%x(tile)%data%size + ishift
jsize = domain%y(tile)%data%size + jshift
if ((size(local, 1) .eq. isize) .and. (size(local, 2) .eq. jsize) &
.and. use_alltoallw) then
call mpp_do_global_field_a2a(domain, local, global, tile, &
ishift, jshift, flags, default_data)
else
call mpp_do_global_field(domain, local, global, tile, &
ishift, jshift, flags, default_data)
end if
end subroutine mpp_global_field2D_r8_3d
subroutine mpp_global_field2D_r8_4d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
real(8), intent(in) :: local(:,:,:,:)
real(8), intent(out) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(8), intent(in), optional :: default_data
real(8) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
real(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r8_4d
subroutine mpp_global_field2D_r8_5d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
real(8), intent(in) :: local(:,:,:,:,:)
real(8), intent(out) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(8), intent(in), optional :: default_data
real(8) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
real(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r8_5d
# 749 "../mpp/include/mpp_domains_reduce.inc" 2
# 762
# 1 "../mpp/include/mpp_global_field.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_field2D_i8_2d( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: local(:,:)
integer(8), intent(out) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(8), intent(in), optional :: default_data
integer(8) :: local3D (size( local,1),size( local,2),1)
integer(8) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i8_2d
subroutine mpp_global_field2D_i8_3d( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: local(:,:,:)
integer(8), intent(out) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(8), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
integer :: isize, jsize
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
  ! The alltoallw method requires that local and global be contiguous.
  ! We presume that `local` is contiguous if it matches the data domain;
  ! `global` is presumed to always be contiguous.
  ! Ideally we would use the Fortran 2008 intrinsic IS_CONTIGUOUS() to validate
  ! contiguity, but it is not yet supported in many compilers.
  ! Also worth noting that many of the nD->3D conversions also assume
  ! contiguity, so there may be other issues here.
isize = domain%x(tile)%data%size + ishift
jsize = domain%y(tile)%data%size + jshift
if ((size(local, 1) .eq. isize) .and. (size(local, 2) .eq. jsize) &
.and. use_alltoallw) then
call mpp_do_global_field_a2a(domain, local, global, tile, &
ishift, jshift, flags, default_data)
else
call mpp_do_global_field(domain, local, global, tile, &
ishift, jshift, flags, default_data)
end if
end subroutine mpp_global_field2D_i8_3d
subroutine mpp_global_field2D_i8_4d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: local(:,:,:,:)
integer(8), intent(out) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(8), intent(in), optional :: default_data
integer(8) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
integer(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i8_4d
subroutine mpp_global_field2D_i8_5d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: local(:,:,:,:,:)
integer(8), intent(out) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(8), intent(in), optional :: default_data
integer(8) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
integer(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i8_5d
# 776 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_field.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_global_field2D_l8_2d( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
logical(8), intent(in) :: local(:,:)
logical(8), intent(out) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(8), intent(in), optional :: default_data
logical(8) :: local3D (size( local,1),size( local,2),1)
logical(8) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l8_2d
subroutine mpp_global_field2D_l8_3d( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
logical(8), intent(in) :: local(:,:,:)
logical(8), intent(out) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(8), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
integer :: isize, jsize
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
  ! The alltoallw method requires that local and global be contiguous.
  ! We presume that `local` is contiguous if it matches the data domain;
  ! `global` is presumed to always be contiguous.
  ! Ideally we would use the Fortran 2008 intrinsic IS_CONTIGUOUS() to validate
  ! contiguity, but it is not yet supported in many compilers.
  ! Also worth noting that many of the nD->3D conversions also assume
  ! contiguity, so there may be other issues here.
isize = domain%x(tile)%data%size + ishift
jsize = domain%y(tile)%data%size + jshift
if ((size(local, 1) .eq. isize) .and. (size(local, 2) .eq. jsize) &
.and. use_alltoallw) then
call mpp_do_global_field_a2a(domain, local, global, tile, &
ishift, jshift, flags, default_data)
else
call mpp_do_global_field(domain, local, global, tile, &
ishift, jshift, flags, default_data)
end if
end subroutine mpp_global_field2D_l8_3d
subroutine mpp_global_field2D_l8_4d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
logical(8), intent(in) :: local(:,:,:,:)
logical(8), intent(out) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(8), intent(in), optional :: default_data
logical(8) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
logical(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l8_4d
subroutine mpp_global_field2D_l8_5d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
logical(8), intent(in) :: local(:,:,:,:,:)
logical(8), intent(out) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(8), intent(in), optional :: default_data
logical(8) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
logical(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l8_5d
# 788 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_field.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_r4_2d( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
real(4), intent(in) :: local(:,:)
real(4), intent(out) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(4), intent(in), optional :: default_data
real(4) :: local3D (size( local,1),size( local,2),1)
real(4) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r4_2d
subroutine mpp_global_field2D_r4_3d( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
real(4), intent(in) :: local(:,:,:)
real(4), intent(out) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(4), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
integer :: isize, jsize
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
! The alltoallw method requires that local and global be contiguous.
! We presume that `local` is contiguous if it matches the data domain;
! `global` is presumed to always be contiguous.
! Ideally we would use the F2008 intrinsic IS_CONTIGUOUS() to validate
! contiguity, but it is not yet supported in many compilers.
! Also worth noting that many of the nD->3D conversions also assume
! contiguity, so there may be other issues here.
isize = domain%x(tile)%data%size + ishift
jsize = domain%y(tile)%data%size + jshift
if ((size(local, 1) .eq. isize) .and. (size(local, 2) .eq. jsize) &
.and. use_alltoallw) then
call mpp_do_global_field_a2a(domain, local, global, tile, &
ishift, jshift, flags, default_data)
else
call mpp_do_global_field(domain, local, global, tile, &
ishift, jshift, flags, default_data)
end if
end subroutine mpp_global_field2D_r4_3d
subroutine mpp_global_field2D_r4_4d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
real(4), intent(in) :: local(:,:,:,:)
real(4), intent(out) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(4), intent(in), optional :: default_data
real(4) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
real(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r4_4d
subroutine mpp_global_field2D_r4_5d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
real(4), intent(in) :: local(:,:,:,:,:)
real(4), intent(out) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(4), intent(in), optional :: default_data
real(4) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
real(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r4_5d
# 802 "../mpp/include/mpp_domains_reduce.inc" 2
# 816
# 1 "../mpp/include/mpp_global_field.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_i4_2d( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: local(:,:)
integer(4), intent(out) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(4), intent(in), optional :: default_data
integer(4) :: local3D (size( local,1),size( local,2),1)
integer(4) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i4_2d
subroutine mpp_global_field2D_i4_3d( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: local(:,:,:)
integer(4), intent(out) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(4), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
integer :: isize, jsize
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
! The alltoallw method requires that local and global be contiguous.
! We presume that `local` is contiguous if it matches the data domain;
! `global` is presumed to always be contiguous.
! Ideally we would use the F2008 intrinsic IS_CONTIGUOUS() to validate
! contiguity, but it is not yet supported in many compilers.
! Also worth noting that many of the nD->3D conversions also assume
! contiguity, so there may be other issues here.
isize = domain%x(tile)%data%size + ishift
jsize = domain%y(tile)%data%size + jshift
if ((size(local, 1) .eq. isize) .and. (size(local, 2) .eq. jsize) &
.and. use_alltoallw) then
call mpp_do_global_field_a2a(domain, local, global, tile, &
ishift, jshift, flags, default_data)
else
call mpp_do_global_field(domain, local, global, tile, &
ishift, jshift, flags, default_data)
end if
end subroutine mpp_global_field2D_i4_3d
subroutine mpp_global_field2D_i4_4d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: local(:,:,:,:)
integer(4), intent(out) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(4), intent(in), optional :: default_data
integer(4) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
integer(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i4_4d
subroutine mpp_global_field2D_i4_5d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: local(:,:,:,:,:)
integer(4), intent(out) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(4), intent(in), optional :: default_data
integer(4) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
integer(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i4_5d
# 829 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_field.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_l4_2d( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
logical(4), intent(in) :: local(:,:)
logical(4), intent(out) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(4), intent(in), optional :: default_data
logical(4) :: local3D (size( local,1),size( local,2),1)
logical(4) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l4_2d
subroutine mpp_global_field2D_l4_3d( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
logical(4), intent(in) :: local(:,:,:)
logical(4), intent(out) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(4), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
integer :: isize, jsize
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
! The alltoallw method requires that local and global be contiguous.
! We presume that `local` is contiguous if it matches the data domain;
! `global` is presumed to always be contiguous.
! Ideally we would use the F2008 intrinsic IS_CONTIGUOUS() to validate
! contiguity, but it is not yet supported in many compilers.
! Also worth noting that many of the nD->3D conversions also assume
! contiguity, so there may be other issues here.
isize = domain%x(tile)%data%size + ishift
jsize = domain%y(tile)%data%size + jshift
if ((size(local, 1) .eq. isize) .and. (size(local, 2) .eq. jsize) &
.and. use_alltoallw) then
call mpp_do_global_field_a2a(domain, local, global, tile, &
ishift, jshift, flags, default_data)
else
call mpp_do_global_field(domain, local, global, tile, &
ishift, jshift, flags, default_data)
end if
end subroutine mpp_global_field2D_l4_3d
subroutine mpp_global_field2D_l4_4d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
logical(4), intent(in) :: local(:,:,:,:)
logical(4), intent(out) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(4), intent(in), optional :: default_data
logical(4) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
logical(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l4_4d
subroutine mpp_global_field2D_l4_5d( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
logical(4), intent(in) :: local(:,:,:,:,:)
logical(4), intent(out) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(4), intent(in), optional :: default_data
logical(4) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
logical(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l4_5d
# 841 "../mpp/include/mpp_domains_reduce.inc" 2
!****************************************************
# 1 "../mpp/include/mpp_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_r8_2d_ad( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
real(8), intent(out) :: local(:,:)
real(8), intent(in) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(8), intent(in), optional :: default_data
real(8) :: local3D (size( local,1),size( local,2),1)
real(8) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r8_2d_ad
subroutine mpp_global_field2D_r8_3d_ad( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
real(8), intent(out) :: local(:,:,:)
real(8), intent(in) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(8), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
call mpp_do_global_field_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
end subroutine mpp_global_field2D_r8_3d_ad
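! The *_ad wrappers in this include mirror the forward wrappers above:
! here global is the input and local is the output, and
! mpp_do_global_field_ad applies the corresponding adjoint (reverse)
! operation of the gather.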
subroutine mpp_global_field2D_r8_4d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
real(8), intent(out) :: local(:,:,:,:)
real(8), intent(in) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(8), intent(in), optional :: default_data
real(8) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
real(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r8_4d_ad
subroutine mpp_global_field2D_r8_5d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
real(8), intent(out) :: local(:,:,:,:,:)
real(8), intent(in) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(8), intent(in), optional :: default_data
real(8) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
real(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r8_5d_ad
# 854 "../mpp/include/mpp_domains_reduce.inc" 2
# 867
# 1 "../mpp/include/mpp_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_i8_2d_ad( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
integer(8), intent(out) :: local(:,:)
integer(8), intent(in) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(8), intent(in), optional :: default_data
integer(8) :: local3D (size( local,1),size( local,2),1)
integer(8) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i8_2d_ad
subroutine mpp_global_field2D_i8_3d_ad( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer(8), intent(out) :: local(:,:,:)
integer(8), intent(in) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(8), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
call mpp_do_global_field_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
end subroutine mpp_global_field2D_i8_3d_ad
subroutine mpp_global_field2D_i8_4d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
integer(8), intent(out) :: local(:,:,:,:)
integer(8), intent(in) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(8), intent(in), optional :: default_data
integer(8) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
integer(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i8_4d_ad
subroutine mpp_global_field2D_i8_5d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
integer(8), intent(out) :: local(:,:,:,:,:)
integer(8), intent(in) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(8), intent(in), optional :: default_data
integer(8) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
integer(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i8_5d_ad
# 881 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_l8_2d_ad( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
logical(8), intent(out) :: local(:,:)
logical(8), intent(in) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(8), intent(in), optional :: default_data
logical(8) :: local3D (size( local,1),size( local,2),1)
logical(8) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l8_2d_ad
subroutine mpp_global_field2D_l8_3d_ad( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
logical(8), intent(out) :: local(:,:,:)
logical(8), intent(in) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(8), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
call mpp_do_global_field_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
end subroutine mpp_global_field2D_l8_3d_ad
subroutine mpp_global_field2D_l8_4d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
logical(8), intent(out) :: local(:,:,:,:)
logical(8), intent(in) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(8), intent(in), optional :: default_data
logical(8) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
logical(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l8_4d_ad
subroutine mpp_global_field2D_l8_5d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
logical(8), intent(out) :: local(:,:,:,:,:)
logical(8), intent(in) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(8), intent(in), optional :: default_data
logical(8) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
logical(8) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l8_5d_ad
# 893 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_r4_2d_ad( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
real(4), intent(out) :: local(:,:)
real(4), intent(in) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(4), intent(in), optional :: default_data
real(4) :: local3D (size( local,1),size( local,2),1)
real(4) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r4_2d_ad
subroutine mpp_global_field2D_r4_3d_ad( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
real(4), intent(out) :: local(:,:,:)
real(4), intent(in) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(4), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
call mpp_do_global_field_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
end subroutine mpp_global_field2D_r4_3d_ad
subroutine mpp_global_field2D_r4_4d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
real(4), intent(out) :: local(:,:,:,:)
real(4), intent(in) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(4), intent(in), optional :: default_data
real(4) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
real(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r4_4d_ad
subroutine mpp_global_field2D_r4_5d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
real(4), intent(out) :: local(:,:,:,:,:)
real(4), intent(in) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
real(4), intent(in), optional :: default_data
real(4) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
real(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_r4_5d_ad
# 907 "../mpp/include/mpp_domains_reduce.inc" 2
# 921
# 1 "../mpp/include/mpp_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_i4_2d_ad( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
integer(4), intent(out) :: local(:,:)
integer(4), intent(in) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(4), intent(in), optional :: default_data
integer(4) :: local3D (size( local,1),size( local,2),1)
integer(4) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i4_2d_ad
subroutine mpp_global_field2D_i4_3d_ad( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer(4), intent(out) :: local(:,:,:)
integer(4), intent(in) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(4), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
call mpp_do_global_field_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
end subroutine mpp_global_field2D_i4_3d_ad
subroutine mpp_global_field2D_i4_4d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
integer(4), intent(out) :: local(:,:,:,:)
integer(4), intent(in) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(4), intent(in), optional :: default_data
integer(4) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
integer(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i4_4d_ad
subroutine mpp_global_field2D_i4_5d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
integer(4), intent(out) :: local(:,:,:,:,:)
integer(4), intent(in) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
integer(4), intent(in), optional :: default_data
integer(4) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
integer(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_i4_5d_ad
# 934 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_l4_2d_ad( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
logical(4), intent(out) :: local(:,:)
logical(4), intent(in) :: global(:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(4), intent(in), optional :: default_data
logical(4) :: local3D (size( local,1),size( local,2),1)
logical(4) :: global3D(size(global,1),size(global,2),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l4_2d_ad
subroutine mpp_global_field2D_l4_3d_ad( domain, local, global, flags, position, tile_count, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
logical(4), intent(out) :: local(:,:,:)
logical(4), intent(in) :: global(:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(4), intent(in), optional :: default_data
integer :: ishift, jshift
integer :: tile
tile = 1; if(PRESENT(tile_count)) tile = tile_count
call mpp_get_domain_shift(domain, ishift, jshift, position)
call mpp_do_global_field_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
end subroutine mpp_global_field2D_l4_3d_ad
subroutine mpp_global_field2D_l4_4d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
logical(4), intent(out) :: local(:,:,:,:)
logical(4), intent(in) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(4), intent(in), optional :: default_data
logical(4) :: local3D (size( local,1),size( local,2),size( local,3)*size(local,4))
logical(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(local,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l4_4d_ad
subroutine mpp_global_field2D_l4_5d_ad( domain, local, global, flags, position,tile_count, default_data )
type(domain2D), intent(in) :: domain
logical(4), intent(out) :: local(:,:,:,:,:)
logical(4), intent(in) :: global(:,:,:,:,:)
integer, intent(in), optional :: flags
integer, intent(in), optional :: position
integer, intent(in), optional :: tile_count
logical(4), intent(in), optional :: default_data
logical(4) :: local3D (size( local,1),size( local,2),size( local,3)*size( local,4)*size(local,5))
logical(4) :: global3D(size(global,1),size(global,2),size(global,3)*size(global,4)*size(local,5))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_ad( domain, local3D, global3D, flags, position,tile_count, default_data )
end subroutine mpp_global_field2D_l4_5d_ad
# 946 "../mpp/include/mpp_domains_reduce.inc" 2
!****************************************************
# 1 "../mpp/include/mpp_do_global_field.h" 1
subroutine mpp_do_global_field2D_r8_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
real(8), intent(in) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
real(8), intent(out) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
real(8), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
real(8) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
real(8) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
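! If this limit is exceeded, the caller can enlarge the shared stack once
! (on all PEs) before calling mpp_global_field, e.g. (illustrative size):
!   call mpp_domains_set_stack_size(4000000)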
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
! make contiguous array from compute domain
m = 0
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
# 104
global = 0
endif
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
global(i+ipos,j+jpos,k) = clocal(m) !always fill local domain directly
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
end do
end do
end do
endif
! if there is more than one tile on this pe, then none of the tiles on this pe is decomposed across PEs, so we can just return
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
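! Each pass of the loops below exchanges this PE's packed compute-domain
! data (clocal) with one other PE along the decomposition axis via
! mpp_transmit, receiving that PE's packed data in cremote, which is then
! unpacked into the matching slab of global.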
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer ptr interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%x(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
global(i,j+jpos,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer pointer interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%y(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
global(i+ipos,j,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv(cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
else
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv( cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
end if
call mpp_sync_self()
return
end subroutine mpp_do_global_field2D_r8_3d
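! Typical flag values accepted by the routine above, as passed down from
! the mpp_global_field wrappers (sketch; my_domain, a_local, a_global and
! a_strip are hypothetical caller names, and a_strip must be sized
! global-in-x by compute-in-y for the XUPDATE case):
!
!   call mpp_global_field(my_domain, a_local, a_global)                         ! full 2D gather
!   call mpp_global_field(my_domain, a_local, a_strip,  flags=XUPDATE)          ! gather along x only
!   call mpp_global_field(my_domain, a_local, a_global, flags=GLOBAL_ROOT_ONLY) ! gather onto tile root PE only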
subroutine mpp_do_global_field2D_a2a_r8_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer, intent(in) :: tile, ishift, jshift
real(8), intent(in), contiguous, target :: local(:,:,:)
real(8), intent(out), contiguous, target :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
real(8), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
! Alltoallw vectors
real(8), dimension(:), pointer :: plocal, pglobal
integer, dimension(:), allocatable :: sendcounts(:), recvcounts(:)
integer, dimension(:), allocatable :: sdispls(:), rdispls(:)
type(mpp_type), allocatable :: sendtypes(:), recvtypes(:)
integer, dimension(3) :: array_of_subsizes, array_of_starts
integer :: n_sends, n_ax, pe
integer :: isg, jsg
integer, allocatable :: pelist(:), axis_pelist(:), pelist_idx(:)
if (.NOT.module_is_initialized) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
! Validate flag consistency and configure the function
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
! Calculate offset for truncated global fields
! NOTE: We do not check contiguity of global subarrays, and assume that
! they have been copied to a contiguous array.
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
! NOTE: Since local is assumed to contiguously match the data domain, this
! is not a useful check. But maybe someday we can support compute
! domains.
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin
joff = -domain%y(tile)%compute%begin
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin
joff = -domain%y(tile)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
isg = domain%x(1)%global%begin; jsg = domain%y(1)%global%begin
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
# 360
global = 0
endif
endif
! if there is more than one tile on this pe, then none of the tiles on
! this pe is decomposed across PEs, so we can just return
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
! Generate the pelist
! TODO: Add these to the domain API
if (xonly) then
n_ax = size(domain%x(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%x(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else if (yonly) then
n_ax = size(domain%y(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%y(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else
nd = size(domain%list(:))
allocate(pelist(nd), pelist_idx(0:nd-1))
call mpp_get_pelist(domain, pelist)
pelist_idx = [ (i, i=0, nd-1) ]
end if
! Allocate message data buffers
allocate(sendcounts(0:nd-1))
allocate(sdispls(0:nd-1))
allocate(sendtypes(0:nd-1))
sendcounts(:) = 0
sdispls(:) = 0
sendtypes(:) = mpp_byte
allocate(recvcounts(0:nd-1))
allocate(rdispls(0:nd-1))
allocate(recvtypes(0:nd-1))
recvcounts(:) = 0
rdispls(:) = 0
recvtypes(:) = mpp_byte
array_of_subsizes = [iec - isc + 1, jec - jsc + 1, size(local, 3)]
array_of_starts = [isc + ioff, jsc + joff, 0]
n_sends = merge(1, nd, root_only) ! 1 if root_only else nd
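! Each send is described by a derived subarray type covering this PE's
! compute-domain block of local; no packing is done here, the single
! mpp_alltoall call below moves the data directly.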
do n = 0, n_sends - 1
sendcounts(n) = 1
call mpp_type_create( &
local, &
array_of_subsizes, &
array_of_starts, &
sendtypes(n) &
)
end do
! Receive configuration
if (global_on_this_pe) then
do n = 0, nd - 1
recvcounts(n) = 1
pe = pelist_idx(n)
if (xonly) then
is = domain%x(1)%list(pe)%compute%begin
ie = domain%x(1)%list(pe)%compute%end + ishift
js = jsc; je = jec
else if (yonly) then
is = isc; ie = iec
js = domain%y(1)%list(pe)%compute%begin
je = domain%y(1)%list(pe)%compute%end + jshift
else
is = domain%list(pe)%x(1)%compute%begin
ie = domain%list(pe)%x(1)%compute%end + ishift
js = domain%list(pe)%y(1)%compute%begin
je = domain%list(pe)%y(1)%compute%end + jshift
end if
array_of_subsizes = [ie - is + 1, je - js + 1, ke]
array_of_starts = [is - isg + ipos, js - jsg + jpos, 0]
call mpp_type_create( &
global, &
array_of_subsizes, &
array_of_starts, &
recvtypes(n) &
)
end do
end if
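! Rank-remapping pointer assignment: expose the contiguous rank-3 arrays
! as rank-1 views so they can be passed to mpp_alltoall as flat buffers.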
plocal(1:size(local)) => local
pglobal(1:size(global)) => global
call mpp_alltoall(plocal, sendcounts, sdispls, sendtypes, &
pglobal, recvcounts, rdispls, recvtypes, &
pelist)
plocal => null()
pglobal => null()
! Cleanup
deallocate(pelist)
deallocate(sendcounts, sdispls, sendtypes)
deallocate(recvcounts, rdispls, recvtypes)
call mpp_sync_self()
end subroutine mpp_do_global_field2D_a2a_r8_3d
# 955 "../mpp/include/mpp_domains_reduce.inc" 2
# 964
# 1 "../mpp/include/mpp_do_global_field.h" 1
subroutine mpp_do_global_field2D_i8_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer(8), intent(in) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
integer(8), intent(out) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
integer(8), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
integer(8) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
integer(8) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
! make contiguous array from compute domain
m = 0
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
# 104
global = 0
endif
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
global(i+ipos,j+jpos,k) = clocal(m) !always fill local domain directly
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
end do
end do
end do
endif
! If there is more than one tile on this PE, then no tile on this PE is decomposed, so we can simply return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
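! For XUPDATE or YUPDATE the exchange is a ring shift along the decomposed
! axis: at step n this PE sends its packed compute data to the PE n positions
! behind it in the axis list and unpacks the data received from the PE n
! positions ahead. Otherwise every PE of the tile exchanges with every other
! PE of the tile (or, with ROOT_GLOBAL, all PEs send to the tile root only).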
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer ptr interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%x(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
global(i,j+jpos,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer pointer interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%y(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
global(i+ipos,j,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv(cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
else
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv( cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
end if
call mpp_sync_self()
return
end subroutine mpp_do_global_field2D_i8_3d
subroutine mpp_do_global_field2D_a2a_i8_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer, intent(in) :: tile, ishift, jshift
integer(8), intent(in), contiguous, target :: local(:,:,:)
integer(8), intent(out), contiguous, target :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
integer(8), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
! Alltoallw vectors
integer(8), dimension(:), pointer :: plocal, pglobal
integer, dimension(:), allocatable :: sendcounts(:), recvcounts(:)
integer, dimension(:), allocatable :: sdispls(:), rdispls(:)
type(mpp_type), allocatable :: sendtypes(:), recvtypes(:)
integer, dimension(3) :: array_of_subsizes, array_of_starts
integer :: n_sends, n_ax, pe
integer :: isg, jsg
integer, allocatable :: pelist(:), axis_pelist(:), pelist_idx(:)
if (.NOT.module_is_initialized) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
! Validate flag consistency and configure the function
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
! Calculate offset for truncated global fields
! NOTE: We do not check contiguity of global subarrays, and assume that
! they have been copied to a contiguous array.
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
! NOTE: Since local is assumed to contiguously match the data domain, this
! is not a useful check. But maybe someday we can support compute
! domains.
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin
joff = -domain%y(tile)%compute%begin
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin
joff = -domain%y(tile)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
isg = domain%x(1)%global%begin; jsg = domain%y(1)%global%begin
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
# 360
global = 0
endif
endif
! If there is more than one tile on this PE, then no tile on this PE is
! decomposed, so we can simply return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
! Generate the pelist
! TODO: Add these to the domain API
if (xonly) then
n_ax = size(domain%x(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%x(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else if (yonly) then
n_ax = size(domain%y(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%y(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else
nd = size(domain%list(:))
allocate(pelist(nd), pelist_idx(0:nd-1))
call mpp_get_pelist(domain, pelist)
pelist_idx = [ (i, i=0, nd-1) ]
end if
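! pelist holds the PEs participating in the all-to-all; pelist_idx maps each
! entry back to its position in the domain (or axis) list so the receive
! extents can be looked up below.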
! Allocate message data buffers
allocate(sendcounts(0:nd-1))
allocate(sdispls(0:nd-1))
allocate(sendtypes(0:nd-1))
sendcounts(:) = 0
sdispls(:) = 0
sendtypes(:) = mpp_byte
allocate(recvcounts(0:nd-1))
allocate(rdispls(0:nd-1))
allocate(recvtypes(0:nd-1))
recvcounts(:) = 0
rdispls(:) = 0
recvtypes(:) = mpp_byte
array_of_subsizes = [iec - isc + 1, jec - jsc + 1, size(local, 3)]
array_of_starts = [isc + ioff, jsc + joff, 0]
n_sends = merge(1, nd, root_only) ! 1 if root_only else nd
do n = 0, n_sends - 1
sendcounts(n) = 1
call mpp_type_create( &
local, &
array_of_subsizes, &
array_of_starts, &
sendtypes(n) &
)
end do
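! Every message sends the same subarray of local (this PE's compute domain),
! described once per receiver by a datatype from mpp_type_create, so no
! explicit packing buffer is needed.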
! Receive configuration
if (global_on_this_pe) then
do n = 0, nd - 1
recvcounts(n) = 1
pe = pelist_idx(n)
if (xonly) then
is = domain%x(1)%list(pe)%compute%begin
ie = domain%x(1)%list(pe)%compute%end + ishift
js = jsc; je = jec
else if (yonly) then
is = isc; ie = iec
js = domain%y(1)%list(pe)%compute%begin
je = domain%y(1)%list(pe)%compute%end + jshift
else
is = domain%list(pe)%x(1)%compute%begin
ie = domain%list(pe)%x(1)%compute%end + ishift
js = domain%list(pe)%y(1)%compute%begin
je = domain%list(pe)%y(1)%compute%end + jshift
end if
array_of_subsizes = [ie - is + 1, je - js + 1, ke]
array_of_starts = [is - isg + ipos, js - jsg + jpos, 0]
call mpp_type_create( &
global, &
array_of_subsizes, &
array_of_starts, &
recvtypes(n) &
)
end do
end if
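! Each receive type places the sender's compute-domain block at its offset
! relative to the global start. The rank-1 pointer remapping below passes
! local and global to mpp_alltoall as flat arrays; the derived types carry
! the actual subarray layout.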
plocal(1:size(local)) => local
pglobal(1:size(global)) => global
call mpp_alltoall(plocal, sendcounts, sdispls, sendtypes, &
pglobal, recvcounts, rdispls, recvtypes, &
pelist)
plocal => null()
pglobal => null()
! Cleanup
deallocate(pelist)
deallocate(sendcounts, sdispls, sendtypes)
deallocate(recvcounts, rdispls, recvtypes)
call mpp_sync_self()
end subroutine mpp_do_global_field2D_a2a_i8_3d
# 974 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_do_global_field.h" 1
subroutine mpp_do_global_field2D_l8_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
logical(8), intent(in) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
logical(8), intent(out) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
logical(8), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
logical(8) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
logical(8) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
! make contiguous array from compute domain
m = 0
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
global = .false.
# 106
endif
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
global(i+ipos,j+jpos,k) = clocal(m) !always fill local domain directly
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
end do
end do
end do
endif
! If there is more than one tile on this PE, then no tile on this PE is decomposed, so we can simply return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer ptr interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%x(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
global(i,j+jpos,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer pointer interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%y(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
global(i+ipos,j,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv(cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
else
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv( cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
end if
call mpp_sync_self()
return
end subroutine mpp_do_global_field2D_l8_3d
subroutine mpp_do_global_field2D_a2a_l8_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer, intent(in) :: tile, ishift, jshift
logical(8), intent(in), contiguous, target :: local(:,:,:)
logical(8), intent(out), contiguous, target :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
logical(8), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
! Alltoallw vectors
logical(8), dimension(:), pointer :: plocal, pglobal
integer, dimension(:), allocatable :: sendcounts(:), recvcounts(:)
integer, dimension(:), allocatable :: sdispls(:), rdispls(:)
type(mpp_type), allocatable :: sendtypes(:), recvtypes(:)
integer, dimension(3) :: array_of_subsizes, array_of_starts
integer :: n_sends, n_ax, pe
integer :: isg, jsg
integer, allocatable :: pelist(:), axis_pelist(:), pelist_idx(:)
if (.NOT.module_is_initialized) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
! Validate flag consistency and configure the function
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
! Calculate offset for truncated global fields
! NOTE: We do not check contiguity of global subarrays, and assume that
! they have been copied to a contiguous array.
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
! NOTE: Since local is assumed to contiguously match the data domain, this
! is not a useful check. But maybe someday we can support compute
! domains.
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin
joff = -domain%y(tile)%compute%begin
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin
joff = -domain%y(tile)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
isg = domain%x(1)%global%begin; jsg = domain%y(1)%global%begin
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
global = .false.
# 362
endif
endif
! If there is more than one tile on this PE, then no tile on this PE is
! decomposed, so we can simply return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
! Generate the pelist
! TODO: Add these to the domain API
if (xonly) then
n_ax = size(domain%x(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%x(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else if (yonly) then
n_ax = size(domain%y(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%y(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else
nd = size(domain%list(:))
allocate(pelist(nd), pelist_idx(0:nd-1))
call mpp_get_pelist(domain, pelist)
pelist_idx = [ (i, i=0, nd-1) ]
end if
! Allocate message data buffers
allocate(sendcounts(0:nd-1))
allocate(sdispls(0:nd-1))
allocate(sendtypes(0:nd-1))
sendcounts(:) = 0
sdispls(:) = 0
sendtypes(:) = mpp_byte
allocate(recvcounts(0:nd-1))
allocate(rdispls(0:nd-1))
allocate(recvtypes(0:nd-1))
recvcounts(:) = 0
rdispls(:) = 0
recvtypes(:) = mpp_byte
array_of_subsizes = [iec - isc + 1, jec - jsc + 1, size(local, 3)]
array_of_starts = [isc + ioff, jsc + joff, 0]
n_sends = merge(1, nd, root_only) ! 1 if root_only else nd
do n = 0, n_sends - 1
sendcounts(n) = 1
call mpp_type_create( &
local, &
array_of_subsizes, &
array_of_starts, &
sendtypes(n) &
)
end do
! Receive configuration
if (global_on_this_pe) then
do n = 0, nd - 1
recvcounts(n) = 1
pe = pelist_idx(n)
if (xonly) then
is = domain%x(1)%list(pe)%compute%begin
ie = domain%x(1)%list(pe)%compute%end + ishift
js = jsc; je = jec
else if (yonly) then
is = isc; ie = iec
js = domain%y(1)%list(pe)%compute%begin
je = domain%y(1)%list(pe)%compute%end + jshift
else
is = domain%list(pe)%x(1)%compute%begin
ie = domain%list(pe)%x(1)%compute%end + ishift
js = domain%list(pe)%y(1)%compute%begin
je = domain%list(pe)%y(1)%compute%end + jshift
end if
array_of_subsizes = [ie - is + 1, je - js + 1, ke]
array_of_starts = [is - isg + ipos, js - jsg + jpos, 0]
call mpp_type_create( &
global, &
array_of_subsizes, &
array_of_starts, &
recvtypes(n) &
)
end do
end if
plocal(1:size(local)) => local
pglobal(1:size(global)) => global
call mpp_alltoall(plocal, sendcounts, sdispls, sendtypes, &
pglobal, recvcounts, rdispls, recvtypes, &
pelist)
plocal => null()
pglobal => null()
! Cleanup
deallocate(pelist)
deallocate(sendcounts, sdispls, sendtypes)
deallocate(recvcounts, rdispls, recvtypes)
call mpp_sync_self()
end subroutine mpp_do_global_field2D_a2a_l8_3d
# 983 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_do_global_field.h" 1
subroutine mpp_do_global_field2D_r4_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
real(4), intent(in) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
real(4), intent(out) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
real(4), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
real(4) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
real(4) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
! make contiguous array from compute domain
m = 0
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
# 104
global = 0
endif
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
global(i+ipos,j+jpos,k) = clocal(m) !always fill local domain directly
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
end do
end do
end do
endif
! If there is more than one tile on this PE, then no tile on this PE is decomposed, so we can simply return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer ptr interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%x(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
global(i,j+jpos,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer pointer interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%y(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
global(i+ipos,j,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv(cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
else
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv( cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
end if
call mpp_sync_self()
return
end subroutine mpp_do_global_field2D_r4_3d
subroutine mpp_do_global_field2D_a2a_r4_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer, intent(in) :: tile, ishift, jshift
real(4), intent(in), contiguous, target :: local(:,:,:)
real(4), intent(out), contiguous, target :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
real(4), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
! Alltoallw vectors
real(4), dimension(:), pointer :: plocal, pglobal
integer, dimension(:), allocatable :: sendcounts(:), recvcounts(:)
integer, dimension(:), allocatable :: sdispls(:), rdispls(:)
type(mpp_type), allocatable :: sendtypes(:), recvtypes(:)
integer, dimension(3) :: array_of_subsizes, array_of_starts
integer :: n_sends, n_ax, pe
integer :: isg, jsg
integer, allocatable :: pelist(:), axis_pelist(:), pelist_idx(:)
if (.NOT.module_is_initialized) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
! Validate flag consistency and configure the function
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
! Calculate offset for truncated global fields
! NOTE: We do not check contiguity of global subarrays, and assume that
! they have been copied to a contiguous array.
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
! NOTE: Since local is assumed to contiguously match the data domain, this
! is not a useful check. But maybe someday we can support compute
! domains.
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin
joff = -domain%y(tile)%compute%begin
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin
joff = -domain%y(tile)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
isg = domain%x(1)%global%begin; jsg = domain%y(1)%global%begin
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
# 360
global = 0
endif
endif
! If there is more than one tile on this PE, then no tile on this PE is
! decomposed, so we can simply return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
! Generate the pelist
! TODO: Add these to the domain API
if (xonly) then
n_ax = size(domain%x(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%x(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else if (yonly) then
n_ax = size(domain%y(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%y(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else
nd = size(domain%list(:))
allocate(pelist(nd), pelist_idx(0:nd-1))
call mpp_get_pelist(domain, pelist)
pelist_idx = [ (i, i=0, nd-1) ]
end if
! Allocate message data buffers
allocate(sendcounts(0:nd-1))
allocate(sdispls(0:nd-1))
allocate(sendtypes(0:nd-1))
sendcounts(:) = 0
sdispls(:) = 0
sendtypes(:) = mpp_byte
allocate(recvcounts(0:nd-1))
allocate(rdispls(0:nd-1))
allocate(recvtypes(0:nd-1))
recvcounts(:) = 0
rdispls(:) = 0
recvtypes(:) = mpp_byte
array_of_subsizes = [iec - isc + 1, jec - jsc + 1, size(local, 3)]
array_of_starts = [isc + ioff, jsc + joff, 0]
n_sends = merge(1, nd, root_only) ! 1 if root_only else nd
do n = 0, n_sends - 1
sendcounts(n) = 1
call mpp_type_create( &
local, &
array_of_subsizes, &
array_of_starts, &
sendtypes(n) &
)
end do
! Receive configuration
if (global_on_this_pe) then
do n = 0, nd - 1
recvcounts(n) = 1
pe = pelist_idx(n)
if (xonly) then
is = domain%x(1)%list(pe)%compute%begin
ie = domain%x(1)%list(pe)%compute%end + ishift
js = jsc; je = jec
else if (yonly) then
is = isc; ie = iec
js = domain%y(1)%list(pe)%compute%begin
je = domain%y(1)%list(pe)%compute%end + jshift
else
is = domain%list(pe)%x(1)%compute%begin
ie = domain%list(pe)%x(1)%compute%end + ishift
js = domain%list(pe)%y(1)%compute%begin
je = domain%list(pe)%y(1)%compute%end + jshift
end if
array_of_subsizes = [ie - is + 1, je - js + 1, ke]
array_of_starts = [is - isg + ipos, js - jsg + jpos, 0]
call mpp_type_create( &
global, &
array_of_subsizes, &
array_of_starts, &
recvtypes(n) &
)
end do
end if
plocal(1:size(local)) => local
pglobal(1:size(global)) => global
call mpp_alltoall(plocal, sendcounts, sdispls, sendtypes, &
pglobal, recvcounts, rdispls, recvtypes, &
pelist)
plocal => null()
pglobal => null()
! Cleanup
deallocate(pelist)
deallocate(sendcounts, sdispls, sendtypes)
deallocate(recvcounts, rdispls, recvtypes)
call mpp_sync_self()
end subroutine mpp_do_global_field2D_a2a_r4_3d
# 994 "../mpp/include/mpp_domains_reduce.inc" 2
# 1004
# 1 "../mpp/include/mpp_do_global_field.h" 1
subroutine mpp_do_global_field2D_i4_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer(4), intent(in) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
integer(4), intent(out) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
integer(4), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
integer(4) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
integer(4) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
! make contiguous array from compute domain
m = 0
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
# 104
global = 0
endif
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
global(i+ipos,j+jpos,k) = clocal(m) !always fill local domain directly
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
end do
end do
end do
endif
! If there is more than one tile on this PE, then no tile on this PE is decomposed, so we can simply return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer ptr interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%x(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
global(i,j+jpos,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer pointer interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%y(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
global(i+ipos,j,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv(cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
else
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv( cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
end if
call mpp_sync_self()
return
end subroutine mpp_do_global_field2D_i4_3d
subroutine mpp_do_global_field2D_a2a_i4_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer, intent(in) :: tile, ishift, jshift
integer(4), intent(in), contiguous, target :: local(:,:,:)
integer(4), intent(out), contiguous, target :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
integer(4), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
! Alltoallw vectors
integer(4), dimension(:), pointer :: plocal, pglobal
integer, dimension(:), allocatable :: sendcounts(:), recvcounts(:)
integer, dimension(:), allocatable :: sdispls(:), rdispls(:)
type(mpp_type), allocatable :: sendtypes(:), recvtypes(:)
integer, dimension(3) :: array_of_subsizes, array_of_starts
integer :: n_sends, n_ax, pe
integer :: isg, jsg
integer, allocatable :: pelist(:), axis_pelist(:), pelist_idx(:)
if (.NOT.module_is_initialized) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
! Validate flag consistency and configure the function
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
! Calculate offset for truncated global fields
! NOTE: We do not check contiguity of global subarrays, and assume that
! they have been copied to a contiguous array.
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
! NOTE: Since local is assumed to contiguously match the data domain, this
! is not a useful check. But maybe someday we can support compute
! domains.
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin
joff = -domain%y(tile)%compute%begin
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin
joff = -domain%y(tile)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
isg = domain%x(1)%global%begin; jsg = domain%y(1)%global%begin
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
# 360
global = 0
endif
endif
! If there is more than one tile on this PE, then no tile on this PE is
! decomposed, so we can simply return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
! Generate the pelist
! TODO: Add these to the domain API
if (xonly) then
n_ax = size(domain%x(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%x(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else if (yonly) then
n_ax = size(domain%y(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%y(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else
nd = size(domain%list(:))
allocate(pelist(nd), pelist_idx(0:nd-1))
call mpp_get_pelist(domain, pelist)
pelist_idx = [ (i, i=0, nd-1) ]
end if
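! At this point pelist(:) holds the PEs taking part in the exchange (the PEs
! sharing this row for xonly, this column for yonly, or the full domain pelist
! otherwise), and pelist_idx(:) maps each entry back to its position in the
! corresponding domain list so the peer's compute extents can be looked up.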
! Allocate message data buffers
allocate(sendcounts(0:nd-1))
allocate(sdispls(0:nd-1))
allocate(sendtypes(0:nd-1))
sendcounts(:) = 0
sdispls(:) = 0
sendtypes(:) = mpp_byte
allocate(recvcounts(0:nd-1))
allocate(rdispls(0:nd-1))
allocate(recvtypes(0:nd-1))
recvcounts(:) = 0
rdispls(:) = 0
recvtypes(:) = mpp_byte
array_of_subsizes = [iec - isc + 1, jec - jsc + 1, size(local, 3)]
array_of_starts = [isc + ioff, jsc + joff, 0]
n_sends = merge(1, nd, root_only) ! 1 if root_only else nd
do n = 0, n_sends - 1
sendcounts(n) = 1
call mpp_type_create( &
local, &
array_of_subsizes, &
array_of_starts, &
sendtypes(n) &
)
end do
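! Each send is described in place: array_of_starts locates the compute-domain
! block inside `local` (0-based offsets), so no packing buffer is needed.
! When root_only is set, n_sends is 1 and each PE posts a single outgoing block.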
! Receive configuration
if (global_on_this_pe) then
do n = 0, nd - 1
recvcounts(n) = 1
pe = pelist_idx(n)
if (xonly) then
is = domain%x(1)%list(pe)%compute%begin
ie = domain%x(1)%list(pe)%compute%end + ishift
js = jsc; je = jec
else if (yonly) then
is = isc; ie = iec
js = domain%y(1)%list(pe)%compute%begin
je = domain%y(1)%list(pe)%compute%end + jshift
else
is = domain%list(pe)%x(1)%compute%begin
ie = domain%list(pe)%x(1)%compute%end + ishift
js = domain%list(pe)%y(1)%compute%begin
je = domain%list(pe)%y(1)%compute%end + jshift
end if
array_of_subsizes = [ie - is + 1, je - js + 1, ke]
array_of_starts = [is - isg + ipos, js - jsg + jpos, 0]
call mpp_type_create( &
global, &
array_of_subsizes, &
array_of_starts, &
recvtypes(n) &
)
end do
end if
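! Only PEs that hold the global field post receives; each receive type selects
! the slab of `global` covering the matching sender's compute domain, shifted
! by (ipos,jpos) when a truncated (xonly/yonly) global array is being filled.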
plocal(1:size(local)) => local
pglobal(1:size(global)) => global
call mpp_alltoall(plocal, sendcounts, sdispls, sendtypes, &
pglobal, recvcounts, rdispls, recvtypes, &
pelist)
plocal => null()
pglobal => null()
! Cleanup
deallocate(pelist)
deallocate(sendcounts, sdispls, sendtypes)
deallocate(recvcounts, rdispls, recvtypes)
call mpp_sync_self()
end subroutine mpp_do_global_field2D_a2a_i4_3d
# 1013 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_do_global_field.h" 1
subroutine mpp_do_global_field2D_l4_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
logical(4), intent(in) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
logical(4), intent(out) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
logical(4), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
logical(4) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
logical(4) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
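! nword_me is the word count of this PE's compute-domain block: it is the
! packed length of clocal and the length every peer expects to receive from us.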
! make contiguous array from compute domain
m = 0
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
global = .false.
# 106
endif
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
global(i+ipos,j+jpos,k) = clocal(m) !always fill local domain directly
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = local(i+ioff,j+joff,k)
end do
end do
end do
endif
! If there is more than one tile on this PE, then no tile on this PE is decomposed across PEs, so we can just return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
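! Each pass n pairs this PE with the PE n positions behind it on the ring
! (lpos, which we send to) and the PE n positions ahead of it (rpos, which we
! receive from), so the pairwise transmits are staggered and no PE is flooded.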
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer ptr interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%x(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
global(i,j+jpos,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
if (from_pe == NULL_PE) then
nwords = 0
else
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
endif
! Force use of scalar, integer pointer interface
call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%y(1)%list(lpos)%pe, &
get_data=cremote(1), glen=nwords, from_pe=from_pe )
m = 0
if (from_pe /= NULL_PE) then
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
global(i+ipos,j,k) = cremote(m)
end do
end do
end do
endif
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv(cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
else
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
call mpp_recv( cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
global(i,j,k) = cremote(m)
end do
end do
end do
end do
endif
end if
call mpp_sync_self()
return
end subroutine mpp_do_global_field2D_l4_3d
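! A minimal usage sketch of the public wrapper that ends up in these routines
! (array names, sizes, and the layout below are illustrative assumptions only):
!
!   use mpp_domains_mod, only: domain2D, mpp_define_domains, mpp_global_field, XUPDATE
!   type(domain2D)    :: domain
!   real, allocatable :: local(:,:,:), global(:,:,:)
!   call mpp_define_domains( (/1,ni,1,nj/), (/npes_x,npes_y/), domain, xhalo=1, yhalo=1 )
!   ! ... allocate local on the data domain and global on the full domain ...
!   call mpp_global_field( domain, local, global )                 ! gather the full field
!   call mpp_global_field( domain, local, global, flags=XUPDATE )  ! gather along x only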
subroutine mpp_do_global_field2D_a2a_l4_3d( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer, intent(in) :: tile, ishift, jshift
logical(4), intent(in), contiguous, target :: local(:,:,:)
logical(4), intent(out), contiguous, target :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
logical(4), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
! Alltoallw vectors
logical(4), dimension(:), pointer :: plocal, pglobal
integer, dimension(:), allocatable :: sendcounts(:), recvcounts(:)
integer, dimension(:), allocatable :: sdispls(:), rdispls(:)
type(mpp_type), allocatable :: sendtypes(:), recvtypes(:)
integer, dimension(3) :: array_of_subsizes, array_of_starts
integer :: n_sends, n_ax, pe
integer :: isg, jsg
integer, allocatable :: pelist(:), axis_pelist(:), pelist_idx(:)
if (.NOT.module_is_initialized) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
! Validate flag consistency and configure the function
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
! Calculate offset for truncated global fields
! NOTE: We do not check contiguity of global subarrays, and assume that
! they have been copied to a contiguous array.
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
! NOTE: Since local is assumed to contiguously match the data domain, this
! is not a useful check. But maybe someday we can support compute
! domains.
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin
joff = -domain%y(tile)%compute%begin
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin
joff = -domain%y(tile)%data%begin
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
isg = domain%x(1)%global%begin; jsg = domain%y(1)%global%begin
if(global_on_this_pe) then
!z1l: initialize global = 0 to support mask domain
if(PRESENT(default_data)) then
global = default_data
else
global = .false.
# 362
endif
endif
! If there is more than one tile on this PE, then no tile on this PE is
! decomposed across PEs, so we can just return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
! Generate the pelist
! TODO: Add these to the domain API
if (xonly) then
n_ax = size(domain%x(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%x(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else if (yonly) then
n_ax = size(domain%y(1)%list(:))
allocate(axis_pelist(n_ax))
axis_pelist = [ (domain%y(1)%list(i)%pe, i = 0, n_ax-1) ]
nd = count(axis_pelist >= 0)
allocate(pelist(nd), pelist_idx(0:nd-1))
pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))
deallocate(axis_pelist)
else
nd = size(domain%list(:))
allocate(pelist(nd), pelist_idx(0:nd-1))
call mpp_get_pelist(domain, pelist)
pelist_idx = [ (i, i=0, nd-1) ]
end if
! Allocate message data buffers
allocate(sendcounts(0:nd-1))
allocate(sdispls(0:nd-1))
allocate(sendtypes(0:nd-1))
sendcounts(:) = 0
sdispls(:) = 0
sendtypes(:) = mpp_byte
allocate(recvcounts(0:nd-1))
allocate(rdispls(0:nd-1))
allocate(recvtypes(0:nd-1))
recvcounts(:) = 0
rdispls(:) = 0
recvtypes(:) = mpp_byte
array_of_subsizes = [iec - isc + 1, jec - jsc + 1, size(local, 3)]
array_of_starts = [isc + ioff, jsc + joff, 0]
n_sends = merge(1, nd, root_only) ! 1 if root_only else nd
do n = 0, n_sends - 1
sendcounts(n) = 1
call mpp_type_create( &
local, &
array_of_subsizes, &
array_of_starts, &
sendtypes(n) &
)
end do
! Receive configuration
if (global_on_this_pe) then
do n = 0, nd - 1
recvcounts(n) = 1
pe = pelist_idx(n)
if (xonly) then
is = domain%x(1)%list(pe)%compute%begin
ie = domain%x(1)%list(pe)%compute%end + ishift
js = jsc; je = jec
else if (yonly) then
is = isc; ie = iec
js = domain%y(1)%list(pe)%compute%begin
je = domain%y(1)%list(pe)%compute%end + jshift
else
is = domain%list(pe)%x(1)%compute%begin
ie = domain%list(pe)%x(1)%compute%end + ishift
js = domain%list(pe)%y(1)%compute%begin
je = domain%list(pe)%y(1)%compute%end + jshift
end if
array_of_subsizes = [ie - is + 1, je - js + 1, ke]
array_of_starts = [is - isg + ipos, js - jsg + jpos, 0]
call mpp_type_create( &
global, &
array_of_subsizes, &
array_of_starts, &
recvtypes(n) &
)
end do
end if
plocal(1:size(local)) => local
pglobal(1:size(global)) => global
call mpp_alltoall(plocal, sendcounts, sdispls, sendtypes, &
pglobal, recvcounts, rdispls, recvtypes, &
pelist)
plocal => null()
pglobal => null()
! Cleanup
deallocate(pelist)
deallocate(sendcounts, sdispls, sendtypes)
deallocate(recvcounts, rdispls, recvtypes)
call mpp_sync_self()
end subroutine mpp_do_global_field2D_a2a_l4_3d
# 1022 "../mpp/include/mpp_domains_reduce.inc" 2
!****************************************************
# 1 "../mpp/include/mpp_do_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_global_field2D_r8_3d_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
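! This is the adjoint (reverse) of the gather: each PE extracts, from its copy
! of the adjoint global field, the slab matching a peer's compute domain and
! sends it there; the received slab is unpacked into the local adjoint array at
! the end of the routine (the on-PE slab is copied from `global` directly).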
type(domain2D), intent(in) :: domain
real(8), intent(inout) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
real(8), intent(in) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
real(8), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
real(8) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
real(8) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
! make contiguous array from compute domain
m = 0
! If there is more than one tile on this PE, then no tile on this PE is decomposed across PEs, so we can just return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer ptr interface
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
cremote(m) = global(i,j+jpos,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%x(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer pointer interface
m = 0
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
cremote(m) = global(i+ipos,j,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%y(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send(cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
end do
endif
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send( cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
endif
end if
call mpp_sync_self()
! make contiguous array from compute domain
m = 0
# 246
local = 0
if(global_on_this_pe) then
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = global(i+ipos,j+jpos,k) !always fill local domain directly
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
endif
return
end subroutine mpp_do_global_field2D_r8_3d_ad
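! A minimal sketch of the corresponding adjoint call, assuming the public
! wrapper mpp_global_field_ad declared elsewhere in this module (argument
! names are illustrative only): given an adjoint of the gathered field in
! global_ad, this scatters it back onto each PE's local adjoint array:
!
!   call mpp_global_field_ad( domain, local_ad, global_ad )
!   call mpp_global_field_ad( domain, local_ad, global_ad, flags=XUPDATE )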
# 1029 "../mpp/include/mpp_domains_reduce.inc" 2
# 1036
# 1 "../mpp/include/mpp_do_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_global_field2D_i8_3d_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer(8), intent(inout) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
integer(8), intent(in) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
integer(8), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
integer(8) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
integer(8) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
! make contiguous array from compute domain
m = 0
! If there is more than one tile on this PE, then no tile on this PE is decomposed across PEs, so we can just return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer ptr interface
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
cremote(m) = global(i,j+jpos,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%x(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer pointer interface
m = 0
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
cremote(m) = global(i+ipos,j,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%y(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send(cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
end do
endif
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send( cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
endif
end if
call mpp_sync_self()
! make contiguous array from compute domain
m = 0
# 246
local = 0
if(global_on_this_pe) then
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = global(i+ipos,j+jpos,k) !always fill local domain directly
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
endif
return
end subroutine mpp_do_global_field2D_i8_3d_ad
# 1044 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_do_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_global_field2D_l8_3d_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
logical(8), intent(inout) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
logical(8), intent(in) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
logical(8), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
logical(8) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
logical(8) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
! make contiguous array from compute domain
m = 0
! If there is more than one tile on this PE, then no tile on this PE is decomposed across PEs, so we can just return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer ptr interface
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
cremote(m) = global(i,j+jpos,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%x(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer pointer interface
m = 0
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
cremote(m) = global(i+ipos,j,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%y(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send(cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
end do
endif
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send( cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
endif
end if
call mpp_sync_self()
! make contiguous array from compute domain
m = 0
local = .false.
# 248
if(global_on_this_pe) then
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = global(i+ipos,j+jpos,k) !always fill local domain directly
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
endif
return
end subroutine mpp_do_global_field2D_l8_3d_ad
# 1051 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_do_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_global_field2D_r4_3d_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
real(4), intent(inout) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
real(4), intent(in) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
real(4), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
real(4) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
real(4) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
! make contiguous array from compute domain
m = 0
! If there is more than one tile on this PE, then no tile on this PE is decomposed across PEs, so we can just return.
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer ptr interface
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
cremote(m) = global(i,j+jpos,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%x(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer pointer interface
m = 0
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
cremote(m) = global(i+ipos,j,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%y(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send(cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
end do
endif
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send( cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
endif
end if
call mpp_sync_self()
! make contiguous array from compute domain
m = 0
# 246
local = 0
if(global_on_this_pe) then
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = global(i+ipos,j+jpos,k) !always fill local domain directly
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
endif
return
end subroutine mpp_do_global_field2D_r4_3d_ad
# 1060 "../mpp/include/mpp_domains_reduce.inc" 2
# 1068
# 1 "../mpp/include/mpp_do_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_global_field2D_i4_3d_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
integer(4), intent(inout) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
integer(4), intent(in) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
integer(4), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
integer(4) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
integer(4) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
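! Note: clocal and cremote are not separate allocations; the Cray-pointer
! assignments above alias them onto consecutive slices of the shared
! mpp_domains_stack work array (clocal first, cremote immediately after),
! which is why their combined size is checked against mpp_domains_stack_size.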
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
! make contiguous array from compute domain
m = 0
! if there is more than one tile on this pe, the tiles on this pe are not decomposed across PEs, so we can just return
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer ptr interface
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
cremote(m) = global(i,j+jpos,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%x(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer pointer interface
m = 0
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
cremote(m) = global(i+ipos,j,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%y(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send(cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
end do
endif
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send( cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
endif
end if
call mpp_sync_self()
! fill the local array over the compute domain (from the global field or the received buffer)
m = 0
# 246
local = 0
if(global_on_this_pe) then
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = global(i+ipos,j+jpos,k) !always fill local domain directly
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
endif
return
end subroutine mpp_do_global_field2D_i4_3d_ad
# 1075 "../mpp/include/mpp_domains_reduce.inc" 2
# 1 "../mpp/include/mpp_do_global_field_ad.h" 1
! -*-f90-*-
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
subroutine mpp_do_global_field2D_l4_3d_ad( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
type(domain2D), intent(in) :: domain
logical(4), intent(inout) :: local(:,:,:)
integer, intent(in) :: tile, ishift, jshift
logical(4), intent(in) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
integer, intent(in), optional :: flags
logical(4), intent(in), optional :: default_data
integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
logical(4) :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
logical(4) :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )
xonly = .FALSE.
yonly = .FALSE.
root_only = .FALSE.
if( PRESENT(flags) ) then
xonly = BTEST(flags,EAST)
yonly = BTEST(flags,SOUTH)
if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
if(xonly .AND. yonly) then
xonly = .false.; yonly = .false.
endif
root_only = BTEST(flags, ROOT_GLOBAL)
if( (xonly .or. yonly) .AND. root_only ) then
call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
root_only = .FALSE.
endif
endif
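! A brief note on the flag handling above (inferred from the bit tests and the
! warning text, not from separate documentation): the EAST bit corresponds to an
! x-direction-only gather (XUPDATE), the SOUTH bit to a y-direction-only gather
! (YUPDATE), and the ROOT_GLOBAL bit restricts the global field to the tile root
! PE; setting both XUPDATE and YUPDATE falls back to a full 2D gather, and
! ROOT_GLOBAL is ignored when combined with a one-directional update.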
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
if(xonly) then
if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
jpos = -domain%y(tile)%compute%begin + 1
else if(yonly) then
if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
ipos = -domain%x(tile)%compute%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
endif
endif
endif
if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
!local is on compute domain
ioff = -domain%x(tile)%compute%begin + 1
joff = -domain%y(tile)%compute%begin + 1
else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
!local is on data domain
ioff = -domain%x(tile)%data%begin + 1
joff = -domain%y(tile)%data%begin + 1
else
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
end if
ke = size(local,3)
isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
nword_me = (iec-isc+1)*(jec-jsc+1)*ke
! make contiguous array from compute domain
m = 0
! if there is more than one tile on this pe, the tiles on this pe are not decomposed across PEs, so we can just return
if(size(domain%x(:))>1) then
!--- the following is needed to avoid deadlock.
if( tile == size(domain%x(:)) ) call mpp_sync_self( )
return
end if
root_pe = mpp_root_pe()
!fill off-domains (note loops begin at an offset of 1)
if( xonly )then
nd = size(domain%x(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%x(1)%pos+nd-n,nd)
rpos = mod(domain%x(1)%pos +n,nd)
from_pe = domain%x(1)%list(rpos)%pe
rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer ptr interface
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
do k = 1, ke
do j = jsc, jec
do i = is, ie
m = m + 1
cremote(m) = global(i,j+jpos,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%x(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else if( yonly )then
nd = size(domain%y(1)%list(:))
do n = 1,nd-1
lpos = mod(domain%y(1)%pos+nd-n,nd)
rpos = mod(domain%y(1)%pos +n,nd)
from_pe = domain%y(1)%list(rpos)%pe
rpos = from_pe - root_pe
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
* (domain%list(rpos)%y(1)%compute%size+jshift) * ke
! Force use of scalar, integer pointer interface
m = 0
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = isc, iec
m = m + 1
cremote(m) = global(i+ipos,j,k)
end do
end do
end do
call mpp_transmit( put_data=cremote(1), plen=nwords, to_pe=from_pe, &
get_data=clocal(1), glen=nword_me, from_pe=domain%y(1)%list(lpos)%pe )
call mpp_sync_self() !-ensure MPI_ISEND is done.
end do
else
tile_id = domain%tile_id(1)
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send(cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
end do
endif
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
m = 0
is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
do k = 1,ke
do j = js, je
do i = is, ie
m = m + 1
cremote(m) = global(i,j,k)
end do
end do
end do
call mpp_send( cremote(1), plen=nwords, to_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id(1).NE. tile_id ) cycle ! global field only within tile
call mpp_recv( clocal(1), glen=nword_me, from_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
endif
end if
call mpp_sync_self()
! fill the local array over the compute domain (from the global field or the received buffer)
m = 0
local = .false.
# 248
if(global_on_this_pe) then
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
clocal(m) = global(i+ipos,j+jpos,k) !always fill local domain directly
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
else
do k = 1, ke
do j = jsc, jec
do i = isc, iec
m = m + 1
local(i+ioff,j+joff,k) = clocal(m)
end do
end do
end do
endif
return
end subroutine mpp_do_global_field2D_l4_3d_ad
# 1082 "../mpp/include/mpp_domains_reduce.inc" 2
# 2789 "../mpp/mpp_domains.F90" 2
# 1 "../mpp/include/mpp_unstruct_domain.inc" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
!#####################################################################
subroutine mpp_define_unstruct_domain(UG_domain, SG_domain, npts_tile, grid_nlev, ndivs, npes_io_group, grid_index, name)
type(domainUG), intent(inout) :: UG_domain
type(domain2d), target, intent(in) :: SG_domain
integer, intent(in) :: npts_tile(:) ! number of unstructured points on each tile
integer, intent(in) :: grid_nlev(:) ! number of levels in each unstructured grid.
integer, intent(in) :: ndivs
integer, intent(in) :: npes_io_group ! number of processors in an I/O group; only PEs with the same tile_id
! are placed in the same I/O group
integer, intent(in) :: grid_index(:)
character(len=*), optional, intent(in) :: name
integer, dimension(size(npts_tile(:))) :: ndivs_tile, pe_start, pe_end
integer, dimension(0:ndivs-1) :: ibegin, iend, costs_list
integer :: ntiles, ntotal_pts, ndivs_used, max_npts, cur_tile, cur_npts
integer :: n, ts, te, p, pos, tile_id, ngroup, group_id, my_pos, i
integer :: npes_in_group, is, ie, ntotal_costs, max_cost, cur_cost, costs_left
integer :: npts_left, ndiv_left, cur_pos, ndiv, prev_cost, ioff
real :: avg_cost
integer :: costs(size(npts_tile(:)))
UG_domain%SG_domain => SG_domain
ntiles = size(npts_tile(:))
UG_domain%ntiles = ntiles
!--- total number of points must be no less than ndivs
if( sum(npts_tile) < ndivs ) call mpp_error(FATAL, 'mpp_define_unstruct_domain: total number of points is less than ndivs')
max_cost = 0
cur_tile = 0
do n = 1, ntiles
if( ndivs_tile(n) > 1 ) then
cur_cost = CEILING(real(costs(n))/(ndivs_tile(n)-1))
if( max_cost == 0 .OR. cur_cost 0 )
UG_domain%SG2UG%nrecv = nrecv
allocate(UG_domain%SG2UG%recv(nrecv))
nrecv = 0
pos = 0
do list = 0,nlist-1
m = mod( SG_domain%pos+nlist-list, nlist )
if( recv_cnt(m) > 0 ) then
nrecv = nrecv+1
UG_domain%SG2UG%recv(nrecv)%count = recv_cnt(m)
UG_domain%SG2UG%recv(nrecv)%pe = UG_domain%list(m)%pe
allocate(UG_domain%SG2UG%recv(nrecv)%i(recv_cnt(m)))
pos = buffer_pos(m)
do l = 1, recv_cnt(m)
pos = pos + 1
UG_domain%SG2UG%recv(nrecv)%i(l) = index_list(pos)
enddo
endif
enddo
!--- figure out the send index information.
send_cnt = recv_cnt
recv_cnt = 0
call mpp_alltoall(send_cnt,1,recv_cnt,1)
!--- make sure sum(send_cnt) == UG_domain%compute%size
if( UG_domain%compute%size .NE. sum(send_cnt) ) call mpp_error(FATAL, &
"compute_overlap_SG2UG: UG_domain%compute%size .NE. sum(send_cnt)")
allocate(recv_buffer(sum(recv_cnt)))
send_buffer_pos = 0; recv_buffer_pos = 0
send_pos = 0; recv_pos = 0
do n = 0, nlist-1
if(send_cnt(n) > 0) then
send_buffer_pos(n) = send_pos
send_pos = send_pos + send_cnt(n)
endif
if(recv_cnt(n) > 0) then
recv_buffer_pos(n) = recv_pos
recv_pos = recv_pos + recv_cnt(n)
endif
enddo
call mpp_alltoall(send_buffer, send_cnt, send_buffer_pos, &
recv_buffer, recv_cnt, recv_buffer_pos)
nsend = count( recv_cnt(:) > 0 )
UG_domain%SG2UG%nsend = nsend
allocate(UG_domain%SG2UG%send(nsend))
nsend = 0
isc = SG_domain%x(1)%compute%begin
jsc = SG_domain%y(1)%compute%begin
do list = 0,nlist-1
m = mod( SG_domain%pos+list, nlist )
if( recv_cnt(m) > 0 ) then
nsend = nsend+1
UG_domain%SG2UG%send(nsend)%count = recv_cnt(m)
UG_domain%SG2UG%send(nsend)%pe = UG_domain%list(m)%pe
allocate(UG_domain%SG2UG%send(nsend)%i(recv_cnt(m)))
allocate(UG_domain%SG2UG%send(nsend)%j(recv_cnt(m)))
pos = recv_buffer_pos(m)
do l = 1, recv_cnt(m)
grid_index = recv_buffer(pos+l)
UG_domain%SG2UG%send(nsend)%i(l) = mod(grid_index-1,nxg) + 1
UG_domain%SG2UG%send(nsend)%j(l) = (grid_index-1)/nxg + 1
enddo
endif
enddo
deallocate(send_buffer, recv_buffer, index_list, buffer_pos)
return
end subroutine compute_overlap_SG2UG
!####################################################################
subroutine compute_overlap_UG2SG(UG_domain)
type(domainUG), intent(inout) :: UG_domain
!--- UG2SG is the reverse of SG2UG
UG_domain%UG2SG%nsend = UG_domain%SG2UG%nrecv
UG_domain%UG2SG%send => UG_domain%SG2UG%recv
UG_domain%UG2SG%nrecv = UG_domain%SG2UG%nsend
UG_domain%UG2SG%recv => UG_domain%SG2UG%send
return
end subroutine compute_overlap_UG2SG
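! Note that the UG2SG send/recv lists above are pointer-aliased to the SG2UG
! recv/send lists rather than copied, so both pass types share the same index arrays.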
!####################################################################
subroutine mpp_get_UG_SG_domain(UG_domain,SG_domain)
type(domainUG), intent(inout) :: UG_domain
type(domain2d), pointer :: SG_domain
SG_domain => UG_domain%SG_domain
return
end subroutine mpp_get_UG_SG_domain
!####################################################################
function mpp_get_UG_io_domain(domain)
type(domainUG), intent(in) :: domain
type(domainUG), pointer :: mpp_get_UG_io_domain
if(ASSOCIATED(domain%io_domain)) then
mpp_get_UG_io_domain => domain%io_domain
else
call mpp_error(FATAL, "mpp_get_UG_io_domain: io_domain is not defined, contact developer")
endif
end function mpp_get_UG_io_domain
!#####################################################################
subroutine mpp_get_UG_compute_domain( domain, begin, end, size)
type(domainUG), intent(in) :: domain
integer, intent(out), optional :: begin, end, size
if( PRESENT(begin) )begin = domain%compute%begin
if( PRESENT(end) )end = domain%compute%end
if( PRESENT(size) )size = domain%compute%size
return
end subroutine mpp_get_UG_compute_domain
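! Illustrative usage (variable names here are hypothetical, not part of the API):
!   integer :: is, ie, nlocal
!   call mpp_get_UG_compute_domain( UG_dom, begin=is, end=ie, size=nlocal )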
!#####################################################################
subroutine mpp_get_UG_global_domain( domain, begin, end, size)
type(domainUG), intent(in) :: domain
integer, intent(out), optional :: begin, end, size
if( PRESENT(begin) )begin = domain%global%begin
if( PRESENT(end) )end = domain%global%end
if( PRESENT(size) )size = domain%global%size
return
end subroutine mpp_get_UG_global_domain
!#####################################################################
subroutine mpp_get_UG_compute_domains( domain, begin, end, size )
type(domainUG), intent(in) :: domain
integer, intent(out), optional, dimension(:) :: begin, end, size
!we use shape instead of size for error checks because size is used as an argument
if( PRESENT(begin) )then
if( any(shape(begin).NE.shape(domain%list)) ) &
call mpp_error( FATAL, 'mpp_get_UG_compute_domains: begin array size does not match domain.' )
begin(:) = domain%list(:)%compute%begin
end if
if( PRESENT(end) )then
if( any(shape(end).NE.shape(domain%list)) ) &
call mpp_error( FATAL, 'mpp_get_UG_compute_domains: end array size does not match domain.' )
end(:) = domain%list(:)%compute%end
end if
if( PRESENT(size) )then
if( any(shape(size).NE.shape(domain%list)) ) &
call mpp_error( FATAL, 'mpp_get_UG_compute_domains: size array size does not match domain.' )
size(:) = domain%list(:)%compute%size
end if
return
end subroutine mpp_get_UG_compute_domains
!#####################################################################
subroutine mpp_get_UG_domains_index( domain, begin, end)
type(domainUG), intent(in) :: domain
integer, intent(out), dimension(:) :: begin, end
!we use shape instead of size for error checks because size is used as an argument
if( any(shape(begin).NE.shape(domain%list)) ) &
call mpp_error( FATAL, 'mpp_get_UG_domains_index: begin array size does not match domain.' )
begin(:) = domain%list(:)%compute%begin_index
if( any(shape(end).NE.shape(domain%list)) ) &
call mpp_error( FATAL, 'mpp_get_UG_domains_index: end array size does not match domain.' )
end(:) = domain%list(:)%compute%end_index
return
end subroutine mpp_get_UG_domains_index
!#####################################################################
function mpp_get_UG_domain_ntiles(domain)
type(domainUG), intent(in) :: domain
integer :: mpp_get_UG_domain_ntiles
mpp_get_UG_domain_ntiles = domain%ntiles
return
end function mpp_get_UG_domain_ntiles
!#######################################################################
subroutine mpp_get_ug_domain_tile_list(domain, tiles)
type(domainUG), intent(in) :: domain
integer, intent(inout) :: tiles(:)
integer :: i
if( size(tiles(:)).NE.size(domain%list(:)) ) &
call mpp_error( FATAL, 'mpp_get_ug_domain_tile_list: tiles array size does not match domain.' )
do i = 1, size(tiles(:))
tiles(i) = domain%list(i-1)%tile_id
end do
end subroutine mpp_get_ug_domain_tile_list
!#####################################################################
function mpp_get_UG_domain_tile_id(domain)
type(domainUG), intent(in) :: domain
integer :: mpp_get_UG_domain_tile_id
mpp_get_UG_domain_tile_id = domain%tile_id
return
end function mpp_get_UG_domain_tile_id
!####################################################################
function mpp_get_UG_domain_npes(domain)
type(domainUG), intent(in) :: domain
integer :: mpp_get_UG_domain_npes
mpp_get_UG_domain_npes = size(domain%list(:))
return
end function mpp_get_UG_domain_npes
!####################################################################
subroutine mpp_get_UG_domain_pelist( domain, pelist)
type(domainUG), intent(in) :: domain
integer, intent(out) :: pelist(:)
if( size(pelist(:)).NE.size(domain%list(:)) ) &
call mpp_error( FATAL, 'mpp_get_UG_domain_pelist: pelist array size does not match domain.' )
pelist(:) = domain%list(:)%pe
return
end subroutine mpp_get_UG_domain_pelist
!###################################################################
subroutine mpp_get_UG_domain_tile_pe_inf( domain, root_pe, npes, pelist)
type(domainUG), intent(in) :: domain
integer, optional, intent(out) :: root_pe, npes
integer, optional, intent(out) :: pelist(:)
if(present(root_pe)) root_pe = domain%tile_root_pe
if(present(npes)) npes = domain%tile_npes
if(present(pelist)) then
if( size(pelist(:)).NE. domain%tile_npes ) &
call mpp_error( FATAL, 'mpp_get_UG_domain_tile_pe_inf: pelist array size does not match domain.' )
pelist(:) = domain%list(domain%pos:domain%pos+domain%tile_npes-1)%pe
endif
return
end subroutine mpp_get_UG_domain_tile_pe_inf
!####################################################################
subroutine mpp_get_UG_domain_grid_index( domain, grid_index)
type(domainUG), intent(in) :: domain
integer, intent(out) :: grid_index(:)
if( size(grid_index(:)).NE.size(domain%grid_index(:)) ) &
call mpp_error( FATAL, 'mpp_get_UG_domain_grid_index: grid_index array size does not match domain.' )
grid_index(:) = domain%grid_index(:)
return
end subroutine mpp_get_UG_domain_grid_index
!###################################################################
subroutine mpp_define_null_UG_domain(domain)
type(domainUG), intent(inout) :: domain
domain%global%begin = -1; domain%global%end = -1; domain%global%size = 0
domain%compute%begin = -1; domain%compute%end = -1; domain%compute%size = 0
domain%pe = NULL_PE
domain%ntiles = -1
domain%pos = -1
domain%tile_id = -1
domain%tile_root_pe = -1
end subroutine mpp_define_null_UG_domain
!##############################################################################
subroutine mpp_broadcast_domain_ug( domain )
!broadcast domain (useful only outside the context of its own pelist)
type(domainUG), intent(inout) :: domain
integer, allocatable :: pes(:)
logical :: native !true if I'm on the pelist of this domain
integer :: listsize, listpos
integer :: n
integer, dimension(7) :: msg, info !pe and compute domain of each item in list
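! The 7-slot message packed below carries, in order: pe, pos, tile_id,
! compute%begin, compute%end, compute%begin_index and compute%end_index
! (see the info(1:7) assignments further down).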
integer :: errunit
errunit = stderr()
if( .NOT.module_is_initialized ) &
call mpp_error( FATAL, 'MPP_BROADCAST_DOMAIN_ug: You must first call mpp_domains_init.' )
!get the current pelist
allocate( pes(0:mpp_npes()-1) )
call mpp_get_current_pelist(pes)
!am I part of this domain?
native = ASSOCIATED(domain%list)
!set local list size
if( native )then
listsize = size(domain%list(:))
else
listsize = 0
end if
call mpp_max(listsize)
if( .NOT.native )then
!initialize domain%list and set null values in message
allocate( domain%list(0:listsize-1) )
domain%pe = NULL_PE
domain%pos = -1
domain%ntiles = -1
domain%compute%begin = 1
domain%compute%end = -1
domain%compute%begin_index = 1
domain%compute%end_index = -1
domain%global %begin = -1
domain%global %end = -1
domain%tile_id = -1
domain%tile_root_pe = -1
end if
!initialize values in info
info(1) = domain%pe
info(2) = domain%pos
info(3) = domain%tile_id
call mpp_get_UG_compute_domain( domain, info(4), info(5))
info(6) = domain%compute%begin_index
info(7) = domain%compute%end_index
!broadcast your info across current pelist and unpack if needed
listpos = 0
do n = 0,mpp_npes()-1
msg = info
if( mpp_pe().EQ.pes(n) .AND. debug )write( errunit,* )'PE ', mpp_pe(), 'broadcasting msg ', msg
call mpp_broadcast( msg, 7, pes(n) )
!no need to unpack message if native
!no need to unpack message from non-native PE
if( .NOT.native .AND. msg(1).NE.NULL_PE )then
domain%list(listpos)%pe = msg(1)
domain%list(listpos)%pos = msg(2)
domain%list(listpos)%tile_id = msg(3)
domain%list(listpos)%compute%begin = msg(4)
domain%list(listpos)%compute%end = msg(5)
domain%list(listpos)%compute%begin_index = msg(6)
domain%list(listpos)%compute%end_index = msg(7)
listpos = listpos + 1
if( debug )write( errunit,* )'PE ', mpp_pe(), 'received domain from PE ', msg(1), 'ls,le=', msg(4:5)
end if
end do
end subroutine mpp_broadcast_domain_ug
!------------------------------------------------------------------------------
function mpp_domain_UG_is_tile_root_pe(domain) result(is_root)
type(domainUG), intent(in) :: domain
logical :: is_root
is_root = ( domain%pe == domain%tile_root_pe )
end function mpp_domain_UG_is_tile_root_pe
!------------------------------------------------------------------------------
subroutine mpp_deallocate_domainUG(domain)
type(domainUG), intent(inout) :: domain
if (associated(domain%list)) then
deallocate(domain%list)
domain%list => null()
endif
if (associated(domain%io_domain)) then
if (associated(domain%io_domain%list)) then
deallocate(domain%io_domain%list)
domain%io_domain%list => null()
endif
deallocate(domain%io_domain)
domain%io_domain => null()
endif
call deallocate_unstruct_pass_type(domain%SG2UG)
call deallocate_unstruct_pass_type(domain%UG2SG)
if (associated(domain%grid_index)) then
deallocate(domain%grid_index)
domain%grid_index => null()
endif
if (associated(domain%SG_domain)) then
domain%SG_domain => null()
endif
return
end subroutine mpp_deallocate_domainUG
!###################################################################
!> Overload the .eq. for UG
function mpp_domainUG_eq( a, b )
logical :: mpp_domainUG_eq
type(domainUG), intent(in) :: a, b
if (associated(a%SG_domain) .and. associated(b%SG_domain)) then
if (a%SG_domain .ne. b%SG_domain) then
mpp_domainUG_eq = .false.
return
endif
elseif (associated(a%SG_domain) .and. .not. associated(b%SG_domain)) then
mpp_domainUG_eq = .false.
return
elseif (.not. associated(a%SG_domain) .and. associated(b%SG_domain)) then
mpp_domainUG_eq = .false.
return
endif
mpp_domainUG_eq = (a%npes_io_group .EQ. b%npes_io_group) .AND. &
(a%pos .EQ. b%pos) .AND. &
(a%ntiles .EQ. b%ntiles) .AND. &
(a%tile_id .EQ. b%tile_id) .AND. &
(a%tile_npes .EQ. b%tile_npes) .AND. &
(a%tile_root_pe .EQ. b%tile_root_pe)
if(.not. mpp_domainUG_eq) return
mpp_domainUG_eq = ( a%compute%begin.EQ.b%compute%begin .AND. &
a%compute%end .EQ.b%compute%end .AND. &
a%global%begin .EQ.b%global%begin .AND. &
a%global%end .EQ.b%global%end .AND. &
a%SG2UG%nsend .EQ.b%SG2UG%nsend .AND. &
a%SG2UG%nrecv .EQ.b%SG2UG%nrecv .AND. &
a%UG2SG%nsend .EQ.b%UG2SG%nsend .AND. &
a%UG2SG%nrecv .EQ.b%UG2SG%nrecv &
)
return
end function mpp_domainUG_eq
!> Overload the .ne. for UG
function mpp_domainUG_ne( a, b )
logical :: mpp_domainUG_ne
type(domainUG), intent(in) :: a, b
mpp_domainUG_ne = .NOT. ( a.EQ.b )
return
end function mpp_domainUG_ne
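! Illustrative usage, assuming these functions are bound to the .EQ. and .NE.
! operators for type(domainUG) via the module's operator interfaces:
!   if( UG_domain_a .NE. UG_domain_b ) call mpp_error( FATAL, 'domains differ' )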
# 1 "../mpp/include/mpp_unstruct_pass_data.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
! This subroutine passes data from a structured domain2D field to an unstructured domain field.
! Currently only implemented for data at grid cell centers.
SUBROUTINE mpp_pass_SG_to_UG_r8_2d(UG_domain, field_SG, field_UG)
type(domainUG), intent(in) :: UG_domain
real(8), intent(inout) :: field_UG(:)
real(8), intent(in) :: field_SG(:,:)
real(8) :: field3D_SG(size(field_SG,1),size(field_SG,2),1)
real(8) :: field2D_UG(size(field_UG(:)), 1)
pointer(ptr_SG, field3D_SG)
pointer(ptr_UG, field2D_UG)
ptr_SG = LOC(field_SG)
ptr_UG = LOC(field_UG)
call mpp_pass_SG_to_UG(UG_domain, field3D_SG, field2D_UG)
end SUBROUTINE mpp_pass_SG_to_UG_r8_2d
SUBROUTINE mpp_pass_SG_to_UG_r8_3d(UG_domain, field_SG, field_UG)
type(domainUG), intent(in) :: UG_domain
real(8), intent(inout) :: field_UG(:,:)
real(8), intent(in) :: field_SG(:,:,:)
real(8) :: buffer(mpp_domains_stack_size)
character(len=8) :: text
integer :: i, j, k, l, m, ke, from_pe, to_pe
integer :: buffer_pos, pos, msgsize, ioff, joff
!--- check if data is on data or computing domain
if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%compute%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%compute%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%compute%begin
joff = 1 - UG_domain%SG_domain%y(1)%compute%begin
else if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%data%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%data%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%data%begin
joff = 1 - UG_domain%SG_domain%y(1)%data%begin
else
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D_: field_SG must match either compute domain or data domain.' )
endif
ke = size(field_SG,3)
buffer_pos = 0
!---pre-post receive
do m = 1, UG_domain%SG2UG%nrecv
msgsize = UG_domain%SG2UG%recv(m)%count*ke
if( msgsize.GT.0 )then
from_pe = UG_domain%SG2UG%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
!---pack and send data
do m = 1, UG_domain%SG2UG%nsend
pos = buffer_pos
msgsize = UG_domain%SG2UG%send(m)%count * ke
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
do k = 1, ke
do l = 1, UG_domain%SG2UG%send(m)%count
i = UG_domain%SG2UG%send(m)%i(l)+ioff
j = UG_domain%SG2UG%send(m)%j(l)+joff
pos = pos+1
buffer(pos) = field_SG(i,j,k)
end do
end do
to_pe = UG_domain%SG2UG%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!--- unpack the buffer
buffer_pos = 0
do m = 1, UG_domain%SG2UG%nrecv
pos = buffer_pos
do k = 1, ke
do l = 1, UG_domain%SG2UG%recv(m)%count
pos = pos+1
i = UG_domain%SG2UG%recv(m)%i(l)
field_UG(i,k) = buffer(pos)
enddo
enddo
buffer_pos = pos
enddo
call mpp_sync_self( )
end SUBROUTINE mpp_pass_SG_to_UG_r8_3d
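! Illustrative calling sequence for the SG<->UG passes (array names and extents
! are hypothetical; the generic interfaces mpp_pass_SG_to_UG/mpp_pass_UG_to_SG
! are assumed to resolve to the routines in this file):
!   real(8) :: t_sg(isd:ied,jsd:jed,nk)   ! field on the structured data domain
!   real(8) :: t_ug(npts_local,nk)        ! same field on the unstructured domain
!   call mpp_pass_SG_to_UG( UG_dom, t_sg, t_ug )
!   call mpp_pass_UG_to_SG( UG_dom, t_ug, t_sg )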
! This subroutine passes data from an unstructured domain field back to a structured domain2D field.
! Currently only implemented for data at grid cell centers.
SUBROUTINE mpp_pass_UG_to_SG_r8_2d(UG_domain, field_UG, field_SG)
type(domainUG), intent(in) :: UG_domain
real(8), intent(in) :: field_UG(:)
real(8), intent(inout) :: field_SG(:,:)
real(8) :: field3D_SG(size(field_SG,1),size(field_SG,2),1)
real(8) :: field2D_UG(size(field_UG(:)), 1)
pointer(ptr_SG, field3D_SG)
pointer(ptr_UG, field2D_UG)
ptr_SG = LOC(field_SG)
ptr_UG = LOC(field_UG)
call mpp_pass_UG_to_SG(UG_domain, field2D_UG, field3D_SG)
end SUBROUTINE mpp_pass_UG_to_SG_r8_2d
SUBROUTINE mpp_pass_UG_to_SG_r8_3d(UG_domain, field_UG, field_SG)
type(domainUG), intent(in) :: UG_domain
real(8), intent(in) :: field_UG(:,:)
real(8), intent(inout) :: field_SG(:,:,:)
real(8) :: buffer(mpp_domains_stack_size)
character(len=8) :: text
integer :: i, j, k, l, m, ke, from_pe, to_pe
integer :: buffer_pos, pos, msgsize, ioff, joff
!--- check if data is on data or computing domain
if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%compute%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%compute%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%compute%begin
joff = 1 - UG_domain%SG_domain%y(1)%compute%begin
else if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%data%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%data%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%data%begin
joff = 1 - UG_domain%SG_domain%y(1)%data%begin
else
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D_: field_SG must match either compute domain or data domain.' )
endif
ke = size(field_SG,3)
buffer_pos = 0
!---pre-post receive
do m = 1, UG_domain%UG2SG%nrecv
msgsize = UG_domain%UG2SG%recv(m)%count * ke
if( msgsize.GT.0 )then
from_pe = UG_domain%UG2SG%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
!---pack and send data
do m = 1, UG_domain%UG2SG%nsend
pos = buffer_pos
msgsize = UG_domain%UG2SG%send(m)%count * ke
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
do k = 1, ke
do l = 1, UG_domain%UG2SG%send(m)%count
pos = pos+1
i = UG_domain%UG2SG%send(m)%i(l)
buffer(pos) = field_UG(i,k)
end do
end do
to_pe = UG_domain%UG2SG%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!--- unpack the buffer
buffer_pos = 0
do m = 1, UG_domain%UG2SG%nrecv
pos = buffer_pos
do k = 1, ke
do l = 1, UG_domain%UG2SG%recv(m)%count
pos = pos+1
i = UG_domain%UG2SG%recv(m)%i(l)+ioff
j = UG_domain%UG2SG%recv(m)%j(l)+joff
field_SG(i,j,k) = buffer(pos)
enddo
enddo
buffer_pos = pos
enddo
call mpp_sync_self( )
end SUBROUTINE mpp_pass_UG_to_SG_r8_3d
# 805 "../mpp/include/mpp_unstruct_domain.inc" 2
# 1 "../mpp/include/mpp_unstruct_pass_data.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
! This subroutine passes data from a structured domain2D field to an unstructured domain field.
! Currently only implemented for data at grid cell centers.
SUBROUTINE mpp_pass_SG_to_UG_r4_2d(UG_domain, field_SG, field_UG)
type(domainUG), intent(in) :: UG_domain
real(4), intent(inout) :: field_UG(:)
real(4), intent(in) :: field_SG(:,:)
real(4) :: field3D_SG(size(field_SG,1),size(field_SG,2),1)
real(4) :: field2D_UG(size(field_UG(:)), 1)
pointer(ptr_SG, field3D_SG)
pointer(ptr_UG, field2D_UG)
ptr_SG = LOC(field_SG)
ptr_UG = LOC(field_UG)
call mpp_pass_SG_to_UG(UG_domain, field3D_SG, field2D_UG)
end SUBROUTINE mpp_pass_SG_to_UG_r4_2d
SUBROUTINE mpp_pass_SG_to_UG_r4_3d(UG_domain, field_SG, field_UG)
type(domainUG), intent(in) :: UG_domain
real(4), intent(inout) :: field_UG(:,:)
real(4), intent(in) :: field_SG(:,:,:)
real(4) :: buffer(mpp_domains_stack_size)
character(len=8) :: text
integer :: i, j, k, l, m, ke, from_pe, to_pe
integer :: buffer_pos, pos, msgsize, ioff, joff
!--- check if data is on data or computing domain
if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%compute%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%compute%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%compute%begin
joff = 1 - UG_domain%SG_domain%y(1)%compute%begin
else if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%data%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%data%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%data%begin
joff = 1 - UG_domain%SG_domain%y(1)%data%begin
else
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D_: field_SG must match either compute domain or data domain.' )
endif
ke = size(field_SG,3)
buffer_pos = 0
!---pre-post receive
do m = 1, UG_domain%SG2UG%nrecv
msgsize = UG_domain%SG2UG%recv(m)%count*ke
if( msgsize.GT.0 )then
from_pe = UG_domain%SG2UG%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
!---pack and send data
do m = 1, UG_domain%SG2UG%nsend
pos = buffer_pos
msgsize = UG_domain%SG2UG%send(m)%count * ke
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
do k = 1, ke
do l = 1, UG_domain%SG2UG%send(m)%count
i = UG_domain%SG2UG%send(m)%i(l)+ioff
j = UG_domain%SG2UG%send(m)%j(l)+joff
pos = pos+1
buffer(pos) = field_SG(i,j,k)
end do
end do
to_pe = UG_domain%SG2UG%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!--- unpack the buffer
buffer_pos = 0
do m = 1, UG_domain%SG2UG%nrecv
pos = buffer_pos
do k = 1, ke
do l = 1, UG_domain%SG2UG%recv(m)%count
pos = pos+1
i = UG_domain%SG2UG%recv(m)%i(l)
field_UG(i,k) = buffer(pos)
enddo
enddo
buffer_pos = pos
enddo
call mpp_sync_self( )
end SUBROUTINE mpp_pass_SG_to_UG_r4_3d
! This subroutine passes data from an unstructured domain field back to a structured domain2D field.
! Currently only implemented for data at grid cell centers.
SUBROUTINE mpp_pass_UG_to_SG_r4_2d(UG_domain, field_UG, field_SG)
type(domainUG), intent(in) :: UG_domain
real(4), intent(in) :: field_UG(:)
real(4), intent(inout) :: field_SG(:,:)
real(4) :: field3D_SG(size(field_SG,1),size(field_SG,2),1)
real(4) :: field2D_UG(size(field_UG(:)), 1)
pointer(ptr_SG, field3D_SG)
pointer(ptr_UG, field2D_UG)
ptr_SG = LOC(field_SG)
ptr_UG = LOC(field_UG)
call mpp_pass_UG_to_SG(UG_domain, field2D_UG, field3D_SG)
end SUBROUTINE mpp_pass_UG_to_SG_r4_2d
SUBROUTINE mpp_pass_UG_to_SG_r4_3d(UG_domain, field_UG, field_SG)
type(domainUG), intent(in) :: UG_domain
real(4), intent(in) :: field_UG(:,:)
real(4), intent(inout) :: field_SG(:,:,:)
real(4) :: buffer(mpp_domains_stack_size)
character(len=8) :: text
integer :: i, j, k, l, m, ke, from_pe, to_pe
integer :: buffer_pos, pos, msgsize, ioff, joff
!--- check if data is on data or computing domain
if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%compute%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%compute%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%compute%begin
joff = 1 - UG_domain%SG_domain%y(1)%compute%begin
else if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%data%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%data%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%data%begin
joff = 1 - UG_domain%SG_domain%y(1)%data%begin
else
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D_: field_SG must match either compute domain or data domain.' )
endif
ke = size(field_SG,3)
buffer_pos = 0
!---pre-post receive
do m = 1, UG_domain%UG2SG%nrecv
msgsize = UG_domain%UG2SG%recv(m)%count * ke
if( msgsize.GT.0 )then
from_pe = UG_domain%UG2SG%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
!---pack and send data
do m = 1, UG_domain%UG2SG%nsend
pos = buffer_pos
msgsize = UG_domain%UG2SG%send(m)%count * ke
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
do k = 1, ke
do l = 1, UG_domain%UG2SG%send(m)%count
pos = pos+1
i = UG_domain%UG2SG%send(m)%i(l)
buffer(pos) = field_UG(i,k)
end do
end do
to_pe = UG_domain%UG2SG%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!--- unpack the buffer
buffer_pos = 0
do m = 1, UG_domain%UG2SG%nrecv
pos = buffer_pos
do k = 1, ke
do l = 1, UG_domain%UG2SG%recv(m)%count
pos = pos+1
i = UG_domain%UG2SG%recv(m)%i(l)+ioff
j = UG_domain%UG2SG%recv(m)%j(l)+joff
field_SG(i,j,k) = buffer(pos)
enddo
enddo
buffer_pos = pos
enddo
call mpp_sync_self( )
end SUBROUTINE mpp_pass_UG_to_SG_r4_3d
# 818 "../mpp/include/mpp_unstruct_domain.inc" 2
# 1 "../mpp/include/mpp_unstruct_pass_data.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
! This subroutine passes data from a structured domain2D field to an unstructured domain field.
! Currently only implemented for data at grid cell centers.
SUBROUTINE mpp_pass_SG_to_UG_i4_2d(UG_domain, field_SG, field_UG)
type(domainUG), intent(in) :: UG_domain
integer(4), intent(inout) :: field_UG(:)
integer(4), intent(in) :: field_SG(:,:)
integer(4) :: field3D_SG(size(field_SG,1),size(field_SG,2),1)
integer(4) :: field2D_UG(size(field_UG(:)), 1)
pointer(ptr_SG, field3D_SG)
pointer(ptr_UG, field2D_UG)
ptr_SG = LOC(field_SG)
ptr_UG = LOC(field_UG)
call mpp_pass_SG_to_UG(UG_domain, field3D_SG, field2D_UG)
end SUBROUTINE mpp_pass_SG_to_UG_i4_2d
SUBROUTINE mpp_pass_SG_to_UG_i4_3d(UG_domain, field_SG, field_UG)
type(domainUG), intent(in) :: UG_domain
integer(4), intent(inout) :: field_UG(:,:)
integer(4), intent(in) :: field_SG(:,:,:)
integer(4) :: buffer(mpp_domains_stack_size)
character(len=8) :: text
integer :: i, j, k, l, m, ke, from_pe, to_pe
integer :: buffer_pos, pos, msgsize, ioff, joff
!--- check if data is on data or computing domain
if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%compute%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%compute%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%compute%begin
joff = 1 - UG_domain%SG_domain%y(1)%compute%begin
else if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%data%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%data%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%data%begin
joff = 1 - UG_domain%SG_domain%y(1)%data%begin
else
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D_: field_SG must match either compute domain or data domain.' )
endif
ke = size(field_SG,3)
buffer_pos = 0
!---pre-post receive
do m = 1, UG_domain%SG2UG%nrecv
msgsize = UG_domain%SG2UG%recv(m)%count*ke
if( msgsize.GT.0 )then
from_pe = UG_domain%SG2UG%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
!---pack and send data
do m = 1, UG_domain%SG2UG%nsend
pos = buffer_pos
msgsize = UG_domain%SG2UG%send(m)%count * ke
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
do k = 1, ke
do l = 1, UG_domain%SG2UG%send(m)%count
i = UG_domain%SG2UG%send(m)%i(l)+ioff
j = UG_domain%SG2UG%send(m)%j(l)+joff
pos = pos+1
buffer(pos) = field_SG(i,j,k)
end do
end do
to_pe = UG_domain%SG2UG%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!--- unpack the buffer
buffer_pos = 0
do m = 1, UG_domain%SG2UG%nrecv
pos = buffer_pos
do k = 1, ke
do l = 1, UG_domain%SG2UG%recv(m)%count
pos = pos+1
i = UG_domain%SG2UG%recv(m)%i(l)
field_UG(i,k) = buffer(pos)
enddo
enddo
buffer_pos = pos
enddo
call mpp_sync_self( )
end SUBROUTINE mpp_pass_SG_to_UG_i4_3d
! This subroutine passes data from an unstructured domain field back to a structured domain2D field.
! Currently only implemented for data at grid cell centers.
SUBROUTINE mpp_pass_UG_to_SG_i4_2d(UG_domain, field_UG, field_SG)
type(domainUG), intent(in) :: UG_domain
integer(4), intent(in) :: field_UG(:)
integer(4), intent(inout) :: field_SG(:,:)
integer(4) :: field3D_SG(size(field_SG,1),size(field_SG,2),1)
integer(4) :: field2D_UG(size(field_UG(:)), 1)
pointer(ptr_SG, field3D_SG)
pointer(ptr_UG, field2D_UG)
ptr_SG = LOC(field_SG)
ptr_UG = LOC(field_UG)
call mpp_pass_UG_to_SG(UG_domain, field2D_UG, field3D_SG)
end SUBROUTINE mpp_pass_UG_to_SG_i4_2d
SUBROUTINE mpp_pass_UG_to_SG_i4_3d(UG_domain, field_UG, field_SG)
type(domainUG), intent(in) :: UG_domain
integer(4), intent(in) :: field_UG(:,:)
integer(4), intent(inout) :: field_SG(:,:,:)
integer(4) :: buffer(mpp_domains_stack_size)
character(len=8) :: text
integer :: i, j, k, l, m, ke, from_pe, to_pe
integer :: buffer_pos, pos, msgsize, ioff, joff
!--- check if data is on data or computing domain
if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%compute%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%compute%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%compute%begin
joff = 1 - UG_domain%SG_domain%y(1)%compute%begin
else if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%data%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%data%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%data%begin
joff = 1 - UG_domain%SG_domain%y(1)%data%begin
else
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D_: field_SG must match either compute domain or data domain.' )
endif
ke = size(field_SG,3)
buffer_pos = 0
!---pre-post receive
do m = 1, UG_domain%UG2SG%nrecv
msgsize = UG_domain%UG2SG%recv(m)%count * ke
if( msgsize.GT.0 )then
from_pe = UG_domain%UG2SG%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
!---pack and send data
do m = 1, UG_domain%UG2SG%nsend
pos = buffer_pos
msgsize = UG_domain%UG2SG%send(m)%count * ke
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
do k = 1, ke
do l = 1, UG_domain%UG2SG%send(m)%count
pos = pos+1
i = UG_domain%UG2SG%send(m)%i(l)
buffer(pos) = field_UG(i,k)
end do
end do
to_pe = UG_domain%UG2SG%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!--- unpack the buffer
buffer_pos = 0
do m = 1, UG_domain%UG2SG%nrecv
pos = buffer_pos
do k = 1, ke
do l = 1, UG_domain%UG2SG%recv(m)%count
pos = pos+1
i = UG_domain%UG2SG%recv(m)%i(l)+ioff
j = UG_domain%UG2SG%recv(m)%j(l)+joff
field_SG(i,j,k) = buffer(pos)
enddo
enddo
buffer_pos = pos
enddo
call mpp_sync_self( )
end SUBROUTINE mpp_pass_UG_to_SG_i4_3d
# 831 "../mpp/include/mpp_unstruct_domain.inc" 2
# 1 "../mpp/include/mpp_unstruct_pass_data.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see .
!***********************************************************************
! This subroutine passes data from a structured domain2D field to an unstructured domain field.
! Currently only implemented for data at grid cell centers.
SUBROUTINE mpp_pass_SG_to_UG_l4_2d(UG_domain, field_SG, field_UG)
type(domainUG), intent(in) :: UG_domain
logical(4), intent(inout) :: field_UG(:)
logical(4), intent(in) :: field_SG(:,:)
logical(4) :: field3D_SG(size(field_SG,1),size(field_SG,2),1)
logical(4) :: field2D_UG(size(field_UG(:)), 1)
pointer(ptr_SG, field3D_SG)
pointer(ptr_UG, field2D_UG)
ptr_SG = LOC(field_SG)
ptr_UG = LOC(field_UG)
call mpp_pass_SG_to_UG(UG_domain, field3D_SG, field2D_UG)
end SUBROUTINE mpp_pass_SG_to_UG_l4_2d
SUBROUTINE mpp_pass_SG_to_UG_l4_3d(UG_domain, field_SG, field_UG)
type(domainUG), intent(in) :: UG_domain
logical(4), intent(inout) :: field_UG(:,:)
logical(4), intent(in) :: field_SG(:,:,:)
logical(4) :: buffer(mpp_domains_stack_size)
character(len=8) :: text
integer :: i, j, k, l, m, ke, from_pe, to_pe
integer :: buffer_pos, pos, msgsize, ioff, joff
!--- check whether the data is on the compute domain or the data domain
if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%compute%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%compute%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%compute%begin
joff = 1 - UG_domain%SG_domain%y(1)%compute%begin
else if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%data%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%data%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%data%begin
joff = 1 - UG_domain%SG_domain%y(1)%data%begin
else
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D: field_SG must match either compute domain or data domain.' )
endif
ke = size(field_SG,3)
buffer_pos = 0
!---pre-post receive
do m = 1, UG_domain%SG2UG%nrecv
msgsize = UG_domain%SG2UG%recv(m)%count*ke
if( msgsize.GT.0 )then
from_pe = UG_domain%SG2UG%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
!---pack and send data
do m = 1, UG_domain%SG2UG%nsend
pos = buffer_pos
msgsize = UG_domain%SG2UG%send(m)%count * ke
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_SG_to_UG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
do k = 1, ke
do l = 1, UG_domain%SG2UG%send(m)%count
i = UG_domain%SG2UG%send(m)%i(l)+ioff
j = UG_domain%SG2UG%send(m)%j(l)+joff
pos = pos+1
buffer(pos) = field_SG(i,j,k)
end do
end do
to_pe = UG_domain%SG2UG%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!--- unpack the buffer
buffer_pos = 0
do m = 1, UG_domain%SG2UG%nrecv
pos = buffer_pos
do k = 1, ke
do l = 1, UG_domain%SG2UG%recv(m)%count
pos = pos+1
i = UG_domain%SG2UG%recv(m)%i(l)
field_UG(i,k) = buffer(pos)
enddo
enddo
buffer_pos = pos
enddo
call mpp_sync_self( )
end SUBROUTINE mpp_pass_SG_to_UG_l4_3d
! This subroutine passes data from the unstructured domain back to a
! structured domain (domain2D). Currently implemented only for data at grid
! cell centers. A usage sketch follows the 2-D wrapper below.
SUBROUTINE mpp_pass_UG_to_SG_l4_2d(UG_domain, field_UG, field_SG)
type(domainUG), intent(in) :: UG_domain
logical(4), intent(in) :: field_UG(:)
logical(4), intent(inout) :: field_SG(:,:)
logical(4) :: field3D_SG(size(field_SG,1),size(field_SG,2),1)
logical(4) :: field2D_UG(size(field_UG(:)), 1)
pointer(ptr_SG, field3D_SG)
pointer(ptr_UG, field2D_UG)
ptr_SG = LOC(field_SG)
ptr_UG = LOC(field_UG)
call mpp_pass_UG_to_SG(UG_domain, field2D_UG, field3D_SG)
end SUBROUTINE mpp_pass_UG_to_SG_l4_2d
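!
! A minimal usage sketch (not part of this module): the reverse operation,
! scattering an unstructured-domain field back onto the structured grid
! through the generic interface mpp_pass_UG_to_SG. Names, kinds and bounds
! are illustrative assumptions only:
!
!   type(domainUG) :: ug_domain                  ! previously created unstructured domain
!   real           :: t_ug(ls:le)                ! field on the unstructured compute domain
!   real           :: t_sg(isd:ied, jsd:jed)     ! field on the structured data (or compute) domain
!   call mpp_pass_UG_to_SG(ug_domain, t_ug, t_sg)
!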
SUBROUTINE mpp_pass_UG_to_SG_l4_3d(UG_domain, field_UG, field_SG)
type(domainUG), intent(in) :: UG_domain
logical(4), intent(in) :: field_UG(:,:)
logical(4), intent(inout) :: field_SG(:,:,:)
logical(4) :: buffer(mpp_domains_stack_size)
character(len=8) :: text
integer :: i, j, k, l, m, ke, from_pe, to_pe
integer :: buffer_pos, pos, msgsize, ioff, joff
!--- check whether the data is on the compute domain or the data domain
if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%compute%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%compute%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%compute%begin
joff = 1 - UG_domain%SG_domain%y(1)%compute%begin
else if(size(field_SG,1) .EQ. UG_domain%SG_domain%x(1)%data%size .AND. &
size(field_SG,2) .EQ. UG_domain%SG_domain%y(1)%data%size) then
ioff = 1 - UG_domain%SG_domain%x(1)%data%begin
joff = 1 - UG_domain%SG_domain%y(1)%data%begin
else
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D: field_SG must match either compute domain or data domain.' )
endif
ke = size(field_SG,3)
buffer_pos = 0
!---pre-post receive
do m = 1, UG_domain%UG2SG%nrecv
msgsize = UG_domain%UG2SG%recv(m)%count * ke
if( msgsize.GT.0 )then
from_pe = UG_domain%UG2SG%recv(m)%pe
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
!---pack and send data
do m = 1, UG_domain%UG2SG%nsend
pos = buffer_pos
msgsize = UG_domain%UG2SG%send(m)%count * ke
if( msgsize.GT.0 )then
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
write( text,'(i8)' )mpp_domains_stack_hwm
call mpp_error( FATAL, 'mpp_pass_UG_to_SG_3D: mpp_domains_stack overflow, '// &
'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
do k = 1, ke
do l = 1, UG_domain%UG2SG%send(m)%count
pos = pos+1
i = UG_domain%UG2SG%send(m)%i(l)
buffer(pos) = field_UG(i,k)
end do
end do
to_pe = UG_domain%UG2SG%send(m)%pe
call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1 )
buffer_pos = buffer_pos + msgsize
end if
end do
call mpp_sync_self(check=EVENT_RECV)
!--- unpack the buffer
buffer_pos = 0
do m = 1, UG_domain%UG2SG%nrecv
pos = buffer_pos
do k = 1, ke
do l = 1, UG_domain%UG2SG%recv(m)%count
pos = pos+1
i = UG_domain%UG2SG%recv(m)%i(l)+ioff
j = UG_domain%UG2SG%recv(m)%j(l)+joff
field_SG(i,j,k) = buffer(pos)
enddo
enddo
buffer_pos = pos
enddo
call mpp_sync_self( )
end SUBROUTINE mpp_pass_UG_to_SG_l4_3d
# 843 "../mpp/include/mpp_unstruct_domain.inc" 2
# 1 "../mpp/include/mpp_global_field_ug.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_ug_r8_2d( domain, local, global, flags, default_data)
type(domainUG), intent(in) :: domain
real(8), intent(in) :: local(:)
real(8), intent(out) :: global(:)
integer, intent(in), optional :: flags
real(8), intent(in), optional :: default_data
real(8) :: local3D (size( local,1),1)
real(8) :: global3D(size(global,1),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_r8_2d
subroutine mpp_global_field2D_ug_r8_3d( domain, local, global, flags, default_data)
!get a global field from a local field
!for the unstructured domain the local field must be on the compute domain
!(a usage sketch follows this subroutine)
type(domainUG), intent(in) :: domain
real(8), intent(in) :: local(domain%compute%begin:,:)
real(8), intent(out) :: global(domain%global%begin:,:)
integer, intent(in), optional :: flags
real(8), intent(in), optional :: default_data
integer :: l, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, tile_id
integer :: ke, lsc, lec, ls, le, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
real(8) :: clocal (domain%compute%size*size(local,2))
real(8) :: cremote(domain%compute%max_size*size(local,2))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD_UG user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: must first call mpp_domains_init.' )
root_only = .FALSE.
if( PRESENT(flags) ) root_only = BTEST(flags, ROOT_GLOBAL)
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,2).NE.size(global,2) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD_UG: mismatch of second dimension size of global and local')
if( size(global,1).NE.domain%global%size)then
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: incoming arrays do not match domain.' )
endif
endif
if( size(local,1).NE.domain%compute%size )then
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: incoming field array must match compute domain.' )
end if
ke = size(local,2)
lsc = domain%compute%begin; lec = domain%compute%end
nword_me = (lec-lsc+1)*ke
! make contiguous array from compute domain
m = 0
if(global_on_this_pe) then
if(PRESENT(default_data)) then
global = default_data
else
global = 0
endif
do k = 1, ke
do l = lsc, lec
m = m + 1
clocal(m) = local(l,k)
global(l,k) = clocal(m) !always fill local domain directly
end do
end do
else
do k = 1, ke
do l = lsc, lec
m = m + 1
clocal(m) = local(l,k)
end do
end do
endif
!fill off-domains (note loops begin at an offset of 1)
tile_id = domain%tile_id
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id .NE. tile_id ) cycle
nwords = domain%list(rpos)%compute%size * ke
call mpp_recv(cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
m = 0
ls = domain%list(rpos)%compute%begin; le = domain%list(rpos)%compute%end
do k = 1,ke
do l = ls, le
m = m + 1
global(l,k) = cremote(m)
end do
end do
end do
endif
else
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id.NE. tile_id ) cycle ! global field only within tile
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id .NE. tile_id ) cycle ! global field only within tile
nwords = domain%list(rpos)%compute%size * ke
call mpp_recv( cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
m = 0
ls = domain%list(rpos)%compute%begin; le = domain%list(rpos)%compute%end
do k = 1,ke
do l = ls, le
m = m + 1
global(l,k) = cremote(m)
end do
end do
enddo
endif
call mpp_sync_self()
end subroutine mpp_global_field2D_ug_r8_3d
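!
! A minimal usage sketch (not part of this module): gathering the local
! compute-domain portion of an unstructured field into the full (global)
! unstructured field through the generic interface mpp_global_field_ug.
! Names, kinds and bounds are illustrative assumptions only:
!
!   type(domainUG) :: ug_domain                  ! previously created unstructured domain
!   real(8)        :: t_local(ls:le, nk)         ! local compute-domain data
!   real(8)        :: t_global(nland, nk)        ! full unstructured (global) domain on each PE
!   call mpp_global_field_ug(ug_domain, t_local, t_global)
!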
subroutine mpp_global_field2D_ug_r8_4d( domain, local, global, flags, default_data )
type(domainUG), intent(in) :: domain
real(8), intent(in) :: local(:,:,:)
real(8), intent(out) :: global(:,:,:)
integer, intent(in), optional :: flags
real(8), intent(in), optional :: default_data
real(8) :: local3D (size( local,1),size( local,2)*size( local,3))
real(8) :: global3D(size(global,1),size(global,2)*size(global,3))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_r8_4d
subroutine mpp_global_field2D_ug_r8_5d( domain, local, global, flags, default_data )
type(domainUG), intent(in) :: domain
real(8), intent(in) :: local(:,:,:,:)
real(8), intent(out) :: global(:,:,:,:)
integer, intent(in), optional :: flags
real(8), intent(in), optional :: default_data
real(8) :: local3D (size( local,1),size( local,2)*size( local,3)*size( local,4))
real(8) :: global3D(size(global,1),size(global,2)*size(global,3)*size(global,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_r8_5d
# 855 "../mpp/include/mpp_unstruct_domain.inc" 2
# 1 "../mpp/include/mpp_global_field_ug.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_ug_i8_2d( domain, local, global, flags, default_data)
type(domainUG), intent(in) :: domain
integer(8), intent(in) :: local(:)
integer(8), intent(out) :: global(:)
integer, intent(in), optional :: flags
integer(8), intent(in), optional :: default_data
integer(8) :: local3D (size( local,1),1)
integer(8) :: global3D(size(global,1),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_i8_2d
subroutine mpp_global_field2D_ug_i8_3d( domain, local, global, flags, default_data)
!get a global field from a local field
!for the unstructured domain the local field must be on the compute domain
type(domainUG), intent(in) :: domain
integer(8), intent(in) :: local(domain%compute%begin:,:)
integer(8), intent(out) :: global(domain%global%begin:,:)
integer, intent(in), optional :: flags
integer(8), intent(in), optional :: default_data
integer :: l, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, tile_id
integer :: ke, lsc, lec, ls, le, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
integer(8) :: clocal (domain%compute%size*size(local,2))
integer(8) :: cremote(domain%compute%max_size*size(local,2))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD_UG user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: must first call mpp_domains_init.' )
root_only = .FALSE.
if( PRESENT(flags) ) root_only = BTEST(flags, ROOT_GLOBAL)
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,2).NE.size(global,2) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD_UG: mismatch of second dimension size of global and local')
if( size(global,1).NE.domain%global%size)then
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: incoming arrays do not match domain.' )
endif
endif
if( size(local,1).NE.domain%compute%size )then
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: incoming field array must match compute domain.' )
end if
ke = size(local,2)
lsc = domain%compute%begin; lec = domain%compute%end
nword_me = (lec-lsc+1)*ke
! make contiguous array from compute domain
m = 0
if(global_on_this_pe) then
if(PRESENT(default_data)) then
global = default_data
else
global = 0
endif
do k = 1, ke
do l = lsc, lec
m = m + 1
clocal(m) = local(l,k)
global(l,k) = clocal(m) !always fill local domain directly
end do
end do
else
do k = 1, ke
do l = lsc, lec
m = m + 1
clocal(m) = local(l,k)
end do
end do
endif
!fill off-domains (note loops begin at an offset of 1)
tile_id = domain%tile_id
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id .NE. tile_id ) cycle
nwords = domain%list(rpos)%compute%size * ke
call mpp_recv(cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
m = 0
ls = domain%list(rpos)%compute%begin; le = domain%list(rpos)%compute%end
do k = 1,ke
do l = ls, le
m = m + 1
global(l,k) = cremote(m)
end do
end do
end do
endif
else
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id.NE. tile_id ) cycle ! global field only within tile
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id .NE. tile_id ) cycle ! global field only within tile
nwords = domain%list(rpos)%compute%size * ke
call mpp_recv( cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
m = 0
ls = domain%list(rpos)%compute%begin; le = domain%list(rpos)%compute%end
do k = 1,ke
do l = ls, le
m = m + 1
global(l,k) = cremote(m)
end do
end do
enddo
endif
call mpp_sync_self()
end subroutine mpp_global_field2D_ug_i8_3d
subroutine mpp_global_field2D_ug_i8_4d( domain, local, global, flags, default_data )
type(domainUG), intent(in) :: domain
integer(8), intent(in) :: local(:,:,:)
integer(8), intent(out) :: global(:,:,:)
integer, intent(in), optional :: flags
integer(8), intent(in), optional :: default_data
integer(8) :: local3D (size( local,1),size( local,2)*size( local,3))
integer(8) :: global3D(size(global,1),size(global,2)*size(global,3))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_i8_4d
subroutine mpp_global_field2D_ug_i8_5d( domain, local, global, flags, default_data )
type(domainUG), intent(in) :: domain
integer(8), intent(in) :: local(:,:,:,:)
integer(8), intent(out) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer(8), intent(in), optional :: default_data
integer(8) :: local3D (size( local,1),size( local,2)*size( local,3)*size( local,4))
integer(8) :: global3D(size(global,1),size(global,2)*size(global,3)*size(global,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_i8_5d
# 868 "../mpp/include/mpp_unstruct_domain.inc" 2
# 1 "../mpp/include/mpp_global_field_ug.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_ug_r4_2d( domain, local, global, flags, default_data)
type(domainUG), intent(in) :: domain
real(4), intent(in) :: local(:)
real(4), intent(out) :: global(:)
integer, intent(in), optional :: flags
real(4), intent(in), optional :: default_data
real(4) :: local3D (size( local,1),1)
real(4) :: global3D(size(global,1),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_r4_2d
subroutine mpp_global_field2D_ug_r4_3d( domain, local, global, flags, default_data)
!get a global field from a local field
!for the unstructured domain the local field must be on the compute domain
type(domainUG), intent(in) :: domain
real(4), intent(in) :: local(domain%compute%begin:,:)
real(4), intent(out) :: global(domain%global%begin:,:)
integer, intent(in), optional :: flags
real(4), intent(in), optional :: default_data
integer :: l, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, tile_id
integer :: ke, lsc, lec, ls, le, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
real(4) :: clocal (domain%compute%size*size(local,2))
real(4) :: cremote(domain%compute%max_size*size(local,2))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD_UG user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: must first call mpp_domains_init.' )
root_only = .FALSE.
if( PRESENT(flags) ) root_only = BTEST(flags, ROOT_GLOBAL)
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,2).NE.size(global,2) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD_UG: mismatch of second dimension size of global and local')
if( size(global,1).NE.domain%global%size)then
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: incoming arrays do not match domain.' )
endif
endif
if( size(local,1).NE.domain%compute%size )then
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: incoming field array must match compute domain.' )
end if
ke = size(local,2)
lsc = domain%compute%begin; lec = domain%compute%end
nword_me = (lec-lsc+1)*ke
! make contiguous array from compute domain
m = 0
if(global_on_this_pe) then
if(PRESENT(default_data)) then
global = default_data
else
global = 0
endif
do k = 1, ke
do l = lsc, lec
m = m + 1
clocal(m) = local(l,k)
global(l,k) = clocal(m) !always fill local domain directly
end do
end do
else
do k = 1, ke
do l = lsc, lec
m = m + 1
clocal(m) = local(l,k)
end do
end do
endif
!fill off-domains (note loops begin at an offset of 1)
tile_id = domain%tile_id
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id .NE. tile_id ) cycle
nwords = domain%list(rpos)%compute%size * ke
call mpp_recv(cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
m = 0
ls = domain%list(rpos)%compute%begin; le = domain%list(rpos)%compute%end
do k = 1,ke
do l = ls, le
m = m + 1
global(l,k) = cremote(m)
end do
end do
end do
endif
else
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id.NE. tile_id ) cycle ! global field only within tile
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id .NE. tile_id ) cycle ! global field only within tile
nwords = domain%list(rpos)%compute%size * ke
call mpp_recv( cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
m = 0
ls = domain%list(rpos)%compute%begin; le = domain%list(rpos)%compute%end
do k = 1,ke
do l = ls, le
m = m + 1
global(l,k) = cremote(m)
end do
end do
enddo
endif
call mpp_sync_self()
end subroutine mpp_global_field2D_ug_r4_3d
subroutine mpp_global_field2D_ug_r4_4d( domain, local, global, flags, default_data )
type(domainUG), intent(in) :: domain
real(4), intent(in) :: local(:,:,:)
real(4), intent(out) :: global(:,:,:)
integer, intent(in), optional :: flags
real(4), intent(in), optional :: default_data
real(4) :: local3D (size( local,1),size( local,2)*size( local,3))
real(4) :: global3D(size(global,1),size(global,2)*size(global,3))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_r4_4d
subroutine mpp_global_field2D_ug_r4_5d( domain, local, global, flags, default_data )
type(domainUG), intent(in) :: domain
real(4), intent(in) :: local(:,:,:,:)
real(4), intent(out) :: global(:,:,:,:)
integer, intent(in), optional :: flags
real(4), intent(in), optional :: default_data
real(4) :: local3D (size( local,1),size( local,2)*size( local,3)*size( local,4))
real(4) :: global3D(size(global,1),size(global,2)*size(global,3)*size(global,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_r4_5d
# 882 "../mpp/include/mpp_unstruct_domain.inc" 2
# 1 "../mpp/include/mpp_global_field_ug.h" 1
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
subroutine mpp_global_field2D_ug_i4_2d( domain, local, global, flags, default_data)
type(domainUG), intent(in) :: domain
integer(4), intent(in) :: local(:)
integer(4), intent(out) :: global(:)
integer, intent(in), optional :: flags
integer(4), intent(in), optional :: default_data
integer(4) :: local3D (size( local,1),1)
integer(4) :: global3D(size(global,1),1)
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC( local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_i4_2d
subroutine mpp_global_field2D_ug_i4_3d( domain, local, global, flags, default_data)
!get a global field from a local field
!for the unstructured domain the local field must be on the compute domain
type(domainUG), intent(in) :: domain
integer(4), intent(in) :: local(domain%compute%begin:,:)
integer(4), intent(out) :: global(domain%global%begin:,:)
integer, intent(in), optional :: flags
integer(4), intent(in), optional :: default_data
integer :: l, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, tile_id
integer :: ke, lsc, lec, ls, le, nword_me
integer :: ipos, jpos
logical :: xonly, yonly, root_only, global_on_this_pe
integer(4) :: clocal (domain%compute%size*size(local,2))
integer(4) :: cremote(domain%compute%max_size*size(local,2))
integer :: stackuse
character(len=8) :: text
pointer( ptr_local, clocal )
pointer( ptr_remote, cremote )
stackuse = size(clocal(:))+size(cremote(:))
if( stackuse.GT.mpp_domains_stack_size )then
write( text, '(i8)' )stackuse
call mpp_error( FATAL, &
'MPP_DO_GLOBAL_FIELD_UG user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
end if
mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, stackuse )
ptr_local = LOC(mpp_domains_stack)
ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: must first call mpp_domains_init.' )
root_only = .FALSE.
if( PRESENT(flags) ) root_only = BTEST(flags, ROOT_GLOBAL)
global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
ipos = 0; jpos = 0
if(global_on_this_pe ) then
if(size(local,2).NE.size(global,2) ) call mpp_error( FATAL, &
'MPP_GLOBAL_FIELD_UG: mismatch of second dimension size of global and local')
if( size(global,1).NE.domain%global%size)then
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: incoming arrays do not match domain.' )
endif
endif
if( size(local,1).NE.domain%compute%size )then
call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_UG: incoming field array must match compute domain.' )
end if
ke = size(local,2)
lsc = domain%compute%begin; lec = domain%compute%end
nword_me = (lec-lsc+1)*ke
! make contiguous array from compute domain
m = 0
if(global_on_this_pe) then
if(PRESENT(default_data)) then
global = default_data
else
global = 0
endif
do k = 1, ke
do l = lsc, lec
m = m + 1
clocal(m) = local(l,k)
global(l,k) = clocal(m) !always fill local domain directly
end do
end do
else
do k = 1, ke
do l = lsc, lec
m = m + 1
clocal(m) = local(l,k)
end do
end do
endif
!fill off-domains (note loops begin at an offset of 1)
tile_id = domain%tile_id
nd = size(domain%list(:))
if(root_only) then
if(domain%pe .NE. domain%tile_root_pe) then
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
else
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id .NE. tile_id ) cycle
nwords = domain%list(rpos)%compute%size * ke
call mpp_recv(cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
m = 0
ls = domain%list(rpos)%compute%begin; le = domain%list(rpos)%compute%end
do k = 1,ke
do l = ls, le
m = m + 1
global(l,k) = cremote(m)
end do
end do
end do
endif
else
do n = 1,nd-1
lpos = mod(domain%pos+nd-n,nd)
if( domain%list(lpos)%tile_id.NE. tile_id ) cycle ! global field only within tile
call mpp_send( clocal(1), plen=nword_me, to_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
end do
do n = 1,nd-1
rpos = mod(domain%pos+n,nd)
if( domain%list(rpos)%tile_id .NE. tile_id ) cycle ! global field only within tile
nwords = domain%list(rpos)%compute%size * ke
call mpp_recv( cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
m = 0
ls = domain%list(rpos)%compute%begin; le = domain%list(rpos)%compute%end
do k = 1,ke
do l = ls, le
m = m + 1
global(l,k) = cremote(m)
end do
end do
enddo
endif
call mpp_sync_self()
end subroutine mpp_global_field2D_ug_i4_3d
subroutine mpp_global_field2D_ug_i4_4d( domain, local, global, flags, default_data )
type(domainUG), intent(in) :: domain
integer(4), intent(in) :: local(:,:,:)
integer(4), intent(out) :: global(:,:,:)
integer, intent(in), optional :: flags
integer(4), intent(in), optional :: default_data
integer(4) :: local3D (size( local,1),size( local,2)*size( local,3))
integer(4) :: global3D(size(global,1),size(global,2)*size(global,3))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_i4_4d
subroutine mpp_global_field2D_ug_i4_5d( domain, local, global, flags, default_data )
type(domainUG), intent(in) :: domain
integer(4), intent(in) :: local(:,:,:,:)
integer(4), intent(out) :: global(:,:,:,:)
integer, intent(in), optional :: flags
integer(4), intent(in), optional :: default_data
integer(4) :: local3D (size( local,1),size( local,2)*size( local,3)*size( local,4))
integer(4) :: global3D(size(global,1),size(global,2)*size(global,3)*size(global,4))
pointer( lptr, local3D )
pointer( gptr, global3D )
lptr = LOC(local)
gptr = LOC(global)
call mpp_global_field_UG( domain, local3D, global3D, flags, default_data )
end subroutine mpp_global_field2D_ug_i4_5d
# 895 "../mpp/include/mpp_unstruct_domain.inc" 2
# 2790 "../mpp/mpp_domains.F90" 2
end module mpp_domains_mod
!
!
! Any module or program unit using mpp_domains_mod
! must contain the line
!
! use mpp_domains_mod
!
! mpp_domains_mod uses mpp_mod, and is therefore subject to the compiling and linking requirements of that module; a minimal program sketch follows.
!
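!
! A minimal sketch of such a program unit (the program name and the chosen
! stack size are illustrative assumptions only):
!
!   program my_model
!     use mpp_mod,         only: mpp_init, mpp_exit
!     use mpp_domains_mod, only: mpp_domains_init, mpp_domains_set_stack_size, &
!                                mpp_domains_exit
!     call mpp_init()
!     call mpp_domains_init()
!     call mpp_domains_set_stack_size(1000000)  ! enlarge if a stack-overflow error is reported
!     ! ... define domains, perform updates, gather global fields ...
!     call mpp_domains_exit()
!     call mpp_exit()
!   end program my_model
!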
!
! mpp_domains_mod uses standard f90, and has no special
! requirements. There are some OS-dependent
! pre-processor directives that you might need to modify on
! non-SGI/Cray systems and compilers. The portability of mpp_mod
! obviously is a constraint, since this module is built on top of
! it. Contact me, Balaji, SGI/GFDL, with questions.
!
!
! The mpp_domains source consists of the main source file
! mpp_domains.F90 and also requires the following include files:
!
! GFDL users can check it out of the main CVS repository as part of
! the mpp CVS module. The current public tag is galway.
! External users can download the latest mpp package here. Public access
! to the GFDL CVS repository will soon be made available.
!
!