!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the FV3 dynamical core.
!*
!* The FV3 dynamical core is free software: you can redistribute it
!* and/or modify it under the terms of the
!* GNU Lesser General Public License as published by the
!* Free Software Foundation, either version 3 of the License, or
!* (at your option) any later version.
!*
!* The FV3 dynamical core is distributed in the hope that it will be
!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
!* See the GNU Lesser General Public License for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with the FV3 dynamical core.
!* If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
!>@brief The module 'fv_nesting' is a collection of routines pertaining to grid nesting
!! \cite harris2013two.
#undef MULTI_GASES
module fv_nesting_mod
! Modules Included:
!
! Module Name         | Functions Included
! --------------------|--------------------------------------------------------------
! boundary_mod        | update_coarse_grid, nested_grid_BC_send, nested_grid_BC_recv,
!                     | nested_grid_BC_save_proc, nested_grid_BC, nested_grid_BC_apply_intT
! constants_mod       | grav, pi=>pi_8, radius, hlv, rdgas, cp_air, rvgas, cp_vapor, kappa
! field_manager_mod   | MODEL_ATMOS
! fv_arrays_mod       | fv_grid_type, fv_flags_type, fv_atmos_type, fv_nest_type,
!                     | fv_diag_type, fv_nest_BC_type_3D, allocate_fv_nest_BC_type,
!                     | fv_grid_bounds_type
! fv_diagnostics_mod  | sphum_ll_fix, range_check
! fv_grid_utils_mod   | ptop_min, g_sum, cubed_to_latlon, f_p
! fv_mapz_mod         | mappm, remap_2d
! fv_mp_mod           | is, ie, js, je, isd, ied, jsd, jed, isc, iec, jsc, jec,
!                     | is_master, mp_reduce_sum
! fv_restart_mod      | d2a_setup, d2c_setup
! fv_sg_mod           | neg_adj3
! fv_timing_mod       | timing_on, timing_off
! init_hydro_mod      | p_var
! mpp_mod             | mpp_sync_self, mpp_sync, mpp_send, mpp_recv, mpp_error, FATAL
! mpp_domains_mod     | mpp_update_domains, mpp_global_field, mpp_get_data_domain,
!                     | mpp_get_compute_domain, mpp_get_global_domain, DGRID_NE,
!                     | domain2D, mpp_global_sum, BITWISE_EFP_SUM, BITWISE_EXACT_SUM
! sw_core_mod         | divergence_corner, divergence_corner_nest
! tracer_manager_mod  | get_tracer_index
!
use mpp_domains_mod, only: mpp_update_domains
use mpp_domains_mod, only: mpp_global_field
use field_manager_mod, only: MODEL_ATMOS
use tracer_manager_mod, only: get_tracer_index
use fv_sg_mod, only: neg_adj3
use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain, mpp_get_global_domain
use mpp_domains_mod, only: AGRID, CGRID_NE, DGRID_NE, mpp_update_domains, domain2D
use mpp_mod, only: mpp_sync_self, mpp_sync, mpp_send, mpp_recv, mpp_error, FATAL, mpp_pe, WARNING, NOTE
use mpp_domains_mod, only: mpp_global_sum, BITWISE_EFP_SUM, BITWISE_EXACT_SUM
use boundary_mod, only: update_coarse_grid
use boundary_mod, only: nested_grid_BC_send, nested_grid_BC_recv, nested_grid_BC_save_proc
use boundary_mod, only: nested_grid_BC, nested_grid_BC_apply_intT
use fv_arrays_mod, only: fv_grid_type, fv_flags_type, fv_atmos_type, fv_nest_type, fv_diag_type, fv_nest_BC_type_3D
use fv_arrays_mod, only: allocate_fv_nest_BC_type, fv_atmos_type, fv_grid_bounds_type, deallocate_fv_nest_BC_type
use fv_grid_utils_mod, only: ptop_min, g_sum, cubed_to_latlon, f_p
use init_hydro_mod, only: p_var
#ifdef OVERLOAD_R4
use constantsR4_mod, only: grav, pi=>pi_8, radius, hlv, rdgas, cp_air, rvgas, cp_vapor, kappa
#else
use constants_mod, only: grav, pi=>pi_8, radius, hlv, rdgas, cp_air, rvgas, cp_vapor, kappa
#endif
use fv_mapz_mod, only: mappm, remap_2d
use fv_timing_mod, only: timing_on, timing_off
use fv_mp_mod, only: is_master
use fv_mp_mod, only: mp_reduce_sum, global_nest_domain
use fv_diagnostics_mod, only: sphum_ll_fix, range_check
use fv_restart_mod, only: d2a_setup, d2c_setup !required by the d2c_setup call below (see module table)
use sw_core_mod, only: divergence_corner, divergence_corner_nest
use time_manager_mod, only: time_type
implicit none
logical :: RF_initialized = .false.
logical :: bad_range
real, allocatable :: rf(:), rw(:)
integer :: kmax=1
!Arrays for global grid total energy, used for grid nesting
real, allocatable :: te_2d_coarse(:,:)
real, allocatable :: dp1_coarse(:,:,:)
!For nested grid buffers
!Individual structures are allocated by nested_grid_BC_recv
type(fv_nest_BC_type_3d) :: u_buf, v_buf, uc_buf, vc_buf, delp_buf, delz_buf, pt_buf, w_buf, divg_buf, pe_u_buf,pe_v_buf,pe_b_buf
type(fv_nest_BC_type_3d), allocatable:: q_buf(:)
real, dimension(:,:,:), allocatable, target :: dum_West, dum_East, dum_North, dum_South
private
public :: twoway_nesting, setup_nested_grid_BCs, set_physics_BCs, dealloc_nested_buffers
contains
!>@brief The subroutine 'dealloc_nested_buffers' deallocates the BC buffers
!! so that they can be resized after nest motion.
!! Ramstrom/HRD Moving Nest upgrade
subroutine dealloc_nested_buffers(Atm)
type(fv_atmos_type), intent(in) :: Atm
integer :: n, ncnst
!logical :: dummy = .false.
call deallocate_fv_nest_BC_type(u_buf)
call deallocate_fv_nest_BC_type(v_buf)
call deallocate_fv_nest_BC_type(uc_buf)
call deallocate_fv_nest_BC_type(vc_buf)
call deallocate_fv_nest_BC_type(delp_buf)
call deallocate_fv_nest_BC_type(delz_buf)
call deallocate_fv_nest_BC_type(pt_buf)
call deallocate_fv_nest_BC_type(w_buf)
call deallocate_fv_nest_BC_type(divg_buf)
call deallocate_fv_nest_BC_type(pe_u_buf)
call deallocate_fv_nest_BC_type(pe_v_buf)
call deallocate_fv_nest_BC_type(pe_b_buf)
ncnst = size(q_buf)
do n=1,ncnst
call deallocate_fv_nest_BC_type(q_buf(n))
end do
! TODO remove the allocation steps
! Reallocate based on the new Atm structure
!ns = Atm%neststruct%nsponge
!! The code for using the BC buffers will allocate when needed in boundary.F90::nested_grid_BC_recv()
!allocate_fv_nest_BC_type_3D_Atm(BC,Atm,ns,istag,jstag,dummy)
! Rely on the previously set values for istag, jstag
! call allocate_fv_nest_BC_type(Atm%neststruct%delp_BC,Atm,ns,0,0,dummy)
! call allocate_fv_nest_BC_type(Atm%neststruct%u_BC,Atm,ns,0,1,dummy)
! call allocate_fv_nest_BC_type(Atm%neststruct%v_BC,Atm,ns,1,0,dummy)
! call allocate_fv_nest_BC_type(Atm%neststruct%uc_BC,Atm,ns,1,0,dummy)
! call allocate_fv_nest_BC_type(Atm%neststruct%vc_BC,Atm,ns,0,1,dummy)
! call allocate_fv_nest_BC_type(Atm%neststruct%divg_BC,Atm,ns,1,1,dummy)
! if (ncnst > 0) then
! allocate(Atm%neststruct%q_BC(ncnst))
! do n=1,ncnst
! call allocate_fv_nest_BC_type(Atm%neststruct%q_BC(n),Atm,ns,0,0,dummy)
! enddo
! endif
!call allocate_fv_nest_BC_type_3D_Atm(u_buf, Atm, ns, u_buf%istag, u_buf%jstag, dummy)
!call allocate_fv_nest_BC_type_3D_Atm(v_buf, Atm, ns, v_buf%istag, v_buf%jstag, dummy)
!call allocate_fv_nest_BC_type_3D_Atm(uc_buf, Atm, ns, uc_buf%istag, uc_buf%jstag, dummy)
!call allocate_fv_nest_BC_type_3D_Atm(vc_buf, Atm, ns, vc_buf%istag, vc_buf%jstag, dummy)
!call allocate_fv_nest_BC_type_3D_Atm(delp_buf, Atm, ns, 0, 0, dummy)
!call allocate_fv_nest_BC_type_3D_Atm(delz_buf, Atm, ns, delz_buf%istag, delz_buf%jstag, dummy)
!call allocate_fv_nest_BC_type_3D_Atm(pt_buf, Atm, ns, pt_buf%istag, pt_buf%jstag, dummy)
!call allocate_fv_nest_BC_type_3D_Atm(pkz_buf, Atm, ns, pkz_buf%istag, pkz_buf%jstag, dummy)
!call allocate_fv_nest_BC_type_3D_Atm(w_buf, Atm, ns, w_buf%istag, w_buf%jstag, dummy)
!call allocate_fv_nest_BC_type_3D_Atm(divg_buf, Atm, ns, divg_buf%istag, divg_buf%jstag, dummy)
!do n=1,ncnst
! call allocate_fv_nest_BC_type_3D_Atm(q_buf(n), Atm, ns, q_buf(n)%istag, q_buf(n)%jstag, dummy)
!end do
end subroutine dealloc_nested_buffers
!>@brief The subroutine 'setup_nested_grid_BCs' fetches data from the coarse grid
!! to set up the nested-grid boundary conditions.
subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, &
u, v, w, pt, delp, delz,q, uc, vc, &
#ifdef USE_COND
q_con, &
#ifdef MOIST_CAPPA
cappa, &
#endif
#endif
nested, inline_q, make_nh, ng, &
gridstruct, flagstruct, neststruct, &
nest_timestep, tracer_nest_timestep, &
domain, parent_grid, bd, nwat, ak, bk)
type(fv_grid_bounds_type), intent(IN) :: bd
real, intent(IN) :: zvir
integer, intent(IN) :: npx, npy, npz
integer, intent(IN) :: ncnst, ng, nwat
logical, intent(IN) :: inline_q, make_nh,nested
real, intent(IN), dimension(npz+1) :: ak, bk !vertical interface coefficients (npz+1 levels)
real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u !< D grid zonal wind (m/s)
real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v !< D grid meridional wind (m/s)
real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1:) !< W (m/s)
real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) !< temperature (K)
real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) !< pressure thickness (pascal)
real, intent(inout) :: delz(bd%is: ,bd%js: ,1:) !< height thickness (m)
real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst) !< specific humidity and constituents
real, intent(inout) :: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) !< (uc,vc) mostly used as the C grid winds
real, intent(inout) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
#ifdef USE_COND
real, intent(inout) :: q_con( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
#ifdef MOIST_CAPPA
real, intent(inout) :: cappa( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
#endif
#endif
integer, intent(INOUT) :: nest_timestep, tracer_nest_timestep
type(fv_atmos_type), pointer, intent(IN) :: parent_grid
type(fv_grid_type), intent(INOUT) :: gridstruct
type(fv_flags_type), intent(INOUT) :: flagstruct
type(fv_nest_type), intent(INOUT), target :: neststruct
type(domain2d), intent(INOUT) :: domain
real :: divg(bd%isd:bd%ied+1,bd%jsd:bd%jed+1, npz)
real :: ua(bd%isd:bd%ied,bd%jsd:bd%jed)
real :: va(bd%isd:bd%ied,bd%jsd:bd%jed)
real :: pe_ustag(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz+1)
real :: pe_vstag(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz+1)
real :: pe_bstag(bd%isd:bd%ied+1,bd%jsd:bd%jed+1,npz+1)
real, parameter :: a13 = 1./3.
integer :: i,j,k,n,p, sphum, npz_coarse, nnest
logical :: do_pd
type(fv_nest_BC_type_3d) :: delp_lag_BC, lag_BC, pe_lag_BC, pe_eul_BC
type(fv_nest_BC_type_3d) :: lag_u_BC, pe_u_lag_BC, pe_u_eul_BC
type(fv_nest_BC_type_3d) :: lag_v_BC, pe_v_lag_BC, pe_v_eul_BC
type(fv_nest_BC_type_3d) :: lag_b_BC, pe_b_lag_BC, pe_b_eul_BC
!local pointers
logical, pointer :: child_grids(:)
integer :: is, ie, js, je
integer :: isd, ied, jsd, jed
integer :: nest_level
is = bd%is
ie = bd%ie
js = bd%js
je = bd%je
isd = bd%isd
ied = bd%ied
jsd = bd%jsd
jed = bd%jed
child_grids => neststruct%child_grids
!IF nested, set up nested grid BCs for time-interpolation
!(actually applying the BCs is done in dyn_core)
!For multiple grids: Each grid has ONE parent but potentially MULTIPLE nests
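!Within the nest-level loop below, a grid posts receives from its parent
!on the pass where nest_level equals its own level, and posts sends to its
!children on the pass where nest_level exceeds its level by one, so each
!parent/child exchange is matched exactly once.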
nest_timestep = 0
if (.not. inline_q) tracer_nest_timestep = 0
if (neststruct%nested .and. (.not. (neststruct%first_step) .or. make_nh) ) then
do_pd = .true.
call set_BCs_t0(ncnst, flagstruct%hydrostatic, neststruct)
else
!On first timestep the t0 BCs are not initialized and may contain garbage
do_pd = .false.
end if
!compute uc/vc for nested-grid BCs
!!! CLEANUP: if we compute uc/vc here we don't need to do on the first call of c_sw, right?
if (ANY(neststruct%child_grids)) then
call timing_on('COMM_TOTAL')
!!! CLEANUP: could we make this a non-blocking operation?
!!! Is this needed? It is on the initialization step.
call mpp_update_domains(delp, domain) !This is needed to make sure delp is updated for pe calculations
call mpp_update_domains(u, v, &
domain, gridtype=DGRID_NE, complete=.true.)
call timing_off('COMM_TOTAL')
!$OMP parallel do default(none) shared(isd,jsd,ied,jed,is,ie,js,je,npx,npy,npz, &
!$OMP gridstruct,flagstruct,bd,u,v,uc,vc,nested,divg) &
!$OMP private(ua,va)
do k=1,npz
call d2c_setup(u(isd,jsd,k), v(isd,jsd,k), &
ua, va, &
uc(isd,jsd,k), vc(isd,jsd,k), flagstruct%nord>0, &
isd,ied,jsd,jed, is,ie,js,je, npx,npy, &
gridstruct%grid_type, gridstruct%bounded_domain, &
gridstruct%se_corner, gridstruct%sw_corner, &
gridstruct%ne_corner, gridstruct%nw_corner, &
gridstruct%rsin_u, gridstruct%rsin_v, &
gridstruct%cosa_s, gridstruct%rsin2 )
if (nested) then
call divergence_corner_nest(u(isd,jsd,k), v(isd,jsd,k), ua, va, divg(isd,jsd,k), gridstruct, flagstruct, bd)
else
call divergence_corner(u(isd,jsd,k), v(isd,jsd,k), ua, va, divg(isd,jsd,k), gridstruct, flagstruct, bd)
endif
end do
endif
nnest = neststruct%nlevel
!! LOOPING OVER NEST LEVELS
do nest_level=1,neststruct%num_nest_level
!! Nested grid: receive from parent grid (Lagrangian coordinate, npz_coarse)
if (neststruct%nested .AND. neststruct%nlevel==nest_level ) then
npz_coarse = neststruct%parent_grid%npz
if (.not. allocated(q_buf)) then
allocate(q_buf(ncnst))
endif
call nested_grid_BC_recv(global_nest_domain, 0, 0, npz_coarse, bd, &
delp_buf, nnest)
do n=1,ncnst
call nested_grid_BC_recv(global_nest_domain, 0, 0, npz_coarse, bd, &
q_buf(n), nnest)
enddo
#ifndef SW_DYNAMICS
call nested_grid_BC_recv(global_nest_domain, 0, 0, npz_coarse, bd, &
pt_buf, nnest)
if (.not. flagstruct%hydrostatic) then
call nested_grid_BC_recv(global_nest_domain, 0, 0, npz_coarse, bd, &
w_buf, nnest)
call nested_grid_BC_recv(global_nest_domain, 0, 0, npz_coarse, bd, &
delz_buf, nnest)
endif
#endif
if (neststruct%do_remap_BC_level(nest_level)) then
call nested_grid_BC_recv(global_nest_domain, npz_coarse+1, bd, &
pe_u_buf, pe_v_buf, nnest, gridtype=DGRID_NE)
call nested_grid_BC_recv(global_nest_domain, 1, 1, npz_coarse+1, bd, &
pe_b_buf, nnest)
endif
call nested_grid_BC_recv(global_nest_domain, npz_coarse, bd, &
u_buf, v_buf, nnest, gridtype=DGRID_NE)
call nested_grid_BC_recv(global_nest_domain, npz_coarse, bd, &
uc_buf, vc_buf, nnest, gridtype=CGRID_NE)
call nested_grid_BC_recv(global_nest_domain, 1, 1, npz_coarse, bd, &
divg_buf, nnest)
endif
!! Coarse grid: send to child grids (Lagrangian coordinate, npz_coarse)
if (ANY (neststruct%child_grids) .AND. neststruct%nlevel==nest_level-1) then
call nested_grid_BC_send(delp, global_nest_domain, 0, 0, nnest+1)
do n=1,ncnst
call nested_grid_BC_send(q(:,:,:,n), global_nest_domain, 0, 0, nnest+1)
enddo
#ifndef SW_DYNAMICS
call nested_grid_BC_send(pt, global_nest_domain, 0, 0, nnest+1)
if (.not. flagstruct%hydrostatic) then
call nested_grid_BC_send(w, global_nest_domain, 0, 0, nnest+1)
call nested_grid_BC_send(delz, global_nest_domain, 0, 0, nnest+1)
endif
#endif
if (neststruct%do_remap_BC_level(nest_level)) then
!Compute and send staggered pressure
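!Interface pressures are integrated hydrostatically from the model top:
! pe(i,j,1) = ak(1) (the model-top pressure), and
! pe(i,j,k+1) = pe(i,j,k) + delp_stag(i,j,k),
!where delp_stag is delp averaged to the staggered point: a 2-point
!average at u and v points and a 4-point average at b (corner) points.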
!u points
!$OMP parallel do default(none) shared(ak,pe_ustag,delp, &
!$OMP is,ie,js,je,npz)
do j=js,je+1
do i=is,ie
pe_ustag(i,j,1) = ak(1)
enddo
do k=1,npz
do i=is,ie
pe_ustag(i,j,k+1) = pe_ustag(i,j,k) + 0.5*(delp(i,j,k)+delp(i,j-1,k))
enddo
enddo
enddo
!v points
!$OMP parallel do default(none) shared(ak,pe_vstag,delp, &
!$OMP is,ie,js,je,npz)
do j=js,je
do i=is,ie+1
pe_vstag(i,j,1) = ak(1)
enddo
do k=1,npz
do i=is,ie+1
pe_vstag(i,j,k+1) = pe_vstag(i,j,k) + 0.5*(delp(i,j,k)+delp(i-1,j,k))
enddo
enddo
enddo
call nested_grid_BC_send(pe_ustag, pe_vstag, global_nest_domain, nnest+1, gridtype=DGRID_NE)
!b points
!$OMP parallel do default(none) shared(ak,pe_bstag,delp, &
!$OMP is,ie,js,je,npz)
do j=js,je+1
do i=is,ie+1
pe_bstag(i,j,1) = ak(1)
enddo
enddo
!At each grid corner only three cells meet, so fill the (otherwise
!undefined) ghost corner value of delp with the mean of its three
!neighbors; the 4-point average below then automatically reduces to a
!3-point average at the corner.
if (is == 1 .and. js == 1) then
do k=1,npz
delp(0,0,k) = a13*(delp(1,1,k) + delp(0,1,k) + delp(1,0,k))
enddo
endif
if (ie == npx-1 .and. js == 1) then
do k=1,npz
delp(npx,0,k) = a13*(delp(npx-1,1,k) + delp(npx,1,k) + delp(npx-1,0,k))
enddo
endif
if (is == 1 .and. je == npy-1) then
do k=1,npz
delp(0,npy,k) = a13*(delp(1,npy-1,k) + delp(0,npy-1,k) + delp(1,npy,k))
enddo
endif
if (ie == npx-1 .and. je == npy-1) then
do k=1,npz
delp(npx,npy,k) = a13*(delp(npx-1,npy-1,k) + delp(npx,npy-1,k) + delp(npx-1,npy,k))
enddo
endif
!$OMP parallel do default(none) shared(ak,pe_bstag,delp, &
!$OMP is,ie,js,je,npz)
do j=js,je+1
do k=1,npz
do i=is,ie+1
pe_bstag(i,j,k+1) = pe_bstag(i,j,k) + &
0.25*(delp(i,j,k)+delp(i-1,j,k)+delp(i,j-1,k)+delp(i-1,j-1,k))
enddo
enddo
enddo
call nested_grid_BC_send(pe_bstag, global_nest_domain, 1, 1, nnest+1)
endif
call nested_grid_BC_send(u, v, global_nest_domain, nnest+1, gridtype=DGRID_NE)
call nested_grid_BC_send(uc, vc, global_nest_domain, nnest+1, gridtype=CGRID_NE)
call nested_grid_BC_send(divg, global_nest_domain, 1, 1, nnest+1)
endif
enddo !NESTLEVELS
!Nested grid: do computations
! Lag: coarse grid, npz_coarse, lagrangian coordinate---receive and use save_proc to copy into lag_BCs
! Eul: nested grid, npz, Eulerian (reference) coordinate
! Remapping from Lag to Eul
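!In sketch form, each remapped BC field passes through:
! call nested_grid_BC_save_proc(...) !interpolate coarse Lagrangian data into a lag_BC buffer
! call setup_eul_delp_BC(...) / setup_eul_pe_BC(...) !build Lagrangian and Eulerian interface pressures
! call remap_BC(...) !vertically remap lag_BC onto the nest's reference (Eulerian) coordinate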
if (nested) then
if (neststruct%do_remap_BC(flagstruct%grid_number)) then
call allocate_fv_nest_BC_type(delp_lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse,ng,0,0,0,.false.)
call allocate_fv_nest_BC_type(lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse,ng,0,0,0,.false.)
call allocate_fv_nest_BC_type(pe_lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,0,0,.false.)
call allocate_fv_nest_BC_type(pe_eul_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1,ng,0,0,0,.false.)
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
delp_lag_BC, delp_buf, pd_in=do_pd)
!The incoming delp is on the coarse grid's lagrangian coordinate. Re-create the reference coordinate
call setup_eul_delp_BC(delp_lag_BC, neststruct%delp_BC, pe_lag_BC, pe_eul_BC, ak, bk, npx, npy, npz, npz_coarse, parent_grid%ptop, bd)
else
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
neststruct%delp_BC, delp_buf, pd_in=do_pd)
endif
#ifndef SW_DYNAMICS
if (neststruct%do_remap_BC(flagstruct%grid_number)) then
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
lag_BC, pt_buf)
!NOTE: need to remap using peln, not pe
call remap_BC(pe_lag_BC, pe_eul_BC, lag_BC, neststruct%pt_BC, npx, npy, npz, npz_coarse, bd, 0, 0, 1, abs(flagstruct%kord_tm), 'pt', do_log_pe=.true.)
else
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
neststruct%pt_BC, pt_buf)
endif
!For whatever reason moving the calls for q BC remapping here avoids problems with cross-restart reproducibility.
if (neststruct%do_remap_BC(flagstruct%grid_number)) then
do n=1,ncnst
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
lag_BC, q_buf(n), pd_in=do_pd)
call remap_BC(pe_lag_BC, pe_eul_BC, lag_BC, neststruct%q_BC(n), npx, npy, npz, npz_coarse, bd, 0, 0, 0, flagstruct%kord_tr, 'q2')
enddo
else
do n=1,ncnst
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
neststruct%q_BC(n), q_buf(n), pd_in=do_pd)
enddo
endif
sphum = get_tracer_index (MODEL_ATMOS, 'sphum')
if (flagstruct%hydrostatic) then
call setup_pt_BC(neststruct%pt_BC, pe_eul_BC, neststruct%q_BC(sphum), npx, npy, npz, zvir, bd)
else
if (neststruct%do_remap_BC(flagstruct%grid_number)) then
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
lag_BC, w_buf)
call remap_BC(pe_lag_BC, pe_eul_BC, lag_BC, neststruct%w_BC, npx, npy, npz, npz_coarse, bd, 0, 0, -1, flagstruct%kord_wz, 'w')
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
lag_BC, delz_buf) !Need a negative-definite method?
call remap_delz_BC(pe_lag_BC, pe_eul_BC, delp_lag_BC, lag_BC, neststruct%delp_BC, neststruct%delz_BC, npx, npy, npz, npz_coarse, bd, 0, 0, 1, flagstruct%kord_wz)
else
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
neststruct%w_BC, w_buf)
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
neststruct%delz_BC, delz_buf) !Need a negative-definite method?
endif
call setup_pt_NH_BC(neststruct%pt_BC, neststruct%delp_BC, neststruct%delz_BC, &
neststruct%q_BC(sphum), neststruct%q_BC, ncnst, &
#ifdef USE_COND
neststruct%q_con_BC, &
#ifdef MOIST_CAPPA
neststruct%cappa_BC, &
#endif
#endif
npx, npy, npz, zvir, bd)
endif
#endif
!!!NOTE: The following require remapping on STAGGERED grids, which requires additional pressure data
if (neststruct%do_remap_BC(flagstruct%grid_number)) then
call allocate_fv_nest_BC_type(pe_u_lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,0,1,.false.)
call allocate_fv_nest_BC_type(pe_u_eul_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1 ,ng,0,0,1,.false.)
call allocate_fv_nest_BC_type(lag_u_BC, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse ,ng,0,0,1,.false.)
call allocate_fv_nest_BC_type(pe_v_lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,1,0,.false.)
call allocate_fv_nest_BC_type(pe_v_eul_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1 ,ng,0,1,0,.false.)
call allocate_fv_nest_BC_type(lag_v_BC, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse ,ng,0,1,0,.false.)
call allocate_fv_nest_BC_type(pe_b_lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,1,1,.false.)
call allocate_fv_nest_BC_type(pe_b_eul_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1 ,ng,0,1,1,.false.)
call allocate_fv_nest_BC_type(lag_b_BC, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse ,ng,0,1,1,.false.)
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse+1, bd, &
pe_u_lag_BC, pe_u_buf)
call setup_eul_pe_BC(pe_u_lag_BC, pe_u_eul_BC, ak, bk, npx, npy, npz, npz_coarse, 0, 1, bd)
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse+1, bd, &
pe_v_lag_BC, pe_v_buf)
call setup_eul_pe_BC(pe_v_lag_BC, pe_v_eul_BC, ak, bk, npx, npy, npz, npz_coarse, 1, 0, bd)
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_b, neststruct%wt_b, 1, 1, npx, npy, npz_coarse+1, bd, &
pe_b_lag_BC, pe_b_buf)
call setup_eul_pe_BC(pe_b_lag_BC, pe_b_eul_BC, ak, bk, npx, npy, npz, npz_coarse, 1, 1, bd)
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, &
lag_u_BC, u_buf)
call remap_BC(pe_u_lag_BC, pe_u_eul_BC, lag_u_BC, neststruct%u_BC, npx, npy, npz, npz_coarse, bd, 0, 1, -1, flagstruct%kord_mt, 'u')
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, &
lag_u_BC, vc_buf)
call remap_BC(pe_u_lag_BC, pe_u_eul_BC, lag_u_BC, neststruct%vc_BC, npx, npy, npz, npz_coarse, bd, 0, 1, -1, flagstruct%kord_mt, 'vc')
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, &
lag_v_BC, v_buf)
call remap_BC(pe_v_lag_BC, pe_v_eul_BC, lag_v_BC, neststruct%v_BC, npx, npy, npz, npz_coarse, bd, 1, 0, -1, flagstruct%kord_mt, 'v')
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, &
lag_v_BC, uc_buf)
call remap_BC(pe_v_lag_BC, pe_v_eul_BC, lag_v_BC, neststruct%uc_BC, npx, npy, npz, npz_coarse, bd, 1, 0, -1, flagstruct%kord_mt, 'uc')
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_b, neststruct%wt_b, 1, 1, npx, npy, npz_coarse, bd, &
lag_b_BC, divg_buf)
call remap_BC(pe_b_lag_BC, pe_b_eul_BC, lag_b_BC, neststruct%divg_BC, npx, npy, npz, npz_coarse, bd, 1, 1, -1, flagstruct%kord_mt, 'divg')
call deallocate_fv_nest_BC_type(delp_lag_BC)
call deallocate_fv_nest_BC_type(lag_BC)
call deallocate_fv_nest_BC_type(pe_lag_BC)
call deallocate_fv_nest_BC_type(pe_eul_BC)
call deallocate_fv_nest_BC_type(pe_u_lag_BC)
call deallocate_fv_nest_BC_type(pe_u_eul_BC)
call deallocate_fv_nest_BC_type(lag_u_BC)
call deallocate_fv_nest_BC_type(pe_v_lag_BC)
call deallocate_fv_nest_BC_type(pe_v_eul_BC)
call deallocate_fv_nest_BC_type(lag_v_BC)
call deallocate_fv_nest_BC_type(pe_b_lag_BC)
call deallocate_fv_nest_BC_type(pe_b_eul_BC)
call deallocate_fv_nest_BC_type(lag_b_BC)
else
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, &
neststruct%u_BC, u_buf)
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, &
neststruct%vc_BC, vc_buf)
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, &
neststruct%v_BC, v_buf)
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, &
neststruct%uc_BC, uc_buf)
call nested_grid_BC_save_proc(global_nest_domain, &
neststruct%ind_b, neststruct%wt_b, 1, 1, npx, npy, npz_coarse, bd, &
neststruct%divg_BC, divg_buf)
endif
!Correct halo values have now been set up for BCs; we can go ahead and apply them too
call nested_grid_BC_apply_intT(delp, &
0, 0, npx, npy, npz, bd, 1., 1., &
neststruct%delp_BC, bctype=neststruct%nestbctype )
do n=1,ncnst
call nested_grid_BC_apply_intT(q(:,:,:,n), &
0, 0, npx, npy, npz, bd, 1., 1., &
neststruct%q_BC(n), bctype=neststruct%nestbctype )
enddo
#ifndef SW_DYNAMICS
call nested_grid_BC_apply_intT(pt, &
0, 0, npx, npy, npz, bd, 1., 1., &
neststruct%pt_BC, bctype=neststruct%nestbctype )
if (.not. flagstruct%hydrostatic) then
call nested_grid_BC_apply_intT(w, &
0, 0, npx, npy, npz, bd, 1., 1., &
neststruct%w_BC, bctype=neststruct%nestbctype )
endif
#ifdef USE_COND
call nested_grid_BC_apply_intT(q_con, &
0, 0, npx, npy, npz, bd, 1., 1., &
neststruct%q_con_BC, bctype=neststruct%nestbctype )
#ifdef MOIST_CAPPA
call nested_grid_BC_apply_intT(cappa, &
0, 0, npx, npy, npz, bd, 1., 1., &
neststruct%cappa_BC, bctype=neststruct%nestbctype )
#endif
#endif
#endif
call nested_grid_BC_apply_intT(u, &
0, 1, npx, npy, npz, bd, 1., 1., &
neststruct%u_BC, bctype=neststruct%nestbctype )
call nested_grid_BC_apply_intT(vc, &
0, 1, npx, npy, npz, bd, 1., 1., &
neststruct%vc_BC, bctype=neststruct%nestbctype )
call nested_grid_BC_apply_intT(v, &
1, 0, npx, npy, npz, bd, 1., 1., &
neststruct%v_BC, bctype=neststruct%nestbctype )
call nested_grid_BC_apply_intT(uc, &
1, 0, npx, npy, npz, bd, 1., 1., &
neststruct%uc_BC, bctype=neststruct%nestbctype )
!Update domains needed for Rayleigh damping
if (.not. flagstruct%hydrostatic) call mpp_update_domains(w, domain)
call mpp_update_domains(u, v, domain, gridtype=DGRID_NE, complete=.true.)
endif
if (neststruct%first_step) then
if (neststruct%nested) call set_BCs_t0(ncnst, flagstruct%hydrostatic, neststruct)
neststruct%first_step = .false.
if (.not. flagstruct%hydrostatic) flagstruct%make_nh= .false.
else if (flagstruct%make_nh) then
if (neststruct%nested) call set_NH_BCs_t0(neststruct)
flagstruct%make_nh= .false.
endif
call mpp_sync_self
end subroutine setup_nested_grid_BCs
subroutine set_physics_BCs(ps, u_dt, v_dt, flagstruct, gridstruct, neststruct, npx, npy, npz, ng, ak, bk, bd)
type(fv_grid_bounds_type), intent(IN) :: bd
type(fv_flags_type), intent(IN) :: flagstruct
type(fv_nest_type), intent(INOUT), target :: neststruct
type(fv_grid_type) :: gridstruct
integer, intent(IN) :: npx, npy, npz, ng
real, intent(IN), dimension(npz+1) :: ak, bk
real, intent(INOUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed) :: ps
real, intent(INOUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz) :: u_dt, v_dt
real, dimension(1,1) :: parent_ps ! dummy variable for nesting
type(fv_nest_BC_type_3d) :: u_dt_buf, v_dt_buf, pe_src_BC, pe_dst_BC!, var_BC
integer :: n, npz_coarse, nnest, nest_level
integer :: is, ie, js, je
integer :: isd, ied, jsd, jed
real :: dum(1,1,1)
is = bd%is
ie = bd%ie
js = bd%js
je = bd%je
isd = bd%isd
ied = bd%ied
jsd = bd%jsd
jed = bd%jed
nnest = neststruct%nlevel
do nest_level=1,neststruct%num_nest_level
if (gridstruct%nested .AND. neststruct%nlevel==nest_level) then
if (neststruct%do_remap_BC_level(nest_level)) then
npz_coarse = neststruct%parent_grid%npz
!Both nested and coarse grids assumed on Eulerian coordinates at this point
!Only need to fetch ps to form pressure levels
!Note also u_dt and v_dt are unstaggered
call nested_grid_BC(ps, parent_ps, global_nest_domain, neststruct%ind_h, neststruct%wt_h, 0, 0, &
npx, npy, bd, 1, npx-1, 1, npy-1)
call nested_grid_BC_recv(global_nest_domain, npz_coarse, bd, u_dt_buf, v_dt_buf, nnest, gridtype=AGRID)
if (neststruct%do_remap_BC(flagstruct%grid_number)) then
call allocate_fv_nest_BC_type(pe_src_BC, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,0,0,.false.)
call allocate_fv_nest_BC_type(pe_dst_BC, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1,ng,0,0,0,.false.)
call copy_ps_BC(ps, pe_src_BC, npx, npy, npz_coarse, 0, 0, bd)
call setup_eul_pe_BC(pe_src_BC, pe_dst_BC, ak, bk, npx, npy, npz, npz_coarse, 0, 0, bd, &
make_src_in=.true., ak_src=neststruct%parent_grid%ak, bk_src=neststruct%parent_grid%bk)
!Note that iv=-1 is used for remapping winds, which sets the lower reconstructed values to 0 if
! there is a 2dx signal. Is this the best for **tendencies** though?? Probably not---so iv=1 here
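!(iv is forwarded through remap_BC_direct to the vertical remapping in
! mappm; elsewhere in this module iv=-1 is used for the prognostic winds,
! while iv=1 is used here for the physics tendencies, per the note above.)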
call set_BC_direct( pe_src_BC, pe_dst_BC, u_dt_buf, u_dt, neststruct, npx, npy, npz, npz_coarse, ng, bd, 0, 0, 1, flagstruct%kord_mt)
call set_BC_direct( pe_src_BC, pe_dst_BC, v_dt_buf, v_dt, neststruct, npx, npy, npz, npz_coarse, ng, bd, 0, 0, 1, flagstruct%kord_mt)
call deallocate_fv_nest_BC_type(pe_src_BC)
call deallocate_fv_nest_BC_type(pe_dst_BC)
endif
else
call nested_grid_BC(u_dt, v_dt, dum, dum, global_nest_domain, neststruct%ind_h, neststruct%ind_h, &
neststruct%wt_h, neststruct%wt_h, 0, 0, 0, 0, npx, npy, npz, bd, 1, npx-1, 1, npy-1, nnest, gridtype=AGRID)
endif
endif
if (ANY (neststruct%child_grids) .AND. neststruct%nlevel==nest_level-1) then
if (neststruct%do_remap_BC_level(nest_level)) &
call nested_grid_BC(ps, global_nest_domain, 0, 0, nnest+1)
call nested_grid_BC_send(u_dt, v_dt, global_nest_domain, nnest+1, gridtype=AGRID)
endif
enddo
end subroutine set_physics_BCs
subroutine set_BC_direct( pe_src_BC, pe_dst_BC, buf, var, neststruct, npx, npy, npz, npz_coarse, ng, bd, istag, jstag, iv, kord)
type(fv_grid_bounds_type), intent(IN) :: bd
type(fv_nest_type), intent(INOUT) :: neststruct
integer, intent(IN) :: npx, npy, npz, npz_coarse, ng, istag, jstag, iv, kord
real, intent(INOUT), dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz) :: var
type(fv_nest_BC_type_3d), intent(INOUT) :: buf, pe_src_BC, pe_dst_BC
type(fv_nest_BC_type_3d) :: var_BC
call allocate_fv_nest_BC_type(var_BC,bd%is,bd%ie,bd%js,bd%je,bd%isd,bd%ied,bd%jsd,bd%jed,npx,npy,npz_coarse,ng,0,istag,jstag,.false.)
call nested_grid_BC_save_proc(global_nest_domain, neststruct%ind_h, neststruct%wt_h, istag, jstag, &
npx, npy, npz_coarse, bd, var_BC, buf)
call remap_BC_direct(pe_src_BC, pe_dst_BC, var_BC, var, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord)
call deallocate_fv_nest_BC_type(var_BC)
end subroutine set_BC_direct
subroutine setup_pt_BC(pt_BC, pe_eul_BC, sphum_BC, npx, npy, npz, zvir, bd)
type(fv_grid_bounds_type), intent(IN) :: bd
type(fv_nest_BC_type_3d), intent(IN) :: pe_eul_BC, sphum_BC
type(fv_nest_BC_type_3d), intent(INOUT) :: pt_BC
integer, intent(IN) :: npx, npy, npz
real, intent(IN) :: zvir
integer :: istart, iend
integer :: is, ie, js, je
integer :: isd, ied, jsd, jed
is = bd%is
ie = bd%ie
js = bd%js
je = bd%je
isd = bd%isd
ied = bd%ied
jsd = bd%jsd
jed = bd%jed
if (is == 1) then
call setup_pt_BC_k(pt_BC%west_t1, sphum_BC%west_t1, pe_eul_BC%west_t1, zvir, isd, ied, isd, 0, jsd, jed, npz)
end if
if (js == 1) then
if (is == 1) then
istart = is
else
istart = isd
end if
if (ie == npx-1) then
iend = ie
else
iend = ied
end if
call setup_pt_BC_k(pt_BC%south_t1, sphum_BC%south_t1, pe_eul_BC%south_t1, zvir, isd, ied, istart, iend, jsd, 0, npz)
end if
if (ie == npx-1) then
call setup_pt_BC_k(pt_BC%east_t1, sphum_BC%east_t1, pe_eul_BC%east_t1, zvir, isd, ied, npx, ied, jsd, jed, npz)
end if
if (je == npy-1) then
if (is == 1) then
istart = is
else
istart = isd
end if
if (ie == npx-1) then
iend = ie
else
iend = ied
end if
call setup_pt_BC_k(pt_BC%north_t1, sphum_BC%north_t1, pe_eul_BC%north_t1, zvir, isd, ied, istart, iend, npy, jed, npz)
end if
end subroutine setup_pt_BC
!!!! A NOTE ON NOMENCLATURE
!!!! Originally the BC arrays were bounded by isd and ied in the i-direction.
!!!! However, these bounds were NOT intended to delineate the extent of the
!!!! data domain; they are simply the dimensions of the BC arrays themselves.
!!!! This is confusing, especially in other places where BC and data arrays
!!!! are both present.
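!!!! For example, the west-edge call in setup_pt_BC above passes isd_BC=isd,
!!!! ied_BC=ied even though only i=isd:0 is computed (istart=isd, iend=0),
!!!! while other BC arrays in this file are dimensioned (isd:0) exactly;
!!!! istart/iend always give the range actually operated on.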
subroutine setup_pt_BC_k(ptBC, sphumBC, peBC, zvir, isd_BC, ied_BC, istart, iend, jstart, jend, npz)
integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz
real, intent(IN) :: zvir
real, intent(INOUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: ptBC
real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz) :: sphumBC
real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz+1) :: peBC
integer :: i,j,k
real :: pealn, pebln, rpkz
!Assumes dry kappa
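!rpkz is the reciprocal of the layer-mean Exner pressure,
! pkz = (pe(k+1)**kappa - pe(k)**kappa)/(kappa*(log(pe(k+1)) - log(pe(k)))),
!so the multiplication converts temperature to a virtual potential
!temperature (the (1+zvir*sphum) factor is the virtual correction).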
!$OMP parallel do default(none) shared(peBC,ptBC,zvir,sphumBC, &
!$OMP istart,iend,jstart,jend,npz) &
!$OMP private(pealn,pebln,rpkz)
do k=1,npz
do j=jstart,jend
do i=istart,iend
pealn = log(peBC(i,j,k))
pebln = log(peBC(i,j,k+1))
rpkz = kappa*(pebln - pealn)/(exp(kappa*pebln)-exp(kappa*pealn) )
ptBC(i,j,k) = ptBC(i,j,k)*rpkz * &
(1.+zvir*sphumBC(i,j,k))
enddo
enddo
enddo
end subroutine setup_pt_BC_k
subroutine setup_eul_delp_BC(delp_lag_BC, delp_eul_BC, pe_lag_BC, pe_eul_BC, ak_dst, bk_dst, npx, npy, npz, npz_coarse, ptop_src, bd)
type(fv_grid_bounds_type), intent(IN) :: bd
type(fv_nest_BC_type_3d), intent(INOUT), target :: delp_lag_BC
type(fv_nest_BC_type_3d), intent(INOUT), target :: delp_eul_BC, pe_lag_BC, pe_eul_BC
integer, intent(IN) :: npx, npy, npz, npz_coarse
real, intent(IN), dimension(npz+1) :: ak_dst, bk_dst
real, intent(IN) :: ptop_src
integer :: i,j,k, istart, iend
integer :: is, ie, js, je
integer :: isd, ied, jsd, jed
is = bd%is
ie = bd%ie
js = bd%js
je = bd%je
isd = bd%isd
ied = bd%ied
jsd = bd%jsd
jed = bd%jed
if (is == 1) then
call setup_eul_delp_BC_k(delp_lag_BC%west_t1, delp_eul_BC%west_t1, pe_lag_BC%west_t1, pe_eul_BC%west_t1, &
ptop_src, ak_dst, bk_dst, isd, 0, isd, 0, jsd, jed, npz, npz_coarse)
end if
if (ie == npx-1) then
call setup_eul_delp_BC_k(delp_lag_BC%east_t1, delp_eul_BC%east_t1, pe_lag_BC%east_t1, pe_eul_BC%east_t1, &
ptop_src, ak_dst, bk_dst, npx, ied, npx, ied, jsd, jed, npz, npz_coarse)
end if
if (is == 1) then
istart = is
else
istart = isd
end if
if (ie == npx-1) then
iend = ie
else
iend = ied
end if
if (js == 1) then
call setup_eul_delp_BC_k(delp_lag_BC%south_t1, delp_eul_BC%south_t1, pe_lag_BC%south_t1, pe_eul_BC%south_t1, &
ptop_src, ak_dst, bk_dst, isd, ied, istart, iend, jsd, 0, npz, npz_coarse)
end if
if (je == npy-1) then
call setup_eul_delp_BC_k(delp_lag_BC%north_t1, delp_eul_BC%north_t1, pe_lag_BC%north_t1, pe_eul_BC%north_t1, &
ptop_src, ak_dst, bk_dst, isd, ied, istart, iend, npy, jed, npz, npz_coarse)
end if
end subroutine setup_eul_delp_BC
subroutine setup_eul_delp_BC_k(delplagBC, delpeulBC, pelagBC, peeulBC, ptop_src, ak_dst, bk_dst, isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse)
integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse
real, intent(INOUT) :: delplagBC(isd_BC:ied_BC,jstart:jend,npz_coarse), pelagBC(isd_BC:ied_BC,jstart:jend,npz_coarse+1)
real, intent(INOUT) :: delpeulBC(isd_BC:ied_BC,jstart:jend,npz), peeulBC(isd_BC:ied_BC,jstart:jend,npz+1)
real, intent(IN) :: ptop_src, ak_dst(npz+1), bk_dst(npz+1)
integer :: i,j,k
character(len=120) :: errstring
!$OMP parallel do default(none) shared(istart,iend,jstart,jend,pelagBC,ptop_src)
do j=jstart,jend
do i=istart,iend
pelagBC(i,j,1) = ptop_src
enddo
enddo
!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_coarse,pelagBC,delplagBC)
do j=jstart,jend
do k=1,npz_coarse
do i=istart,iend
pelagBC(i,j,k+1) = pelagBC(i,j,k) + delplagBC(i,j,k)
end do
end do
end do
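!Rebuild the Eulerian (reference-coordinate) interface pressures from the
!hybrid coefficients and the surface pressure implied by the Lagrangian
!column, ps = pelagBC(i,j,npz_coarse+1):
! pe_eul(i,j,k) = ak_dst(k) + bk_dst(k)*ps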
!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_coarse,peeulBC,pelagBC,ak_dst,bk_dst)
do k=1,npz+1
do j=jstart,jend
do i=istart,iend
peeulBC(i,j,k) = ak_dst(k) + pelagBC(i,j,npz_coarse+1)*bk_dst(k)
enddo
enddo
enddo
!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,peeulBC,delpeulBC)
do k=1,npz
do j=jstart,jend
do i=istart,iend
delpeulBC(i,j,k) = peeulBC(i,j,k+1) - peeulBC(i,j,k)
enddo
enddo
enddo
end subroutine setup_eul_delp_BC_k
subroutine copy_ps_BC(ps, pe_BC, npx, npy, npz, istag, jstag, bd)
integer, intent(IN) :: npx, npy, npz, istag, jstag
type(fv_grid_bounds_type), intent(IN) :: bd
real, intent(IN) :: ps(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag)
type(fv_nest_BC_type_3d), intent(INOUT) :: pe_BC
integer :: i,j,k, istart, iend
integer :: is, ie, js, je
integer :: isd, ied, jsd, jed
is = bd%is
ie = bd%ie
js = bd%js
je = bd%je
isd = bd%isd
ied = bd%ied
jsd = bd%jsd
jed = bd%jed
if (is == 1) then
!$OMP parallel do default(none) shared(isd,jsd,jed,jstag,npz,pe_BC,ps)
do j=jsd,jed+jstag
do i=isd,0
pe_BC%west_t1(i,j,npz+1) = ps(i,j)
enddo
enddo
end if
if (ie == npx-1) then
!$OMP parallel do default(none) shared(npx,ied,istag,jsd,jed,jstag,npz,pe_BC,ps)
do j=jsd,jed+jstag
do i=npx+istag,ied+istag
pe_BC%east_t1(i,j,npz+1) = ps(i,j)
enddo
enddo
end if
if (is == 1) then
istart = is
else
istart = isd
end if
if (ie == npx-1) then
iend = ie
else
iend = ied
end if
if (js == 1) then
!$OMP parallel do default(none) shared(isd,ied,istag,jsd,npz,pe_BC,ps)
do j=jsd,0
do i=isd,ied+istag
pe_BC%south_t1(i,j,npz+1) = ps(i,j)
enddo
enddo
end if
if (je == npy-1) then
!$OMP parallel do default(none) shared(isd,ied,istag,npy,jed,jstag,npz,pe_BC,ps)
do j=npy+jstag,jed+jstag
do i=isd,ied+istag
pe_BC%north_t1(i,j,npz+1) = ps(i,j)
enddo
enddo
end if
end subroutine copy_ps_BC
!In this routine, the pe_*_BC arrays should already have PS filled in on the npz+1 level
subroutine setup_eul_pe_BC(pe_src_BC, pe_eul_BC, ak_dst, bk_dst, npx, npy, npz, npz_src, istag, jstag, bd, make_src_in, ak_src, bk_src)
type(fv_grid_bounds_type), intent(IN) :: bd
type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_src_BC, pe_eul_BC
integer, intent(IN) :: npx, npy, npz, npz_src, istag, jstag
real, intent(IN), dimension(npz+1) :: ak_dst, bk_dst
logical, intent(IN), OPTIONAL :: make_src_in
real, intent(IN), OPTIONAL :: ak_src(npz_src+1), bk_src(npz_src+1) !npz_src+1 interface levels, matching setup_eul_pe_BC_k
logical :: make_src
integer :: i,j,k, istart, iend
integer :: is, ie, js, je
integer :: isd, ied, jsd, jed
is = bd%is
ie = bd%ie
js = bd%js
je = bd%je
isd = bd%isd
ied = bd%ied
jsd = bd%jsd
jed = bd%jed
make_src = .false.
if (present(make_src_in)) make_src = make_src_in
if (is == 1) then
call setup_eul_pe_BC_k(pe_src_BC%west_t1, pe_eul_BC%west_t1, ak_dst, bk_dst, isd, 0, isd, 0, jsd, jed+jstag, npz, npz_src, &
make_src, ak_src, bk_src)
end if
if (ie == npx-1) then
call setup_eul_pe_BC_k(pe_src_BC%east_t1, pe_eul_BC%east_t1, ak_dst, bk_dst, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz, npz_src, &
make_src, ak_src, bk_src)
end if
if (is == 1) then
istart = is
else
istart = isd
end if
if (ie == npx-1) then
iend = ie
else
iend = ied
end if
if (js == 1) then
call setup_eul_pe_BC_k(pe_src_BC%south_t1, pe_eul_BC%south_t1, ak_dst, bk_dst, isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_src, &
make_src, ak_src, bk_src)
end if
if (je == npy-1) then
call setup_eul_pe_BC_k(pe_src_BC%north_t1, pe_eul_BC%north_t1, ak_dst, bk_dst, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_src, &
make_src, ak_src, bk_src)
end if
end subroutine setup_eul_pe_BC
subroutine setup_eul_pe_BC_k(pesrcBC, peeulBC, ak_dst, bk_dst, isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_src, make_src, ak_src, bk_src)
integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_src
real, intent(INOUT) :: pesrcBC(isd_BC:ied_BC,jstart:jend,npz_src+1)
real, intent(INOUT) :: peeulBC(isd_BC:ied_BC,jstart:jend,npz+1)
real, intent(IN) :: ak_dst(npz+1), bk_dst(npz+1)
logical, intent(IN) :: make_src
real, intent(IN) :: ak_src(npz_src+1), bk_src(npz_src+1)
integer :: i,j,k
character(len=120) :: errstring
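!Hybrid-coordinate reconstruction, pe = ak + bk*ps, with the surface
!pressure ps carried in level npz_src+1 of pesrcBC (filled either from the
!parent's summed delp in setup_nested_grid_BCs or by copy_ps_BC in
!set_physics_BCs). With make_src the source column is also rebuilt from
!ak_src/bk_src.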
!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_src,peeulBC,ak_dst,pesrcBC,bk_dst)
do k=1,npz+1
do j=jstart,jend
do i=istart,iend
peeulBC(i,j,k) = ak_dst(k) + pesrcBC(i,j,npz_src+1)*bk_dst(k)
enddo
enddo
enddo
if (make_src) then
!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,pesrcBC,ak_src,bk_src)
do k=1,npz_src+1
do j=jstart,jend
do i=istart,iend
pesrcBC(i,j,k) = ak_src(k) + pesrcBC(i,j,npz_src+1)*bk_src(k)
enddo
enddo
enddo
endif
end subroutine setup_eul_pe_BC_k
subroutine remap_BC(pe_lag_BC, pe_eul_BC, var_lag_BC, var_eul_BC, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord, varname, do_log_pe)
type(fv_grid_bounds_type), intent(IN) :: bd
type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_lag_BC, var_lag_BC
type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_eul_BC, var_eul_BC
integer, intent(IN) :: npx, npy, npz, npz_coarse, istag, jstag, iv, kord
character(len=*), intent(IN) :: varname
logical, intent(IN), OPTIONAL :: do_log_pe
logical :: log_pe
integer :: i,j,k, istart, iend
integer :: is, ie, js, je
integer :: isd, ied, jsd, jed
is = bd%is
ie = bd%ie
js = bd%js
je = bd%je
isd = bd%isd
ied = bd%ied
jsd = bd%jsd
jed = bd%jed
log_pe = .false. !initialized here rather than in the declaration, which would imply SAVE
if (present(do_log_pe)) log_pe = do_log_pe
if (is == 1) then
call remap_BC_k(pe_lag_BC%west_t1, pe_eul_BC%west_t1, var_lag_BC%west_t1, var_eul_BC%west_t1, isd, 0, isd, 0, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
end if
if (ie == npx-1) then
call remap_BC_k(pe_lag_BC%east_t1, pe_eul_BC%east_t1, var_lag_BC%east_t1, var_eul_BC%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
end if
if (is == 1) then
istart = is
else
istart = isd
end if
if (ie == npx-1) then
iend = ie
else
iend = ied
end if
if (js == 1) then
call remap_BC_k(pe_lag_BC%south_t1, pe_eul_BC%south_t1, var_lag_BC%south_t1, var_eul_BC%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_coarse, iv, kord, log_pe)
end if
if (je == npy-1) then
call remap_BC_k(pe_lag_BC%north_t1, pe_eul_BC%north_t1, var_lag_BC%north_t1, var_eul_BC%north_t1, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
end if
end subroutine remap_BC
subroutine remap_BC_direct(pe_lag_BC, pe_eul_BC, var_lag_BC, var, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord, do_log_pe)
type(fv_grid_bounds_type), intent(IN) :: bd
integer, intent(IN) :: npx, npy, npz, npz_coarse, istag, jstag, iv, kord
type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_lag_BC, var_lag_BC
type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_eul_BC
real, intent(INOUT) :: var(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz)
logical, intent(IN), OPTIONAL :: do_log_pe
logical :: log_pe
integer :: i,j,k, istart, iend
integer :: is, ie, js, je
integer :: isd, ied, jsd, jed
is = bd%is
ie = bd%ie
js = bd%js
je = bd%je
isd = bd%isd
ied = bd%ied
jsd = bd%jsd
jed = bd%jed
log_pe = .false. !initialized here rather than in the declaration, which would imply SAVE
if (present(do_log_pe)) log_pe = do_log_pe
if (is == 1) then
!I was unable to figure out how to pass parts of the 3D var array by
! reference, so instead I am doing an inefficient copy and copy-back. --- lmh 14jun17
call remap_BC_k(pe_lag_BC%west_t1, pe_eul_BC%west_t1, var_lag_BC%west_t1, var(isd:0,jsd:jed+jstag,:), isd, 0, isd, 0, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
end if
if (ie == npx-1) then
call remap_BC_k(pe_lag_BC%east_t1, pe_eul_BC%east_t1, var_lag_BC%east_t1, var(npx+istag:ied+istag,jsd:jed+jstag,:), npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
end if
if (is == 1) then
istart = is
else
istart = isd
end if
if (ie == npx-1) then
iend = ie
else
iend = ied
end if
if (js == 1) then
call remap_BC_k(pe_lag_BC%south_t1, pe_eul_BC%south_t1, var_lag_BC%south_t1, var(isd:ied+istag,jsd:0,:), isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_coarse, iv, kord, log_pe)
end if
if (je == npy-1) then
call remap_BC_k(pe_lag_BC%north_t1, pe_eul_BC%north_t1, var_lag_BC%north_t1, var(isd:ied+istag,npy+jstag:jed+jstag,:), isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
end if
end subroutine remap_BC_direct
subroutine remap_BC_k(pe_lagBC, pe_eulBC, var_lagBC, var_eulBC, isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse, iv, kord, log_pe)
integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse, iv, kord
logical, intent(IN) :: log_pe
real, intent(INOUT) :: pe_lagBC(isd_BC:ied_BC,jstart:jend,npz_coarse+1), var_lagBC(isd_BC:ied_BC,jstart:jend,npz_coarse)
real, intent(INOUT) :: pe_eulBC(isd_BC:ied_BC,jstart:jend,npz+1), var_eulBC(isd_BC:ied_BC,jstart:jend,npz)
integer :: i, j, k
real peln_lag(istart:iend,npz_coarse+1)
real peln_eul(istart:iend,npz+1)
character(120) :: errstring
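!With log_pe the remap is carried out in log(pe) rather than pe; this is
!used for temperature (see the "remap using peln, not pe" note in
!setup_nested_grid_BCs), matching how the main vertical remapping treats
!temperature.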
if (log_pe) then
!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_coarse,pe_lagBC,pe_eulBC,var_lagBC,var_eulBC,iv,kord) &
!$OMP private(peln_lag,peln_eul)
do j=jstart,jend
do k=1,npz_coarse+1
do i=istart,iend
peln_lag(i,k) = log(pe_lagBC(i,j,k))
enddo
enddo
do k=1,npz+1
do i=istart,iend
peln_eul(i,k) = log(pe_eulBC(i,j,k))
enddo
enddo
call mappm(npz_coarse, peln_lag, var_lagBC(istart:iend,j:j,:), &
npz, peln_eul, var_eulBC(istart:iend,j:j,:), &
istart, iend, iv, kord, pe_eulBC(istart,j,1))
enddo
else
!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_coarse,pe_lagBC,pe_eulBC,var_lagBC,var_eulBC,iv,kord)
do j=jstart,jend
call mappm(npz_coarse, pe_lagBC(istart:iend,j:j,:), var_lagBC(istart:iend,j:j,:), &
npz, pe_eulBC(istart:iend,j:j,:), var_eulBC(istart:iend,j:j,:), &
istart, iend, iv, kord, pe_eulBC(istart,j,1))
!!! NEED A FILLQ/FILLZ CALL HERE??
enddo
endif
end subroutine remap_BC_k
subroutine remap_delz_BC(pe_lag_BC, pe_eul_BC, delp_lag_BC, delz_lag_BC, delp_eul_BC, delz_eul_BC, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord)
type(fv_grid_bounds_type), intent(IN) :: bd
type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_lag_BC, delp_lag_BC, delz_lag_BC
type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_eul_BC, delp_eul_BC, delz_eul_BC
integer, intent(IN) :: npx, npy, npz, npz_coarse, istag, jstag, iv, kord
integer :: i,j,k, istart, iend
integer :: is, ie, js, je
integer :: isd, ied, jsd, jed
is = bd%is
ie = bd%ie
js = bd%js
je = bd%je
isd = bd%isd
ied = bd%ied
jsd = bd%jsd
jed = bd%jed
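!delz is not remapped directly: it is first converted to a layer
!(pseudo-)specific volume, -delz/delp, remapped like a tracer on the
!Lagrangian pressures, and then converted back to a thickness using the
!Eulerian delp:
! dz_lag -> -dz/dp --(remap)--> (dz/dp)_eul -> dz_eul = -(dz/dp)_eul*dp_eul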
if (is == 1) then
call compute_specific_volume_BC_k(delp_lag_BC%west_t1, delz_lag_BC%west_t1, isd, 0, isd, 0, jsd, jed, npz_coarse)
call remap_BC_k(pe_lag_BC%west_t1, pe_eul_BC%west_t1, delz_lag_BC%west_t1, delz_eul_BC%west_t1, isd, 0, isd, 0, jsd, jed+jstag, &
npz, npz_coarse, iv, kord, log_pe=.false.)
call compute_delz_BC_k(delp_eul_BC%west_t1, delz_eul_BC%west_t1, isd, 0, isd, 0, jsd, jed, npz)
end if
if (ie == npx-1) then
call compute_specific_volume_BC_k(delp_lag_BC%east_t1, delz_lag_BC%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz_coarse)
call remap_BC_k(pe_lag_BC%east_t1, pe_eul_BC%east_t1, delz_lag_BC%east_t1, delz_eul_BC%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, &
npz, npz_coarse, iv, kord, log_pe=.false.)
call compute_delz_BC_k(delp_eul_BC%east_t1, delz_eul_BC%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz)
end if
if (is == 1) then
istart = is
else
istart = isd
end if
if (ie == npx-1) then
iend = ie
else
iend = ied
end if
if (js == 1) then
call compute_specific_volume_BC_k(delp_lag_BC%south_t1, delz_lag_BC%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz_coarse)
call remap_BC_k(pe_lag_BC%south_t1, pe_eul_BC%south_t1, delz_lag_BC%south_t1, delz_eul_BC%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_coarse, &
iv, kord, log_pe=.false.)
call compute_delz_BC_k(delp_eul_BC%south_t1, delz_eul_BC%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz)
end if
if (je == npy-1) then
call compute_specific_volume_BC_k(delp_lag_BC%north_t1, delz_lag_BC%north_t1, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz_coarse)
call remap_BC_k(pe_lag_BC%north_t1, pe_eul_BC%north_t1, delz_lag_BC%north_t1, delz_eul_BC%north_t1, &
isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_coarse, iv, kord, log_pe=.false.)
call compute_delz_BC_k(delp_eul_BC%north_t1, delz_eul_BC%north_t1, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz)
end if
end subroutine remap_delz_BC
subroutine compute_specific_volume_BC_k(delpBC, delzBC, isd_BC, ied_BC, istart, iend, jstart, jend, npz)
integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz
real, intent(IN) :: delpBC(isd_BC:ied_BC,jstart:jend,npz)
real, intent(INOUT) :: delzBC(isd_BC:ied_BC,jstart:jend,npz)
character(len=120) :: errstring
integer :: i,j,k
!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,delzBC,delpBC)
do k=1,npz
do j=jstart,jend
do i=istart,iend
delzBC(i,j,k) = -delzBC(i,j,k)/delpBC(i,j,k)
end do
end do
end do
end subroutine compute_specific_volume_BC_k
subroutine compute_delz_BC_k(delpBC, delzBC, isd_BC, ied_BC, istart, iend, jstart, jend, npz)
integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz
real, intent(IN) :: delpBC(isd_BC:ied_BC,jstart:jend,npz)
real, intent(INOUT) :: delzBC(isd_BC:ied_BC,jstart:jend,npz)
character(len=120) :: errstring
integer :: i,j,k
!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,delzBC,delpBC)
do k=1,npz
do j=jstart,jend
do i=istart,iend
delzBC(i,j,k) = -delzBC(i,j,k)*delpBC(i,j,k)
end do
end do
end do
end subroutine compute_delz_BC_k
subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, &
#ifdef USE_COND
q_con_BC, &
#ifdef MOIST_CAPPA
cappa_BC, &
#endif
#endif
npx, npy, npz, zvir, bd)
type(fv_grid_bounds_type), intent(IN) :: bd
type(fv_nest_BC_type_3d), intent(IN), target :: delp_BC, delz_BC, sphum_BC
type(fv_nest_BC_type_3d), intent(INOUT), target :: pt_BC
integer, intent(IN) :: nq
type(fv_nest_BC_type_3d), intent(IN), target :: q_BC(nq)
#ifdef USE_COND
type(fv_nest_BC_type_3d), intent(INOUT), target :: q_con_BC
#ifdef MOIST_CAPPA
type(fv_nest_BC_type_3d), intent(INOUT), target :: cappa_BC
#endif
#endif
integer, intent(IN) :: npx, npy, npz
real, intent(IN) :: zvir
real, parameter:: c_liq = 4185.5 !< heat capacity of water at 0C
real, parameter:: c_ice = 1972. !< heat capacity of ice at 0C: c=c_ice+7.3*(T-Tice)
real, parameter:: cv_vap = cp_vapor - rvgas !< 1384.5
real, dimension(:,:,:), pointer :: liq_watBC_west, ice_watBC_west, rainwatBC_west, snowwatBC_west, graupelBC_west, hailwatBC_west
real, dimension(:,:,:), pointer :: liq_watBC_east, ice_watBC_east, rainwatBC_east, snowwatBC_east, graupelBC_east, hailwatBC_east
real, dimension(:,:,:), pointer :: liq_watBC_north, ice_watBC_north, rainwatBC_north, snowwatBC_north, graupelBC_north, hailwatBC_north
real, dimension(:,:,:), pointer :: liq_watBC_south, ice_watBC_south, rainwatBC_south, snowwatBC_south, graupelBC_south, hailwatBC_south
real :: dp1, q_liq, q_sol, q_con = 0., cvm, pkz, rdg, cv_air
integer :: i,j,k, istart, iend
integer :: liq_wat, ice_wat, rainwat, snowwat, graupel, hailwat
real, parameter:: tice = 273.16 !< For GFS Partitioning
real, parameter:: t_i0 = 15.
integer :: is, ie, js, je
integer :: isd, ied, jsd, jed
is = bd%is
ie = bd%ie
js = bd%js
je = bd%je
isd = bd%isd
ied = bd%ied
jsd = bd%jsd
jed = bd%jed
rdg = -rdgas / grav
cv_air = cp_air - rdgas
liq_wat = get_tracer_index (MODEL_ATMOS, 'liq_wat')
ice_wat = get_tracer_index (MODEL_ATMOS, 'ice_wat')
rainwat = get_tracer_index (MODEL_ATMOS, 'rainwat')
snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat')
graupel = get_tracer_index (MODEL_ATMOS, 'graupel')
hailwat = get_tracer_index (MODEL_ATMOS, 'hailwat') !hailwat is tested below and must be initialized
if (is == 1) then
if (.not. allocated(dum_West)) then
allocate(dum_West(isd:0,jsd:jed,npz))
!$OMP parallel do default(none) shared(npz,isd,jsd,jed,dum_West)
do k=1,npz
do j=jsd,jed
do i=isd,0
dum_West(i,j,k) = 0.
enddo
enddo
enddo
endif
endif
if (js == 1) then
if (.not. allocated(dum_South)) then
allocate(dum_South(isd:ied,jsd:0,npz))
!$OMP parallel do default(none) shared(npz,isd,ied,jsd,dum_South)
do k=1,npz
do j=jsd,0
do i=isd,ied
dum_South(i,j,k) = 0.
enddo
enddo
enddo
endif
endif
if (ie == npx-1) then
if (.not. allocated(dum_East)) then
allocate(dum_East(npx:ied,jsd:jed,npz))
!$OMP parallel do default(none) shared(npx,npz,ied,jsd,jed,dum_East)
do k=1,npz
do j=jsd,jed
do i=npx,ied
dum_East(i,j,k) = 0.
enddo
enddo
enddo
endif
endif
if (je == npy-1) then
if (.not. allocated(dum_North)) then
allocate(dum_North(isd:ied,npy:jed,npz))
!$OMP parallel do default(none) shared(npy,npz,isd,ied,jed,dum_North)
do k=1,npz
do j=npy,jed
do i=isd,ied
dum_North(i,j,k) = 0.
enddo
enddo
enddo
endif
endif
if (liq_wat > 0) then
liq_watBC_west => q_BC(liq_wat)%west_t1
liq_watBC_east => q_BC(liq_wat)%east_t1
liq_watBC_north => q_BC(liq_wat)%north_t1
liq_watBC_south => q_BC(liq_wat)%south_t1
else
liq_watBC_west => dum_west
liq_watBC_east => dum_east
liq_watBC_north => dum_north
liq_watBC_south => dum_south
endif
if (ice_wat > 0) then
ice_watBC_west => q_BC(ice_wat)%west_t1
ice_watBC_east => q_BC(ice_wat)%east_t1
ice_watBC_north => q_BC(ice_wat)%north_t1
ice_watBC_south => q_BC(ice_wat)%south_t1
else
ice_watBC_west => dum_west
ice_watBC_east => dum_east
ice_watBC_north => dum_north
ice_watBC_south => dum_south
endif
if (rainwat > 0) then
rainwatBC_west => q_BC(rainwat)%west_t1
rainwatBC_east => q_BC(rainwat)%east_t1
rainwatBC_north => q_BC(rainwat)%north_t1
rainwatBC_south => q_BC(rainwat)%south_t1
else
rainwatBC_west => dum_west
rainwatBC_east => dum_east
rainwatBC_north => dum_north
rainwatBC_south => dum_south
endif
if (snowwat > 0) then
snowwatBC_west => q_BC(snowwat)%west_t1
snowwatBC_east => q_BC(snowwat)%east_t1
snowwatBC_north => q_BC(snowwat)%north_t1
snowwatBC_south => q_BC(snowwat)%south_t1
else
snowwatBC_west => dum_west
snowwatBC_east => dum_east
snowwatBC_north => dum_north
snowwatBC_south => dum_south
endif
if (graupel > 0) then
graupelBC_west => q_BC(graupel)%west_t1
graupelBC_east => q_BC(graupel)%east_t1
graupelBC_north => q_BC(graupel)%north_t1
graupelBC_south => q_BC(graupel)%south_t1
else
graupelBC_west => dum_west
graupelBC_east => dum_east
graupelBC_north => dum_north
graupelBC_south => dum_south
endif
if (hailwat > 0) then
hailwatBC_west => q_BC(hailwat)%west_t1
hailwatBC_east => q_BC(hailwat)%east_t1
hailwatBC_north => q_BC(hailwat)%north_t1
hailwatBC_south => q_BC(hailwat)%south_t1
else
hailwatBC_west => dum_west
hailwatBC_east => dum_east
hailwatBC_north => dum_north
hailwatBC_south => dum_south
endif
if (is == 1) then
call setup_pt_NH_BC_k(pt_BC%west_t1, sphum_BC%west_t1, delp_BC%west_t1, delz_BC%west_t1, &
liq_watBC_west, rainwatBC_west, ice_watBC_west, snowwatBC_west, graupelBC_west, hailwatBC_west, &
#ifdef USE_COND
q_con_BC%west_t1, &
#ifdef MOIST_CAPPA
cappa_BC%west_t1, &
#endif
#endif
zvir, isd, 0, isd, 0, jsd, jed, npz)
end if
if (js == 1) then
if (is == 1) then
istart = is
else
istart = isd
end if
if (ie == npx-1) then
iend = ie
else
iend = ied
end if
call setup_pt_NH_BC_k(pt_BC%south_t1, sphum_BC%south_t1, delp_BC%south_t1, delz_BC%south_t1, &
liq_watBC_south, rainwatBC_south, ice_watBC_south, snowwatBC_south, graupelBC_south, hailwatBC_south, &
#ifdef USE_COND
q_con_BC%south_t1, &
#ifdef MOIST_CAPPA
cappa_BC%south_t1, &
#endif
#endif
zvir, isd, ied, istart, iend, jsd, 0, npz)
end if
if (ie == npx-1) then
call setup_pt_NH_BC_k(pt_BC%east_t1, sphum_BC%east_t1, delp_BC%east_t1, delz_BC%east_t1, &
liq_watBC_east, rainwatBC_east, ice_watBC_east, snowwatBC_east, graupelBC_east, hailwatBC_east, &
#ifdef USE_COND
q_con_BC%east_t1, &
#ifdef MOIST_CAPPA
cappa_BC%east_t1, &
#endif
#endif
zvir, npx, ied, npx, ied, jsd, jed, npz)
end if
if (je == npy-1) then
if (is == 1) then
istart = is
else
istart = isd
end if
if (ie == npx-1) then
iend = ie
else
iend = ied
end if
call setup_pt_NH_BC_k(pt_BC%north_t1, sphum_BC%north_t1, delp_BC%north_t1, delz_BC%north_t1, &
liq_watBC_north, rainwatBC_north, ice_watBC_north, snowwatBC_north, graupelBC_north, hailwatBC_north, &
#ifdef USE_COND
q_con_BC%north_t1, &
#ifdef MOIST_CAPPA
cappa_BC%north_t1, &
#endif
#endif
zvir, isd, ied, istart, iend, npy, jed, npz)
end if
end subroutine setup_pt_NH_BC
subroutine setup_pt_NH_BC_k(ptBC,sphumBC,delpBC,delzBC, &
liq_watBC,rainwatBC,ice_watBC,snowwatBC,graupelBC,hailwatBC, &
#ifdef USE_COND
q_conBC, &
#ifdef MOIST_CAPPA
cappaBC, &
#endif
#endif
zvir, isd_BC, ied_BC, istart, iend, jstart, jend, npz)
integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz
real, intent(OUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: ptBC
real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz) :: sphumBC, delpBC, delzBC
real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz) :: liq_watBC,rainwatBC,ice_watBC,snowwatBC,graupelBC,hailwatBC
#ifdef USE_COND
real, intent(OUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: q_conBC
#ifdef MOIST_CAPPA
real, intent(OUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: cappaBC
#endif
#endif
real, intent(IN) :: zvir
integer :: i,j,k
real :: dp1, q_con, q_sol, q_liq, cvm, pkz, rdg, cv_air
real, parameter:: c_liq = 4185.5 ! heat capacity of water at 0C
real, parameter:: c_ice = 1972. ! heat capacity of ice at 0C: c=c_ice+7.3*(T-Tice)
real, parameter:: cv_vap = cp_vapor - rvgas ! 1384.5
real, parameter:: tice = 273.16 ! For GFS Partitioning
real, parameter:: t_i0 = 15.
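! rdg*delp/delz is rdgas times the layer density (delz < 0), and cv_air is the
! dry-air heat capacity at constant volume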
rdg = -rdgas / grav
cv_air = cp_air - rdgas
!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,zvir,ptBC,sphumBC,delpBC,delzBC, &
!$OMP liq_watBC,rainwatBC,ice_watBC,snowwatBC,graupelBC,hailwatBC, &
#ifdef USE_COND
!$OMP q_conBC, &
#ifdef MOIST_CAPPA
!$OMP cappaBC, &
#endif
#endif
!$OMP rdg, cv_air) &
!$OMP private(dp1,q_liq,q_sol,q_con,cvm,pkz)
do k=1,npz
do j=jstart,jend
do i=istart,iend
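! (1.+dp1) is the virtual-temperature factor Tv/T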
dp1 = zvir*sphumBC(i,j,k)
#ifdef USE_COND
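! total liquid and solid condensate, for mass loading and moist thermodynamics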
q_liq = liq_watBC(i,j,k) + rainwatBC(i,j,k)
q_sol = ice_watBC(i,j,k) + snowwatBC(i,j,k) + graupelBC(i,j,k) + hailwatBC(i,j,k)
q_con = q_liq + q_sol
q_conBC(i,j,k) = q_con
#ifdef MOIST_CAPPA
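! moist heat capacity of the air/condensate mixture and the corresponding
! effective (moist) kappa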
cvm = (1.-(sphumBC(i,j,k)+q_con))*cv_air+sphumBC(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice
cappaBC(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1))
pkz = exp( cappaBC(i,j,k)*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * &
(1.+dp1)*(1.-q_con)/delzBC(i,j,k)))
#else
pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * &
(1.+dp1)*(1.-q_con)/delzBC(i,j,k)))
#endif
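! pkz is p**kappa (p**cappa with MOIST_CAPPA) diagnosed from the gas law using
! layer mass delp and thickness delz; dividing by it converts temperature to the
! condensate-modified virtual potential temperature carried by the solver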
ptBC(i,j,k) = ptBC(i,j,k)*(1.+dp1)*(1.-q_con)/pkz
#else
pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * &
(1.+dp1)/delzBC(i,j,k)))
ptBC(i,j,k) = ptBC(i,j,k)*(1.+dp1)/pkz
#endif
end do
end do
end do
end subroutine setup_pt_NH_BC_k
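!>@brief The subroutine 'set_NH_BCs_t0' copies the time-level t1 nonhydrostatic
!! boundary conditions (delz and w) into the t0 buffers.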
subroutine set_NH_BCs_t0(neststruct)
type(fv_nest_type), intent(INOUT) :: neststruct
#ifndef SW_DYNAMICS
neststruct%delz_BC%east_t0 = neststruct%delz_BC%east_t1
neststruct%delz_BC%west_t0 = neststruct%delz_BC%west_t1
neststruct%delz_BC%north_t0 = neststruct%delz_BC%north_t1
neststruct%delz_BC%south_t0 = neststruct%delz_BC%south_t1
neststruct%w_BC%east_t0 = neststruct%w_BC%east_t1
neststruct%w_BC%west_t0 = neststruct%w_BC%west_t1
neststruct%w_BC%north_t0 = neststruct%w_BC%north_t1
neststruct%w_BC%south_t0 = neststruct%w_BC%south_t1
#endif
end subroutine set_NH_BCs_t0
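!>@brief The subroutine 'set_BCs_t0' copies all time-level t1 nested-grid boundary
!! conditions into the t0 buffers, so the end-of-step BCs become the starting BCs
!! for the next coarse-grid time step.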
subroutine set_BCs_t0(ncnst, hydrostatic, neststruct)
integer, intent(IN) :: ncnst
logical, intent(IN) :: hydrostatic
type(fv_nest_type), intent(INOUT) :: neststruct
integer :: n
neststruct%delp_BC%east_t0 = neststruct%delp_BC%east_t1
neststruct%delp_BC%west_t0 = neststruct%delp_BC%west_t1
neststruct%delp_BC%north_t0 = neststruct%delp_BC%north_t1
neststruct%delp_BC%south_t0 = neststruct%delp_BC%south_t1
do n=1,ncnst
neststruct%q_BC(n)%east_t0 = neststruct%q_BC(n)%east_t1
neststruct%q_BC(n)%west_t0 = neststruct%q_BC(n)%west_t1
neststruct%q_BC(n)%north_t0 = neststruct%q_BC(n)%north_t1
neststruct%q_BC(n)%south_t0 = neststruct%q_BC(n)%south_t1
enddo
#ifndef SW_DYNAMICS
neststruct%pt_BC%east_t0 = neststruct%pt_BC%east_t1
neststruct%pt_BC%west_t0 = neststruct%pt_BC%west_t1
neststruct%pt_BC%north_t0 = neststruct%pt_BC%north_t1
neststruct%pt_BC%south_t0 = neststruct%pt_BC%south_t1
#ifdef USE_COND
neststruct%q_con_BC%east_t0 = neststruct%q_con_BC%east_t1
neststruct%q_con_BC%west_t0 = neststruct%q_con_BC%west_t1
neststruct%q_con_BC%north_t0 = neststruct%q_con_BC%north_t1
neststruct%q_con_BC%south_t0 = neststruct%q_con_BC%south_t1
#ifdef MOIST_CAPPA
neststruct%cappa_BC%east_t0 = neststruct%cappa_BC%east_t1
neststruct%cappa_BC%west_t0 = neststruct%cappa_BC%west_t1
neststruct%cappa_BC%north_t0 = neststruct%cappa_BC%north_t1
neststruct%cappa_BC%south_t0 = neststruct%cappa_BC%south_t1
#endif
#endif
if (.not. hydrostatic) then
call set_NH_BCs_t0(neststruct)
endif
#endif
neststruct%u_BC%east_t0 = neststruct%u_BC%east_t1
neststruct%u_BC%west_t0 = neststruct%u_BC%west_t1
neststruct%u_BC%north_t0 = neststruct%u_BC%north_t1
neststruct%u_BC%south_t0 = neststruct%u_BC%south_t1
neststruct%v_BC%east_t0 = neststruct%v_BC%east_t1
neststruct%v_BC%west_t0 = neststruct%v_BC%west_t1
neststruct%v_BC%north_t0 = neststruct%v_BC%north_t1
neststruct%v_BC%south_t0 = neststruct%v_BC%south_t1
neststruct%vc_BC%east_t0 = neststruct%vc_BC%east_t1
neststruct%vc_BC%west_t0 = neststruct%vc_BC%west_t1
neststruct%vc_BC%north_t0 = neststruct%vc_BC%north_t1
neststruct%vc_BC%south_t0 = neststruct%vc_BC%south_t1
neststruct%uc_BC%east_t0 = neststruct%uc_BC%east_t1
neststruct%uc_BC%west_t0 = neststruct%uc_BC%west_t1
neststruct%uc_BC%north_t0 = neststruct%uc_BC%north_t1
neststruct%uc_BC%south_t0 = neststruct%uc_BC%south_t1
neststruct%divg_BC%east_t0 = neststruct%divg_BC%east_t1
neststruct%divg_BC%west_t0 = neststruct%divg_BC%west_t1
neststruct%divg_BC%north_t0 = neststruct%divg_BC%north_t1
neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1
end subroutine set_BCs_t0
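!>@brief The subroutine 'd2c_setup' interpolates the D-grid winds (u,v) to
!! A-grid (ua,va) and C-grid (uc,vc) winds.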
subroutine d2c_setup(u, v, &
ua, va, &
uc, vc, dord4, &
isd,ied,jsd,jed, is,ie,js,je, npx,npy, &
grid_type, bounded_domain, &
se_corner, sw_corner, ne_corner, nw_corner, &
rsin_u,rsin_v,cosa_s,rsin2 )
logical, intent(in):: dord4
real, intent(in) :: u(isd:ied,jsd:jed+1)
real, intent(in) :: v(isd:ied+1,jsd:jed)
real, intent(out), dimension(isd:ied ,jsd:jed ):: ua
real, intent(out), dimension(isd:ied ,jsd:jed ):: va
real, intent(out), dimension(isd:ied+1,jsd:jed ):: uc
real, intent(out), dimension(isd:ied ,jsd:jed+1):: vc
integer, intent(in) :: isd,ied,jsd,jed, is,ie,js,je, npx,npy,grid_type
logical, intent(in) :: bounded_domain, se_corner, sw_corner, ne_corner, nw_corner
real, intent(in) :: rsin_u(isd:ied+1,jsd:jed)
real, intent(in) :: rsin_v(isd:ied,jsd:jed+1)
real, intent(in) :: cosa_s(isd:ied,jsd:jed)
real, intent(in) :: rsin2(isd:ied,jsd:jed)
! Local
real, dimension(isd:ied,jsd:jed):: utmp, vtmp
real, parameter:: t11=27./28., t12=-13./28., t13=3./7., t14=6./7., t15=3./28.
real, parameter:: a1 = 0.5625
real, parameter:: a2 = -0.0625
real, parameter:: c1 = -2./14.
real, parameter:: c2 = 11./14.
real, parameter:: c3 = 5./14.
integer npt, i, j, ifirst, ilast, id
if ( dord4) then
id = 1
else
id = 0
endif
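! npt keeps the fourth-order stencil away from the cube edges; for grid_type >= 3
! or bounded (nested/regional) domains, npt = -2 so the max/min bounds below never bind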
if (grid_type < 3 .and. .not. bounded_domain) then
npt = 4
else
npt = -2
endif
if ( bounded_domain) then
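! Fourth-order interpolation (weights a1, a2) of the D-grid winds to cell centers;
! the outermost halo rows/columns use a two-point average since the wider stencil
! is unavailable there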
do j=jsd+1,jed-1
do i=isd,ied
utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
enddo
enddo
do i=isd,ied
j = jsd
utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
j = jed
utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
end do
do j=jsd,jed
do i=isd+1,ied-1
vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
enddo
i = isd
vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
i = ied
vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
enddo
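! Remove the effect of grid non-orthogonality (metric terms cosa_s, rsin2) to
! obtain the A-grid wind components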
do j=jsd,jed
do i=isd,ied
ua(i,j) = (utmp(i,j)-vtmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
va(i,j) = (vtmp(i,j)-utmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
enddo
enddo
else
!----------
! Interior:
!----------
utmp = 0.
vtmp = 0.
do j=max(npt,js-1),min(npy-npt,je+1)
do i=max(npt,isd),min(npx-npt,ied)
utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
enddo
enddo
do j=max(npt,jsd),min(npy-npt,jed)
do i=max(npt,is-1),min(npx-npt,ie+1)
vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
enddo
enddo
!----------
! edges:
!----------
if (grid_type < 3) then
if ( js==1 .or. jsd