## System-specific HWRF configuration overrides for production runs on the
## NCEP WCOSS Cray; values here take precedence over the default conf files.

## GSI data assimilation settings for the intermediate (d02) domain.
[gsi_d02]
# Do not delete the GSI d02 work area after the job completes.
scrub=no

## GSI data assimilation settings for the innermost (d03) domain.
[gsi_d03]
# Do not delete the GSI d03 work area after the job completes.
scrub=no

[config]
## Specify the hwrf.input.DataCatalog for FORECAST mode runs.
#
# @todo: switch to wcoss_fcst_nco and update to be correct.
fcst_catalog=wcoss_fcst_nco

## Input sources in developmental retrospective mode, set to be
#  invalid.  This ensures an abort if this feature is used
#  accidentally.
input_sources=deliberately_invalid

extra_trackers=no                 ;; Disable experimental extra trackers (d1 & d1+2)

# The version string is taken from the job environment; EXPT and SUBEXPT
# are derived from it so the NCO version number appears in output naming.
HWRF_VERSION={ENV[HWRF_VERSION]}  ;; Specify NCO HWRF version number
EXPT=hwrf.v{HWRF_VERSION}         ;; Override default EXPT with NCO name
SUBEXPT=hwrf.v{HWRF_VERSION}      ;; should match EXPT

# In production, fall back to alternate configurations instead of
# aborting, and email the addresses below about tracks and fallbacks.
allow_fallbacks=yes
email_sdm=yes ;; email AFOS file to sdm@noaa.gov
track_email_list=sdm@noaa.gov   ;; space-separated list of email addresses
# NOTE(review): this list is comma-separated while track_email_list above is
# documented as space-separated -- confirm each consumer expects its delimiter.
fallback_email_list=ncep.sos@noaa.gov,sdm@noaa.gov,nco.spa@noaa.gov

## Resource settings for the prep_hybrid spectral processing program.
[prep_hybrid]
threads=24 ;; Number of threads to use for spectral processing (prep_hybrid program)

[ungrib]
## No second GRIB file needed in production (master is available)
#note: need to add back for hwrf.v12.0.0: item2=  gfs_gribB
#item2=  gfs_gribB
# NOTE(review): item2 is commented out above, so item2_optional=no has no
# effect until item2 is restored -- confirm this is intentional.
item2_optional=no ;; Is the second GRIB file type optional?

[fgat_ungrib]
## No second GRIB file needed in production (master is available)
# Explicitly empty -- presumably overrides a non-empty item2 set in a
# lower-priority conf file; verify against the default configuration.
item2=

## Sanity thresholds for wrf output files from init jobs.
# NOTE(review): a second [wrfexe] section with resource settings appears
# later in this file; presumably the conf parser merges duplicate section
# headers -- confirm it does not error or discard one of them.
[wrfexe]
# minage units are not stated here -- presumably seconds; confirm.
minsize=10000 ;; Minimum size of wrf output files from init jobs
minage=20     ;; Minimum age of wrf output files from init jobs

## Sanity thresholds for wrf output files from ensda (ensemble DA) jobs.
[enswrf]
# minage units are not stated here -- presumably seconds; confirm.
minsize=10000 ;; Minimum size of wrf output files from ensda jobs
minage=20     ;; Minimum age of wrf output files from ensda jobs

## Sanity thresholds for wrf output files from the main forecast job.
# NOTE(review): a second [runwrf] section with resource settings appears
# later in this file; presumably the conf parser merges duplicate section
# headers -- confirm it does not error or discard one of them.
[runwrf]
# minage units are not stated here -- presumably seconds; confirm.
minsize=10000 ;; Minimum size of wrf output files from forecast jobs
minage=30     ;; Minimum age of wrf output files from forecast jobs

## Directory locations, taken from the NCO job environment.  Note that
## CDSCRUB, CDNOSCRUB and WORKhwrf all point at the same {ENV[DATA]} area.
[dir]
utilexec={ENV[UTILROOT]}/exec ;; Utility exec dir; never used but must exist
CDSCRUB={ENV[DATA]}           ;; Main scrub area
CDNOSCRUB={ENV[DATA]}         ;; Vestigial but should match CDSCRUB
CDSAVE=/nw{ENV[envir]}        ;; Unused, but this directory must exist.
syndat={ENV[COMINarch]}       ;; Directory with syndat tcvitals data
com={ENV[COMOUT]}             ;; COM output directory
oldcom={ENV[HISTDATA]}        ;; Prior cycle COM output directory
WORKhwrf={ENV[DATA]}          ;; Scrub area for this storm and cycle
HOMEhwrf={ENV[HOMEhwrf]}      ;; HWRF installation location

## Paths to external executables.  Most come from environment variables
## set by the production modules; mpiserial is taken from the HWRF
## installation itself (the {ENV[MPISER]} form is kept commented below).
[exe]
hur_output_email={USHhwrf}/hur_output_email.sh ;; Unused.
wgrib={ENV[WGRIB]}        ;; Path to wgrib program
nco_wgrib2={ENV[WGRIB2]}  ;; Path to grib_util wgrib2 program
grb2index={ENV[GRB2INDEX]} ;; Path to grb2index program
grbindex={ENV[GRBINDEX]}  ;; Path to grbindex program
# mpiserial={ENV[MPISER]}   ;; Path to mpiserial program
mpiserial={HOMEhwrf}/exec/mpiserial   ;; Path to mpiserial program

## Used when parsing hwrf_holdvars.txt to make storm*.holdvars.txt in COM
[holdvars]
# Cluster identifier -- presumably consumed by downstream environment
# setup scripts; verify the accepted values list there.
WHERE_AM_I=wcosscray ;; Which cluster? (For setting up environment.)
WHICH_JET=none     ;; Which part of Jet are we on?  None; we are not on Jet.

## Set resource requirements for init job WRF executions:
# NOTE(review): this repeats the [wrfexe] section header used earlier for
# minsize/minage; presumably the conf parser merges both -- confirm.
[wrfexe]
# With nproc_x=6 and nproc_y=8 the init WRF runs 48 compute ranks.
nio_groups=2          ;; Number of WRF I/O server groups per domain
nio_tasks_per_group=8,8,8 ;; Number of I/O servers per group
poll_servers=yes      ;; Turn on server polling if quilt servers are used (They are not.)
nproc_x=6             ;; WRF processor count in X direction (-1 = automatic)
nproc_y=8             ;; WRF processor count in Y direction (-1 = automatic)

## Set resource requirements for forecast job WRF execution:
# NOTE(review): this repeats the [runwrf] section header used earlier for
# minsize/minage; presumably the conf parser merges both -- confirm.
[runwrf]
nio_groups=1          ;; Number of WRF I/O server groups per domain
nio_tasks_per_group=4,4,4 ;; Number of I/O servers per group
poll_servers=yes      ;; Turn on server polling if quilt servers are used (They are not.)
# 24 x 40 = 960, matching wrf_compute_ranks below; keep these consistent.
nproc_x=24            ;; WRF processor count in X direction (-1 = automatic)
nproc_y=40            ;; WRF processor count in Y direction (-1 = automatic)
wrf_compute_ranks=960 ;; Number of WRF compute ranks (not I/O servers)
ww3_ranks=240         ;; number of wavewatch3 ranks
wm3c_ranks=2          ;; Number of coupler ranks