## This is the system-specific configuration file for the WCOSS Cray
## Luna machine.  Note that Surge has another file,
## system.conf.wcosssurge.

[prep_hybrid]
threads=24

[wrfexe]
minsize=10000
minage=20

[enswrf]
minsize=10000
minage=20

[runwrf]
minsize=10000
minage=30

[config]
## Project whose disk area should be used.
disk_project=hwrf
tape_project=emc-hwrf

## Specify input sources for HISTORY mode.
#
# Specifies input sources for use by hwrf.input.InputSources.  In a
# HISTORY mode run, this will be used in the scripts.exhwrf_input job
# to pull data.
input_sources=wcossluna_sources_{GFSVER}

## Specify the hwrf.input.DataCatalog for FORECAST mode runs.
fcst_catalog=wcoss_fcst_{GFSVER}

## WCOSS CPU account name for submitting jobs to the batch system.
cpu_account=HWRF-T2O

## Archive path
archive=hpss:/NCEPDEV/{tape_project}/5year/{ENV[USER]}/{SUBEXPT}/{out_prefix}.tar

[wcoss_fcst_PROD2016]
inputroot=/gpfs/dell3/nco/storage/hurpara/hwrfdata_PROD2017 ;; Input data root
rtofs={inputroot}/rtofs/ ;; RTOFS directory
ww3=/gpfs/dell1/nco/ops/com/wave/prod/multi_1.{aYMD}/ ;; WAVE directory
dcom=/gpfs/dell1/nco/ops/dcom/prod/ ;; dcom "prod" directory
rtofsstage={WORKhwrf}/hycominit/RTOFSDIR
gfs={inputroot}/gfs.{aYMDH}/ ;; GFS directory
#gdas1=/gpfs/hps3/emc/hwrf/noscrub/Lin.L.Zhu/gdas.{aYMD}/ ;; GDAS directory
gdas1={inputroot}/gdas1.{aYMDH}/ ;; GDAS directory
#gdas1=/gpfs/dell1/nco/ops/com/gfs/prod/gdas.{aYMD}/ ;; GDAS directory
gefs=/gpfs/dell2/nco/ops/com/gens/prod/gefs.{aYMD}/{aHH}/ ;; GEFS directory
enkf=/gpfs/dell1/nco/ops/com/gfs/prod/enkfgdas.{aYMD}/{aHH}/ ;; GFS ENKF directory
#messages=/gpfs/hps/nco/ops/com/hur/prod/inphwrf/ ;; hurricane message file directory
#syndatdir=/gpfs/dell1/nco/ops/com/gfs/prod/syndat/ ;; syndat tcvitals directory
loopdata={inputroot}/LOOP-CURRENT/ ;; Loop Current directory
hd_obs={inputroot}/RECON/gdas.{aYMD}/ ;; high-density obs directory
tdr={inputroot}/TDR/{aYYYY}/{aYMDH}/{vit[stnum]:02d}{vit[basin1lc]}/ ;; TDR data directory
#@inc=gfs2014_naming,emc_para_grib2_master,para_loop_naming,prod_gefs_naming,rtofs_naming,rtofs_sflux_naming
messages=/gpfs/hps/nco/ops/com/hur/prod/inphwrf/ ;; hurricane message file directory
syndatdir=/gpfs/hps3/emc/hwrf/noscrub/input/ ;; syndat tcvitals directory
@inc=gfs2014_naming,emc_para_grib2_pgrib,para_loop_naming,prod_gefs_naming,rtofs_naming,rtofs_sflux_naming,rtofs_disk_wcoss

[wcoss_fcst_PROD2017]
inputroot=/gpfs/hps3/emc/hwrf/noscrub/input/ ;; Input data root
rtofs=/gpfs/dell1/nco/ops/com/rtofs/prod/rtofs.{aYMD}/ ;; RTOFS directory
rtofsstage={WORKhwrf}/hycominit/RTOFSDIR
ww3=/gpfs/dell1/nco/ops/com/wave/prod/multi_1.{aYMD}/ ;; WAVE directory
dcom=/gpfs/dell1/nco/ops/dcom/prod ;; DCOM directory
gfs=/gpfs/hps/nco/ops/com/gfs/prod/gfs.{aYMD}/ ;; GFS directory
gdas1=/gpfs/hps/nco/ops/com/gfs/prod/gdas.{aYMD}/ ;; GDAS directory
gefs=/gpfs/dell2/nco/ops/com/gens/prod/gefs.{aYMD}/{aHH}/ ;; GEFS directory
enkf=/gpfs/dell1/nco/ops/com/gfs/prod/enkfgdas.{aYMD}/{aHH}/ ;; GFS ENKF directory
messages=/gpfs/hps/nco/ops/com/hur/prod/inphwrf/ ;; hurricane message file directory
syndatdir=/gpfs/dell1/nco/ops/com/gfs/prod/syndat/ ;; syndat tcvitals directory
#loopdata={inputroot}/LOOP-CURRENT/ ;; Loop Current directory
loopdata=/gpfs/dell2/nhc/save/guidance/storm-data/ncep/ ;; Loop Current directory
hd_obs={inputroot}/RECON/gdas.{aYMD}/ ;; high-density obs directory
tdr={inputroot}/TDR/{aYYYY}/{aYMDH}/{vit[stnum]:02d}{vit[basin1lc]}/ ;; TDR data directory
@inc=gfs2017_naming,prod_loop_naming,prod_gefs_naming,rtofs_naming,ww3_naming
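# Illustrative expansion of the path templates above (a hypothetical
# cycle for illustration only, not a real path; template semantics are
# assumed to follow the usual hwrf.config conventions): for analysis
# time 2017-09-06 12Z and storm 09L,
#   {aYYYY}=2017, {aYMD}=20170906, {aYMDH}=2017090612, {aHH}=12,
#   {vit[stnum]:02d}{vit[basin1lc]}=09l
# so, for example, in [wcoss_fcst_PROD2017]:
#   gfs -> /gpfs/hps/nco/ops/com/gfs/prod/gfs.20170906/
#   tdr -> {inputroot}/TDR/2017/2017090612/09l/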
[wcoss_fcst_PROD2019]
inputroot=/gpfs/hps3/emc/hwrf/noscrub/input/ ;; Input data root
rtofs=/gpfs/dell1/nco/ops/com/rtofs/prod/rtofs.{aYMD}/ ;; RTOFS directory
rtofsstage={WORKhwrf}/hycominit/RTOFSDIR
ww3=/gpfs/dell1/nco/ops/com/wave/prod/multi_1.{aYMD}/ ;; WAVE directory
dcom=/gpfs/dell1/nco/ops/dcom/prod ;; DCOM directory
gfs=/gpfs/dell1/nco/ops/com/gfs/prod/gfs.{aYMD}/{aHH} ;; GFS directory
gdas1=/gpfs/dell1/nco/ops/com/gfs/prod/gdas.{aYMD}/{aHH} ;; GDAS directory
gefs=/gpfs/dell2/nco/ops/com/gens/prod/gefs.{aYMD}/{aHH}/ ;; GEFS directory
enkf=/gpfs/dell1/nco/ops/com/gfs/prod/enkfgdas.{aYMD}/{aHH}/ ;; GFS ENKF directory
messages=/gpfs/hps/nco/ops/com/hur/prod/inphwrf/ ;; hurricane message file directory
syndatdir=/gpfs/dell1/nco/ops/com/gfs/prod/syndat/ ;; syndat tcvitals directory
loopdata={inputroot}/LOOP-CURRENT/ ;; Loop Current directory
hd_obs={inputroot}/RECON/gdas.{aYMD}/ ;; high-density obs directory
tdr={inputroot}/TDR/{aYYYY}/{aYMDH}/{vit[stnum]:02d}{vit[basin1lc]}/ ;; TDR data directory
@inc=gfs2019_naming,para_loop_naming,prod_gefs_naming,rtofs_naming,ww3_naming

[hwrfdata]
inputroot=/gpfs/hps3/emc/{disk_project}/noscrub/{ENV[USER]}/hwrfdata_{GFSVER}

[dir]
utilexec=/gpfs/hps/nco/ops/nwprod/grib_util.v1.0.2/exec/ ;; /nwprod/util/exec directory
utilscript=/ ;; unused /nwprod/util/ush directory (must exist)

## Non-scrubbed directory for track files, etc.  Make sure you edit this.
CDNOSCRUB=/gpfs/hps3/emc/{disk_project}/noscrub/{ENV[USER]}/trak

## Scrubbed directory for large work files.  Make sure you edit this.
#
# This is the area where HWRF will actually run.  Due to the design of
# the WCOSS Cray compute and disk, it is critical that you use one of
# two areas:
#   - /gpfs/hps/ptmp
#   - /gpfs/hps/stmp
#
# All other areas of WCOSS disk are on a different part of WCOSS and
# cannot handle the load of the full Luna cluster.  Using them for
# I/O-heavy jobs may cause problems.
CDSCRUB=/gpfs/hps2/ptmp/{ENV[USER]}

## Save directory.  Make sure you edit this.
CDSAVE=/gpfs/hps3/emc/{disk_project}/noscrub/{ENV[USER]}/save

## Syndat directory for finding which cycles to run
syndat=/gpfs/dell1/nco/ops/com/gfs/prod/syndat

## Used when parsing hwrf_holdvars.txt to make storm*.holdvars.txt in COM
[holdvars]
WHERE_AM_I=wcosscray ;; Which cluster? (For setting up environment.)
WHICH_JET=none ;; Which part of Jet are we on? None; we are not on Jet.

[wrfexe]
nio_groups=2 ;; Number of WRF I/O server groups per domain
nio_tasks_per_group=8,8,8 ;; Number of I/O servers per group, per domain
poll_servers=yes ;; Turn on server polling when quilt I/O servers are in use
nproc_x=6 ;; WRF processor count in X direction (-1 = automatic)
nproc_y=8 ;; WRF processor count in Y direction (-1 = automatic)

[runwrf]
nio_groups=1 ;; Number of WRF I/O server groups per domain
nio_tasks_per_group=4,4,4 ;; Number of I/O servers per group, per domain
poll_servers=yes ;; Turn on server polling when quilt I/O servers are in use
nproc_x=32 ;; WRF processor count in X direction (-1 = automatic)
nproc_y=60 ;; WRF processor count in Y direction (-1 = automatic)
wrf_compute_ranks=1920 ;; Number of WRF compute ranks (not I/O servers)
ww3_ranks=192 ;; Number of WAVEWATCH III ranks
wm3c_ranks=16 ;; Number of coupler ranks
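# Illustrative rank accounting for the [runwrf] settings above (an
# interpretation, not authoritative): the WRF compute decomposition is
# nproc_x * nproc_y = 32 * 60 = 1920 ranks, matching wrf_compute_ranks.
# Assuming nio_tasks_per_group=4,4,4 lists servers per group for each of
# the three domains, the quilt I/O servers add nio_groups*(4+4+4) = 12
# ranks; the coupled forecast job also needs ww3_ranks=192 and
# wm3c_ranks=16, i.e. 1920 + 12 + 192 + 16 = 2140 MPI ranks, plus
# whatever the ocean model uses (configured elsewhere).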