## This is the system-specific configuration file for Phase 2 of the
## NOAA WCOSS cluster. It should not be used directly. Users should
## copy it to parm/system.conf and edit the values manually.

[config]
## WCOSS project to use for file storage
disk_project=hwrf

## NCEPDEV project to use for tape storage
tape_project=emc-{disk_project}

## Specify input sources for HISTORY mode.
#
# Specifies input sources for use by hwrf.input.InputSources. In a
# HISTORY mode run, this will be used in the scripts.exhwrf_input job
# to pull data.
input_sources=wcoss_sources_{GFSVER}

## Specify the hwrf.input.DataCatalog for FORECAST mode runs.
fcst_catalog=wcoss_fcst_{GFSVER}

## CPU account name for submitting jobs to the batch system.
cpu_account=HUR-T2O

## Archive path
archive=hpss:/NCEPDEV/{tape_project}/5year/{ENV[USER]}/{SUBEXPT}/{out_prefix}.tar

[hwrfdata]
inputroot=/gpfs/hps3/emc/{disk_project}/noscrub/{ENV[USER]}/hwrfdata_{GFSVER}

[dir]
utilexec=/gpfs/hps/nco/ops/nwprod/grib_util.v1.0.2/exec/ ;; /nwprod/util/exec directory
utilscript=/ ;; /nwprod/util/ush directory (unused, but the path must exist)

## Non-scrubbed directory for track files, etc. Make sure you edit this.
CDNOSCRUB=/{disk_project}/noscrub/{ENV[USER]}

## Scrubbed directory for large work files. Make sure you edit this.
#
# This is the area where HWRF will actually run. Due to the design of
# the WCOSS Phase 2 compute and disk, it is critical that you use one
# of these two areas:
#  - /ptmpp2/
#  - /ptmpd3/
#
# All other areas of WCOSS disk are on a different WCOSS phase. Using
# them for I/O-heavy jobs may cause problems.
CDSCRUB=/ptmpp2/{ENV[USER]}

## Save directory. Make sure you edit this.
CDSAVE=/{disk_project}/save/{ENV[USER]}

## Syndat directory for finding which cycles to run
syndat=/com/arch/prod/syndat

## Used when parsing hwrf_holdvars.txt to make storm*.holdvars.txt in COM
[holdvars]
WHERE_AM_I=wcoss          ;; Which cluster? (For setting up environment.)
WHICH_JET=none            ;; Which part of Jet are we on? None; we are not on Jet.

[wrfexe]
nio_groups=1              ;; Number of WRF I/O server groups per domain
nio_tasks_per_group=0,0,0 ;; Number of I/O servers per group
poll_servers=yes          ;; Turn on server polling if quilt servers are used (here they are not)
nproc_x=-1                ;; WRF processor count in X direction (-1 = automatic)
nproc_y=-1                ;; WRF processor count in Y direction (-1 = automatic)

[runwrf]
nio_groups=1              ;; Number of WRF I/O server groups per domain
nio_tasks_per_group=4,4,4 ;; Number of I/O servers per group
poll_servers=yes          ;; Turn on server polling if quilt servers are used (here they are)
nproc_x=18                ;; WRF processor count in X direction (-1 = automatic)
nproc_y=36                ;; WRF processor count in Y direction (-1 = automatic)
wrf_compute_ranks=648     ;; Number of WRF compute ranks (not I/O servers)
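
## Note on the [runwrf] decomposition above: when nproc_x and nproc_y
## are set explicitly, their product must equal wrf_compute_ranks
## (here 18 * 36 = 648). The commented-out sketch below shows one
## hypothetical alternative layout with the same rank count; the
## specific numbers are illustrative, not a recommendation.
#nproc_x=24                ;; 24 * 27 = 648, same compute rank count
#nproc_y=27
#wrf_compute_ranks=648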
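
## Illustrative only: assuming the standard hwrf.config {...} string
## interpolation ({var} resolves from the current section or from
## [config], {ENV[USER]} from the environment, as this file's own
## cross-section references imply), a hypothetical user "jdoe" with
## the defaults above would see values expand roughly as:
#   tape_project -> emc-hwrf
#   CDNOSCRUB    -> /hwrf/noscrub/jdoe
#   CDSCRUB      -> /ptmpp2/jdoe
#   archive      -> hpss:/NCEPDEV/emc-hwrf/5year/jdoe/{SUBEXPT}/{out_prefix}.tar
## ({SUBEXPT} and {out_prefix} are filled in at run time from the
## experiment configuration, so they are left unexpanded here.)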