## This is the first configuration file read in during the pre_master
# job when creating the initial configuration.  It sets directory
# paths to which the later conf files refer.  Any variables set in
# later conf files will override variables set here.
#
# This default file is used by NCEP EMC for retrospective runs.  It
# assumes another file has set the CDSCRUB, CDSAVE, syndat and
# CDNOSCRUB variables in the [dir] section.  This allows the system to
# be easily picked up and moved to another cluster.  See @ref
# conf-file-system_conf_jet and @ref conf-file-system_conf_zeus
# for examples.

[config]
GFSVER=PROD2020        ;; GFS version (default)
forecast_length=126    ;; Length of the forecast in hours
scrub_com=yes          ;; Do we scrub the com directory?
scrub_work=yes         ;; Do we scrub the work directory?
sync_frequently=no     ;; Do we run "sync" frequently?

#! Number of hours between cycles.  Only 6.0 has ever been tested.
# Several things will break if you try to change this without
# modifying the code and scripts.
cycling_interval=6.0

#! Scrub option: do we delete temporary files?  Disabling this will
# disable most, but not all, temporary file deletion.
scrub=yes              ; delete temporary files

## Prefix to prepend to most output files in the COM directory.  Also
# used for archive filenames.
out_prefix={vit[stormnamelc]}{vit[stnum]:02d}{vit[basin1lc]}.{vit[YMDH]}
out_prefix_nodate={vit[stormnamelc]}{vit[stnum]:02d}{vit[basin1lc]}

## out_prefix value for the prior cycle
old_out_prefix={oldvit[stormnamelc]}{oldvit[stnum]:02d}{oldvit[basin1lc]}.{oldvit[YMDH]}

## out_prefix_nodate value for the prior cycle
old_out_prefix_nodate={oldvit[stormnamelc]}{oldvit[stnum]:02d}{oldvit[basin1lc]}

## RUNhwrf is a component of some other file paths
RUNhwrf={SUBEXPT}
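## For example, with hypothetical tcvitals for storm name MARIA, storm
# number 15, Atlantic basin letter L, and cycle 2017091800, the
# templates above would expand to:
#
#   out_prefix        = maria15l.2017091800
#   out_prefix_nodate = maria15l
#
# (Illustrative values only; the actual fields come from the vit and
# oldvit objects at run time.)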
# Enable or disable parts of the workflow
run_gsi=yes            ;; GSI and FGAT initialization
run_ocean=yes          ;; POM coupling
ocean_model=POM        ;; Selected ocean model: POM or HYCOM
atmos_model=WRF        ;; Selected atmospheric model.  Must be WRF
run_wave=yes           ;; Wave coupling?
wave_model=WW3         ;; Which wave model?  Must be WW3
run_relocation=yes     ;; vortex relocation
run_ensemble_da=yes    ;; run the DA ensemble

## Different options to run the DA ensemble
##
## 0: run HWRF ensemble initialized from the previous cycle's GFS EnKF analysis
## 1: run cycled HWRF ensemble hybrid DA (3D)
## 2: run cycled HWRF ensemble hybrid DA (4DEnVar) (not available)
ensda_opt=1

## Different options to trigger the HWRF ensemble
##
## always: always run the HWRF ensemble
## tdr_next_cycle: the HWRF ensemble is triggered by a TDR trigger file
##     or TDR data in the next cycle
## tdr_and_wmax: the HWRF ensemble is triggered by TDR data in the next
##     cycle; if TDR data will not be available, it is triggered when
##     the storm intensity exceeds a pre-defined threshold
ensda_when=tdr_next_cycle ;; run DA ensemble always or depending on TDR data

ensda_fallback=no      ;; if yes, HWRF still runs when GFS EnKF files are missing, but without the DA ensemble
run_ens_relocation=yes ;; run relocation for ensemble members
run_satpost=yes        ;; make synthetic satellite products
run_multistorm=no      ;; run as a Multi Storm (even with 1 or no storms)

## Use spectral vs. GRIB input for init & bdy
#
# Specify the data format of the GFS/GDAS/ENKF IC/BC input.  These
# flags only control what is used to prepare the inputs to ungrib and
# metgrid.
gfsinit_type=5         ;; 1=grib2, 2=nemsio, 3=spectral, 4=highres grib2, 5=NetCDF (GFS V16)
gfsbdy_type=1          ;; 1=grib2, 2=nemsio, 3=spectral

allow_fallbacks=no     ;; if GSI fails, run off of the GFS relocated vortex
extra_trackers=no      ;; turn on 6km and 18km trackers (slower products job)
conditional_gsid03=no  ;; run gsi_d03 only when high-res inner-core data are available
conditional_gsid02=no  ;; run gsi_d02 only when high-res inner-core data are available
hybrid_fallback=no     ;; if yes and not a realtime run, fall back to 3DVAR if ensemble < 40

# The following options describe the blending of data assimilation
# increments within the tropical cyclone inner core; the following
# options (blend_ic_opt) are available:
#
# 1: Use the operational method; this implies zeroing out all
#    data-assimilation increments within some radius relative to the
#    tropical cyclone position (e.g., inner core); this is the default
# 2: Use a blending of the low-wavenumber spectra of increments within
#    the tropical cyclone inner core -- this is experimental; please
#    see the [merge] block and the relevant GSI domain blocks (e.g.,
#    [gsi_merge_d0*]) in hwrf.conf
blend_innercore=yes    ;; use first guess in inner core region (merge job)
blend_ic_opt=2         ;; 1 = use old method; 2 = use new method

## Pull data from external sources to a staging area.
#
# The input_catalog option specifies a section (default: [hwrfdata])
# to use for input data locations.
input_catalog=hwrfdata

## Enables email to the NOAA SDM.  Do not email the NOAA Senior Duty
# Meteorologist (PLEASE).
email_sdm=no           ; do not change unless you are NCEP Central Operations
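## For example (a hypothetical override, not a shipped default): as
# noted at the top of this file, variables set in later conf files
# override the values above, so a user conf file passed to the
# launcher could disable wave coupling and select the operational
# inner-core blending with:
#
#   [config]
#   run_wave=no
#   blend_ic_opt=1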
## Failure testing configuration
[failure]

## Force a failure of the ocean_init job.
#
# Forces a failure of the ocean_init job if set to one of two special
# strings:
#
# * unexpected_failure - bypass fallback mechanisms even if enabled
#   and force a complete abort of the workflow at the ocean_init job
# * expected_failure - simulate an expected failure of the ocean_init
#   job and fall back to uncoupled if fallbacks are enabled.
ocean_init=none

## Force a failure of the wave_init job.
#
# Forces a failure of the wave_init job if set to one of two special
# strings:
#
# * unexpected_failure - bypass fallback mechanisms even if enabled
#   and force a complete abort of the workflow at the wave_init job
# * expected_failure - simulate an expected failure of the wave_init
#   job and fall back to no wave coupling if fallbacks are enabled.
wave_init=none

## Abort the workflow for North Atlantic storms with feature-based
# initialization if the loop current file is missing or unusable.
#
# Activates a failure test inside pom.init.fbtr.getinp() which will
# abort the workflow in one of two different ways IF the loop current
# files are missing or unusable:
#
# * unexpected_failure - abort the entire workflow at the ocean_init job
# * expected_failure - counts as a normal ocean_init failure, which will
#   fall back to uncoupled if fallbacks are enabled
loop_current=none

## Simulate a failure of the gsi_d02 job in one of two different ways.
#
# Activates a failure test in scripts.exhwrf_gsi that will abort the
# job in one of two different ways:
#
# * unexpected_failure - abort the entire workflow at the gsi_d02 job
# * expected_failure - abort gsi_d02 cleanly, allowing the
#   system to fall back to no GSI if that fallback is enabled
gsi_d02=none

## Simulate a failure of the gsi_d03 job in one of two different ways.
#
# Activates a failure test in scripts.exhwrf_gsi that will abort the
# job in one of two different ways:
#
# * unexpected_failure - abort the entire workflow at the gsi_d03 job
# * expected_failure - abort gsi_d03 cleanly, allowing the
#   system to fall back to no GSI if that fallback is enabled
gsi_d03=none

## Simulate a failure of the enkf job in one of two different ways.
#
# Activates a failure test in scripts.exhwrf_enkf that will abort the
# job in one of two different ways:
#
# * unexpected_failure - abort the entire workflow at the enkf job
# * expected_failure - abort the enkf cleanly, allowing the
#   system to fall back to no EnKF if that fallback is enabled
enkf_failed=none

## Simulate a failure of the ensda job in one of two different ways.
#
# Activates a failure test in scripts.exhwrf_ensda that will abort the
# job in one of two different ways:
#
# * unexpected_failure - abort the entire workflow at the ensda job
# * expected_failure - abort the ensda cleanly, allowing the
#   system to fall back to no ensda if that fallback is enabled
ensda_mem001_failed=none

## Simulate a failure of the ensda_relocate job in one of two
# different ways.
#
# Activates a failure test in scripts.exhwrf_ensda_relocate that will
# abort the job in one of two different ways:
#
# * unexpected_failure - abort the entire workflow at the ensda_relocate job
# * expected_failure - abort the ensda_relocate cleanly, allowing the
#   system to fall back to no ensda_relocate if that fallback is enabled
ensda_relocate_mem001_failed=none

## Variables to set as string values when parsing the
# hwrf_workflow.xml.in.  This section is only used by the Rocoto-based
# workflow.
[rocotostr]
FCST_RES=2KM              ;; 3KM or 2KM: which processor counts to choose
CDSAVE={dir/CDSAVE}       ;; save area for Rocoto to use
CDNOSCRUB={dir/CDNOSCRUB} ;; non-scrubbed area for Rocoto to use
CDSCRUB={dir/CDSCRUB}     ;; scrubbed area for Rocoto to use
PARMhwrf={dir/PARMhwrf}   ;; parm/ directory location
USHhwrf={dir/USHhwrf}     ;; ush/ directory location
EXhwrf={dir/EXhwrf}       ;; scripts/ directory location
EXPT={config/EXPT}        ;; experiment name
SUBEXPT={config/SUBEXPT}  ;; sub-experiment name
OCEAN_MODEL={ocean_model} ;; Which ocean model is selected: POM or HYCOM?
WAVE_MODEL={wave_model}   ;; Which wave model is selected?
CPU_ACCOUNT={cpu_account} ;; CPU account name

## Variables to set as boolean values when parsing the
# hwrf_workflow.xml.in; they'll be changed to YES or NO.  This section
# is only used by the Rocoto-based workflow.
[rocotobool]
RUN_GSI={run_gsi}         ;; Do we run GSI?
CONDITIONAL_GSID03={conditional_gsid03} ;; Do we disable d03 GSI when TDR is unavailable?
CONDITIONAL_GSID02={conditional_gsid02} ;; Do we disable d02 GSI when TDR is unavailable?
RUN_OCEAN={run_ocean}     ;; Do we run with ocean coupling?
RUN_WAVE={run_wave}       ;; Do we run with wave coupling?
RUN_RELOCATION={run_relocation} ;; Do we enable vortex relocation?
ALLOW_FALLBACKS={allow_fallbacks} ;; Do we allow fallbacks when things fail?
SCRUB_COM={scrub_com}     ;; Should Rocoto scrub the COM directory?
SCRUB_WORK={scrub_work}   ;; Should Rocoto scrub the WORK directory?
EXTRA_TRACKERS={extra_trackers} ;; Do we run the extra tracker jobs?
RUN_ENS_RELOCATION={run_ens_relocation} ;; Do we run relocation for ensemble members?
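## For example, with the defaults above (run_gsi=yes, run_wave=yes),
# the generated workflow would see RUN_GSI=YES and RUN_WAVE=YES;
# setting run_wave=no in a later conf file would instead yield
# RUN_WAVE=NO.  (Illustrative of the yes/no to YES/NO conversion
# described above, applied when hwrf_workflow.xml.in is parsed.)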
## Configure the prelaunch configuration overrides, run in
## hwrf_expt, and implemented in hwrf.prelaunch
[prelaunch]
ensid_overrides=no     ;; allow per-ensid overrides for the forecast ensemble
ungrib_overrides=yes   ;; replace [ungrib] tbl with per-year tbl%Y values

## Minimum allowed wind for running relocation and GSI.
#
# The tcvitals wind will be checked and, if the intensity is below
# this value in m/s, then relocation and GSI will be disabled.  The
# GFS analysis vortex will be used directly without relocation,
# intensity adjustment, size adjustment, or data assimilation.
# Note: This option is currently not implemented.
min_wind_for_init=11

# Per-forecast-center configurations
rsmc_overrides=no      ;; read parm/hwrf_JTWC.conf and parm/hwrf_NHC.conf
rsmc_conf={PARMhwrf}/hwrf_{RSMC}.conf ;; file to read for rsmc_overrides

# Per-basin configurations: read no_basin_conf if basin_conf is missing
basin_overrides=yes    ;; read parm/hwrf_(basin).conf

## File to read for recognized basins when basin_overrides is enabled
basin_conf={PARMhwrf}/hwrf_{vit.pubbasin2}.conf

## File to read for unrecognized basins when basin_overrides is enabled
no_basin_conf={PARMhwrf}/hwrf_other_basins.conf

## Configure the sanity checks
[sanity]

## Minimum fix file version
#
# The minimum required fix file version, checked by examining
# fix/hwrf_fix_datestamp.
#
# @note fix_version must still be set even when check_fix=no
fix_version=20191011

check_input=yes        ;; check for an input file in HISTORY mode
input_item=gfs_sfcanl  ;; the input item to check for
input_dataset=gfs      ;; the dataset the item is in
check_exec=yes         ;; check for some executables
check_fix=yes          ;; check fix file version
check_expt=yes         ;; try to load hwrf_expt and run its sanity checks

## Configure directory paths.
[dir]

## Main scrub directory
WORKhwrf={CDSCRUB}/{RUNhwrf}/{vit[YMDH]}/{vit[stormid3]}

## HWRF install location
HOMEhwrf={CDSAVE}/{EXPT}

## COM directories are used for communicating between cycles and
# storms.  This is the COM directory specified by the vitals in the
# "vit" variable.
#ZZcom={CDSCRUB}/{RUNhwrf}/com/{vit[YMDH]}/{vit[stormid3]}
#ZZrealstormcom={CDSCRUB}/{RUNhwrf}/com/{YMDH}/{realstorm}
com={CDSCRUB}/{RUNhwrf}/com/{vit[YMD]}/{vit[HH]}/{vit[stormid3]}
realstormcom={CDSCRUB}/{RUNhwrf}/com/{YMD}/{HH}/{realstorm}
realstormwork={CDSCRUB}/{RUNhwrf}/{YMDH}/{realstorm}
#ZZoldcom={CDSCRUB}/{RUNhwrf}/com/{oldvit[YMDH]}/{oldvit[stormid3]}
oldcom={CDSCRUB}/{RUNhwrf}/com/{oldvit[YMD]}/{oldvit[HH]}/{oldvit[stormid3]}
oldsid={oldvit[stormid3]}

# NOSCRUB delivery locations
outatcf={CDNOSCRUB}/{SUBEXPT} ;; delivery location for ATCF files
outdiag={CDNOSCRUB}/diagtrak/{SUBEXPT} ;; delivery location for wrfdiag files
outstatus={CDNOSCRUB}/cycstatus/{SUBEXPT} ;; delivery location for status files
outatcfcorrected={CDNOSCRUB}/atcf/{SUBEXPT} ;; delivery location for corrected ATCF files
outships={CDNOSCRUB}/ships/{SUBEXPT} ;; delivery location for SHIPS files

## Configure archive locations and methods.
[archive]
mkdir=yes              ;; make the archive directory?  yes or no

## Location for the extra wrfout file archive.
#
# Location for an extra archive file made with just the native WRF
# output files.  If this option is empty or missing, then the extra
# archive will not be made, and the job to make it will not be
# launched.
#
# Example:
#
#   wrfout=hpss:/2year/NCEPDEV/emc-hwrf/Some.Jerk/{SUBEXPT}/wrfout/{out_prefix}.tar
#
# It must begin with hpss: and end with .tar
wrfout=

daonly=no              ;; only archive files selected for DA purposes
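## For example, combining the wrfout example above with the
# hypothetical maria15l.2017091800 cycle used earlier in this file,
# the extra archive would (illustratively) be written to:
#
#   hpss:/2year/NCEPDEV/emc-hwrf/Some.Jerk/{SUBEXPT}/wrfout/maria15l.2017091800.tar
#
# once {out_prefix} is expanded; {SUBEXPT} expands to the
# sub-experiment name in the same way.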