# 3D models with 1D crust: append "_1Dcrust" to the 3D model name
# to take the 1D crustal model from the
# associated reference model rather than the default 3D crustal model
# e.g. s20rts_1Dcrust, s362ani_1Dcrust, etc.
MODEL = s362ani
# parameters describing the Earth model
OCEANS = .true.
ELLIPTICITY = .true.
TOPOGRAPHY = .true.
GRAVITY = .true.
ROTATION = .true.
ATTENUATION = .true.
# absorbing boundary conditions for a regional simulation
ABSORBING_CONDITIONS = .false.
# record length in minutes
RECORD_LENGTH_IN_MINUTES = 1d0
# to undo attenuation for sensitivity kernel calculations or forward runs with SAVE_FORWARD
# use one (and only one) of the two flags below. UNDO_ATTENUATION is much better (it is exact)
# but requires a significant amount of disk space for temporary storage.
PARTIAL_PHYS_DISPERSION_ONLY = .false.
UNDO_ATTENUATION = .false.
# How much memory (in GB) is installed on your machine per CPU core (only used for UNDO_ATTENUATION, can be ignored otherwise)
# (or per GPU card or per INTEL MIC Phi board)
# Beware, this value MUST be given per core, i.e. per MPI thread, i.e. per MPI rank, NOT per node.
# This value is for instance:
# - 4 GB on Tiger at Princeton
# - 4 GB on TGCC Curie in Paris
# - 4 GB on Titan at ORNL when using CPUs only (no GPUs); start your run with "aprun -n$NPROC -N8 -S4 -j1"
# - 2 GB on the machine used by Christina Morency
# - 2 GB on the TACC machine used by Min Chen
# - 1.5 GB on the GPU cluster in Marseille
# When running on GPU machines, it is simpler to set PERCENT_OF_MEM_TO_USE_PER_CORE = 100.d0
# and then set MEMORY_INSTALLED_PER_CORE_IN_GB to the amount of memory that you estimate is free (rather than installed)
# on the host of the GPU card while running your GPU job.
# For GPU runs on Titan at ORNL, use PERCENT_OF_MEM_TO_USE_PER_CORE = 100.d0 and MEMORY_INSTALLED_PER_CORE_IN_GB = 25.d0
# and run your job with "aprun -n$NPROC -N1 -S1 -j1"
# (each host has 32 GB on Titan, each GPU has 6 GB, thus even if all the GPU arrays are duplicated on the host
# this leaves 32 - 6 = 26 GB free on the host; leaving 1 GB for the Linux system, we can safely use 100% of 25 GB)
MEMORY_INSTALLED_PER_CORE_IN_GB = 16.0d0
# What percentage of this total do you allow us to use for arrays to undo attenuation, keeping in mind that you
# need to leave some memory available for the GNU/Linux system to run
# (a typical value is 85%; any value below is fine but the code will then save a lot of data to disk;
# values above, say 90% or 92%, can be OK on some systems but can make the adjoint code run out of memory
# on other systems, depending on how much memory per node the GNU/Linux system needs for itself; thus you can try
# a higher value and if the adjoint crashes then try again with a lower value)
PERCENT_OF_MEM_TO_USE_PER_CORE = 85.d0
# three mass matrices instead of one are needed to handle rotation very accurately;
# otherwise rotation is handled slightly less accurately (but still reasonably well);
# set to .true. if you are interested in precise effects related to rotation;
# set to .false. if you are solving very large inverse problems at high frequency and also undoing attenuation exactly
# using the UNDO_ATTENUATION flag above, in which case saving as much memory as possible can be a good idea.
# You can also safely set it to .false. if you are not in a period range in which rotation matters, e.g. if you are targeting very short-period body waves.
# if in doubt, set to .true.
# Set it to .true. if you have ABSORBING_CONDITIONS above, because in that case the code will use the three mass matrices anyway
# and thus there is no additional cost.
# this flag is of course unused if ROTATION above is set to .false.