diff --git a/doc/latex/0_Title.tex b/doc/latex/0_Title.tex
index 114bffd8185125bfa6d896367cf38ffed7817d12..4a8c7c9f6e8d17b35ffd8e1a3b4a334964a180a2 100644
--- a/doc/latex/0_Title.tex
+++ b/doc/latex/0_Title.tex
@@ -38,6 +38,8 @@ Tilman Metz
Niklas Thiel
+Florian Wittkamp
+
\end{minipage}
\hfill
\begin{minipage}[h]{0.65\textwidth}
diff --git a/doc/latex/5_Getting_Started.tex b/doc/latex/5_Getting_Started.tex
index be031ae2a72fc95ef24b69248ab172b2aacbd0de..c285c9dacb7e3d70e53059c5b00cf98b3e358ddb 100644
--- a/doc/latex/5_Getting_Started.tex
+++ b/doc/latex/5_Getting_Started.tex
@@ -27,13 +27,13 @@ This directory contains documentation on the software (this users guide) as well
Contains the model and benchmark files for DENISE.
\textbf{mfiles}\\
Here some Matlab routines (mfiles) are stored. These Matlab programs can be used to find optimal relaxation frequencies to approximate a constant Q (qapprox.m) or to plot Q as a function of frequency for certain relaxation frequencies and value of tau (qplot.m). It is necessary to have the Matlab Optimization Toolbox installed. For further details on the theory behind these algorithms we refer to the Phd thesis of T. Bohlen \cite{bohlen:98} and to the paper in which the socalled taumethod is described \cite{blanch:95}. The parameter tau is used in this software to define the level of attenuation.
+Here some Matlab routines (mfiles) are stored. These Matlab programs can be used to find optimal relaxation frequencies to approximate a constant Q (qapprox.m) or to plot Q as a function of frequency for certain relaxation frequencies and value of tau (qplot.m). It is necessary to have the Matlab Optimization Toolbox installed. For further details we refer to \cite{bohlen:98} and to the paper in which the so-called tau-method is described \cite{blanch:95}.
\textbf{par}\\
Parameter files for DENISE modeling.
\textbf{scripts}\\
Here, you will find examples of scriptfiles used to submit modeling jobs on clustercomputers.
+% \textbf{scripts}\\
+% Here, you will find examples of scriptfiles used to submit modeling jobs on clustercomputers.
\textbf{src}\\
This directory contains the complete source codes. The following programs are available and may be compiled using make $<$program$>$.
@@ -46,11 +46,7 @@ Before compiling the main program DENISE you have to compile the required additi
\textit{make}
\newline
% {\color{blue}{\begin{verbatim}
% bash2.05b$:~/DENISE/par> make
% \end{verbatim}}}

which should install the following libraries:
+which will install the following libraries:
{\color{blue}{\begin{verbatim}
lib cseife
@@ -71,44 +67,33 @@ The source code of DENISE is located in the directory DENISE/src. To compile DEN
# source code for model generation
MODEL_EL = half_space.c
+#MODEL = hh.c
+MODEL = ../genmod/1D_linear_gradient_visc.c
+MODEL_AC = ../genmod/1D_linear_gradient_ac.c
+MODEL_EL = ../genmod/1D_linear_gradient_el.c
+MODEL_VAC = ../genmod/1D_linear_gradient_viscac.c
EXEC= ../bin
+# Description:
+# CC = Compiler
+# LFLAGS = Linker flag
+# CFLAGS = Compiler flag
# Compiler (LAM: CC=hcc, CRAY T3E: CC=cc)

# ON Linux cluster running LAM (options for DENISE)
#CC=hcc
#LFLAGS=-lm -lmpi -lcseife
#CFLAGS=-O3
#SFLAGS=-L./../libcseife
#IFLAGS=-I./../libcseife

# On CRAY T3E
# CC=cc

# On MARWIN
CC=mpicc
LFLAGS=lm lcseife
+# LINUX with OpenMPI / IntelMPI and INTEL Compiler
+# Use icc whenever possible, this will be much faster than gcc
+CC=mpiicc
+LFLAGS=-lm -lcseife -lstfinv -laff -lfourierxx -lfftw3 -lstdc++
CFLAGS=-O3
SFLAGS=-L./../libcseife
IFLAGS=-I./../libcseife

# On HLRN system
#CC=mpcc
#LFLAGS=lm
+SFLAGS=-L./../contrib/libcseife -L./../contrib/bin
+IFLAGS=-I./../contrib/libcseife -I./../contrib/header -I.
# ALTIX
#CC=icc
#CFLAGS=-mp -O3 -ipo
#LFLAGS=-lmpi -lm -istatic
+# LINUX with OpenMPI / IntelMPI and GCC Compiler
+#CC=mpicc
+#LFLAGS=-lm -lcseife -lstfinv -laff -lfourierxx -lfftw3 -lstdc++
+#CFLAGS=-O3
+#SFLAGS=-L./../contrib/libcseife -L./../contrib/bin
+#IFLAGS=-I./../contrib/libcseife -I./../contrib/header -I.
# On the workstations in Karlsruhe (GPI)
CC=mpicc
LFLAGS=-lm -lcseife -lstfinv -laff -lfourierxx -lfftw3 -lstdc++
CFLAGS=-O3
SFLAGS=-L./../libcseife -L./../contrib/bin
IFLAGS=-I./../libcseife -I./../contrib/header
# after this line, no further editing should be necessary
# 
@@ -124,89 +109,67 @@ The command to start a simulation on 8 processor with the lowest priority of 19
\textit{mpirun -np 8 nice -19 ../bin/denise DENISE.json }
\newline
% {\color{blue}{\begin{verbatim}
% mpirun np 8 nice 19 ../bin/denise DENISE.json
% \end{verbatim}}}

It is often useful to save the standard output of the program for later reference. The screen output may be saved to DENISE.out using
\newline
\textit{mpirun -np 8 nice -19 ../bin/denise DENISE.json > DENISE.out}
\newline
% {\color{blue}{\begin{verbatim}
% mpirun np 8 nice 19 ../bin/denise DENISE.json > DENISE.out
% \end{verbatim}}}
+% \newpage
\newpage

After the output of geometry and model parameters the code starts the time stepping and displaying timing information:
+After the output of geometry and model parameters the code starts the time stepping and displaying information:
{\color{blue}{\begin{verbatim}
===============================================================================

 MYID=0 ***** Starting simulation (forward model) for shot 1 of 1 **********

===============================================================================

 Number of samples (nts) in source file: 3462
 Number of samples (nts) in source file: 3462
 Message from function wavelet written by PE 0
 1 source positions located in subdomain of PE 0
 have been assigned with a source signal.

 PE 0 outputs source time function in SU format to start/source_signal.0.su.shot1

 Computing timestep 1000 of 3462

 **Message from update_v (printed by PE 0):
 Updating particle velocities ... finished (real time: 0.00 s).
 particle velocity exchange between PEs ... finished (real time: 0.00 s).

 **Message from update_s (printed by PE 0):
 Updating stress components ... finished (real time: 0.00 s).
 stress exchange between PEs ... finished (real time: 0.00 s).
 total real time for timestep 1000 : 0.01 s.

 Computing timestep 2000 of 3462

 **Message from update_v (printed by PE 0):
 Updating particle velocities ... finished (real time: 0.00 s).
 particle velocity exchange between PEs ... finished (real time: 0.00 s).

 **Message from update_s (printed by PE 0):
 Updating stress components ... finished (real time: 0.00 s).
 stress exchange between PEs ... finished (real time: 0.00 s).
 total real time for timestep 2000 : 0.01 s.

 Computing timestep 3000 of 3462

 **Message from update_v (printed by PE 0):
 Updating particle velocities ... finished (real time: 0.00 s).
 particle velocity exchange between PEs ... finished (real time: 0.00 s).

 **Message from update_s (printed by PE 0):
 Updating stress components ... finished (real time: 0.00 s).
 stress exchange between PEs ... finished (real time: 0.00 s).
 total real time for timestep 3000 : 0.01 s.
 PE 0 is writing 11 seismograms (vx) to
 su/DENISE_US_x.su.shot1.it1
 PE 0 is writing 11 seismograms (vy) to
 su/DENISE_US_y.su.shot1.it1

 **Info from main (written by PE 0):
 CPU time of program per PE: 17 seconds.
 Total real time of program: 18.08 seconds.
 Average times for
 velocity update: 0.003 seconds
 stress update: 0.002 seconds
 velocity exchange: 0.000 seconds
 stress exchange: 0.000 seconds
 timestep: 0.005 seconds
+==============================================================================
+
+ MYID=0 * Starting simulation (forward model) for shot 1 of 5. Iteration 1 **
+
+==============================================================================
+
+ ****************************************
+ ****************************************
+
+==============================================================================
+
+ MYID=0 * Starting simulation (forward model) for shot 2 of 5. Iteration 1 **
+
+==============================================================================
+
+ ****************************************
+ ****************************************
+
+==============================================================================
+
+ MYID=0 * Starting simulation (forward model) for shot 3 of 5. Iteration 1 **
+
+==============================================================================
+
+ ****************************************
+ ****************************************
+
+==============================================================================
+
+ MYID=0 * Starting simulation (forward model) for shot 4 of 5. Iteration 1 **
+
+==============================================================================
+
+ ****************************************
+ ****************************************
+
+==============================================================================
+
+ MYID=0 * Starting simulation (forward model) for shot 5 of 5. Iteration 1 **
+
+==============================================================================
+
+ ****************************************
+ ****************************************
+
+ Forward calculation finished.
\end{verbatim}}}
\section{Postprocessing}
The wavefield snapshots can be merged using the program \textit{snapmerge}. The program snapmerge is not a MPI program. Therefore, it can be executed without MPI and the mpirun command. You can run snapmerge on any PC since a MPI environment (e.g. LAM) is not required. You may therefore copy the snapshot outputs of the different nodes to another nonMPI computer to merge the files together. \textit{snapmerge} reads the required information from the DENISE parameter file. Simply type
+The wavefield snapshots can be merged using the program \textit{snapmerge}. The program snapmerge is not an MPI program. Therefore, it can be executed without MPI and the mpirun command. You can run snapmerge on any PC since an MPI environment is not required. You may therefore copy the snapshot outputs of the different nodes to another non-MPI computer to merge the files together. \textit{snapmerge} reads the required information from the DENISE parameter file. Simply type
\newline
\textit{../bin/snapmerge DENISE.json }
diff --git a/doc/latex/6_Parameter_Definition.tex b/doc/latex/6_Parameter_Definition.tex
index a986a9794116ee7b549ae410f20e5727cd6e1d61..c03101256d6232ee63870c660181f0116de69eae 100644
--- a/doc/latex/6_Parameter_Definition.tex
+++ b/doc/latex/6_Parameter_Definition.tex
@@ -21,12 +21,6 @@ Sometimes possible values are described in comments, feel free to add own commen
If you use the json input file some default values for the forward modeling and the inversion are set. The default values are written in the following subsections in red. The input file \texttt{DENISE\_FW\_all\_parameters.json} in the directory \texttt{par/in\_and\_out} is an input file for a forward modeling containing all parameters that can be defined. Analog to that the input file \texttt{DENISE\_INV\_all\_parameters.json} is an example for an inversion input file. The input files \texttt{DENISE\_FW.json} and \texttt{DENISE\_INV.json} contain only the parameters that must be set by the user.\\
% In the beginning of the code development DENISE used a different kind of input parameter file. The current version is still able to read this old input file. The old parameter file was organized as follows:
% {\color{blue}{\begin{verbatim}
% description_of_parameter_(VARNAME)_(switches) = parameter value
% \end{verbatim}}}
% where VARNAME denotes the name of the global variable in which the value is saved in all functions of the program. The possible values are described in switches. A comment line is indicated by a \# on the very first position of a line. You can find an example of such a parameter file in \texttt{par/in\_and\_out/DENISE\_HESSIAN.inp}. You can switch between the two possible input files via the file extension. Use ``.inp'' as file extension to read in the old input file or the file extension ``.json'' to use the new input file.

\section{Domain decomposition}
{\color{blue}{\begin{verbatim}
@@ -349,10 +343,8 @@ Default values are:
NDT=1
\end{verbatim}}}
If SEISMO$>$0 seismograms recorded at the receiver positions are written to the corresponding output files. The sampling rate of the seismograms is NDT*DT seconds. In case of a small
time step interval and a high number of time steps, it might be useful to choose a high NDT in order to avoid a unnecessary detailed sampling of the seismograms and consequently large files of seismogram data. Possible output formats of the seismograms are SU, ASCII and BINARY. It is recommended to use SU format for saving the seismograms. The main advantage of this format is that the time step interval (NDT*DT) and the acquisition geometry (shot and receiver locations) are stored in the corresponding SU header words. Also additional header words like offset are set by DENISE. This format thus facilitates a further visualization and processing of the synthetic seismograms. Note, however, that SU cannot handle sampling rates smaller than 1.0e6 seconds and the number of samples is limited to about 32.000. In such cases, you should increase the sampling rate by increasing NDT. If this is impossible (for example because the Nyquist criterion is violated) you must choose a different output format (ASCII or binary).
+If SEISMO$>$0 seismograms recorded at the receiver positions are written to the corresponding output files. The sampling rate of the seismograms is NDT*DT seconds. In case of a small time step interval and a high number of time steps, it might be useful to choose a high NDT in order to avoid an unnecessarily detailed sampling of the seismograms and consequently large files of seismogram data. Possible output formats of the seismograms are SU, ASCII and BINARY. It is recommended to use SU format for saving the seismograms. The main advantage of this format is that the time step interval (NDT*DT) and the acquisition geometry (shot and receiver locations) are stored in the corresponding SU header words. Also additional header words like offset are set by DENISE. This format thus facilitates a further visualization and processing of the synthetic seismograms. Note, however, that SU cannot handle sampling rates smaller than 1.0e-6 seconds and the number of samples is limited to about 32.000. In such cases, you should increase the sampling rate by increasing NDT. If this is impossible (for example because the Nyquist criterion is violated) you must choose a different output format (ASCII or binary).
\newpage
\section{Qapproximation}
\label{q_approx}
@@ -371,19 +363,11 @@ Default values are:
L=0
\end{verbatim}}}
These lines may be used to define an overall level of intrinsic (viscoelastic) attenuation of seismic waves. In case of L=0, a purely elastic simulation is performed
(no absorption). The frequency dependence of the (intrinsic) Quality factor $Q(\omega)$ is defined by
the L relaxation
frequencies (FL=$f_l=2\pi/\tau_{\sigma l}$) and one value $\tau$ (see equation 5 in \cite{bohlen:02}). For a single relaxation mechanism (L=1) $Q \approx 2/\tau$ \citep{bohlen:98,blanch:95,bohlen:02}. If the model is generated ''on the fly'' the value
of TAU can be assigned to all gridpoints for both P and Swaves. Thus, intrinsic attenuation is homogeneous and equal for P and Swaves ($Q_p(\omega)=Q_s(\omega)$). However, it is possible to simulate any spatial distribution of
absorption by assigning the gridpoints with different Qvalues by reading external grid files for $Q_p$ (Pwaves) and $Q_s$ (Swaves) (see \texttt{src/readmod.c}) or by generating these files ''on the fly'' (see section \ref{gen_of_mod} or exemplary model function \texttt{genmod/1D\_linear\_gradient\_visc.c}).
+These lines may be used to define an overall level of intrinsic (viscoelastic) attenuation of seismic waves. In case of L=0, a purely elastic simulation is performed (no absorption). The frequency dependence of the (intrinsic) Quality factor $Q(\omega)$ is defined by the L relaxation frequencies (FL=$f_l=2\pi/\tau_{\sigma l}$) and one value $\tau$ (see equation 5 in \cite{bohlen:02}). For a single relaxation mechanism (L=1) $Q \approx 2/\tau$ \citep{bohlen:98,blanch:95,bohlen:02}. If the model is generated ``on the fly'' the value of TAU can be assigned to all gridpoints for both P- and S-waves. Thus, intrinsic attenuation is homogeneous and equal for P- and S-waves ($Q_p(\omega)=Q_s(\omega)$). However, it is possible to simulate any spatial distribution of absorption by assigning the gridpoints with different Q-values by reading external grid files for $Q_p$ (P-waves) and $Q_s$ (S-waves) (see \texttt{src/readmod.c}) or by generating these files ``on the fly'' (see section \ref{gen_of_mod} or exemplary model function \texttt{genmod/1D\_linear\_gradient\_visc.c}).
Small $Q$ values ($Q<50$) may lead to significant amplitude decay and velocity dispersion. Please note, that due to dispersive media properties the viscoelastic velocity model is defined for the reference frequency only. In DENISE, this
reference frequency is specified as the center source frequency. With F\_REF one can set the reference frequency manually. At the exact reference frequency, elastic and viscoelastic models are equivalent. As a consequence, slightly smaller and larger minimum and maximum velocity values
occure in the viscoelastic model.
+Small $Q$ values ($Q<50$) may lead to significant amplitude decay and velocity dispersion. Please note, that due to dispersive media properties the viscoelastic velocity model is defined for the reference frequency only. In DENISE, this reference frequency is specified as the center source frequency. With F\_REF one can set the reference frequency manually. At the exact reference frequency, elastic and viscoelastic models are equivalent. As a consequence, slightly smaller and larger minimum and maximum velocity values occur in the viscoelastic model.
The frequency dependence of attenuation, i.e. $Q$ and phase velocity as a function of frequency, may be calculated using the Matlab functions in the
directory mfiles.
+The frequency dependence of attenuation, i.e. $Q$ and phase velocity as a function of frequency, may be calculated using the Matlab functions in the directory mfiles. The Matlab script /mfiles/qplot.m can be used to plot $Q(\omega)$ for different values of L, $f_l$ and $\tau$. The m-file qapprox.m in the same directory finds optimal values for L, $f_l$ and $\tau$ that fit a desired function $Q(\omega)=const$ in a least-squares sense.
\section{Wavefield snapshots}
@@ -440,7 +424,6 @@ If TIME\_FILT is set to one the log file L2\_LOG.dat contains a 9th column with
"DATA_DIR" : "su/measured_data/DENISE_real",
"INVMAT1" : "1",
"INVMAT" : "0",
% "INVTYPE" : "2",
"QUELLTYPB" : "1",
"MISFIT_LOG_FILE" : "L2_LOG.dat",
"VELOCITY" : "0",
@@ -449,30 +432,24 @@ If TIME\_FILT is set to one the log file L2\_LOG.dat contains a 9th column with
"INV_RHO_ITER" : "0",
"INV_VP_ITER" : "0",
"INV_VS_ITER" : "0",
%
% "Cosine taper" : "comment",
% "TAPER" : "0",
% "TAPERLENGTH" : "10",
\end{verbatim}}}
{\color{red}{\begin{verbatim}
Default values are:
% INVTYPE=2
MISFIT_LOG_FILE=L2_LOG.dat
VELOCITY=0
INV_RHO_ITER=0
INV_VP_ITER=0
INV_VS_ITER=0
% TAPER=0
% TAPERLENGTH=10
\end{verbatim}}}
This section covers some general inversion parameters. The maximum number of iterations is defined by ITERMAX. The switch INVMAT controls if only the forward modeling code should be used (INVMAT=10), e.\,g. to calculate synthetic seismograms or a complete FWT run (INVMAT=0). The seismic sections of the real data need to be located in DATA\_DIR and should have the ending \_x.su.shot for the xcomponent and so on. As noted in section \ref{model parametrizations} the gradients can be expressed for different model parameterizations. The switch INVMAT1 defines which parameterization should be used, seismic velocities and density (Vp,Vs,rho, INVMAT1=1), seismic impedances (Zp,Zs,rho, INVMAT1=2) or Lam$\rm{\acute{e}}$ parameters ($\rm{\lambda,\mu,\rho}$, INVMAT1=3). If models are read from binary files appropriate file extensions are required for the different models (see section \ref{gen_of_mod}). Depending on the data different components of the seismic sections can be backpropagated. For two component data (x and ycomponent) set QUELLTYPB=1, only the ycomponent (QUELLTYPB=2) and only the xcomponent (QUELLTYPB=3). For acoustic modelling pressure seismograms and QUELLTYPB=4 have to be used.\\
During the inversion the misfit values are saved in a log file specified in MISFIT\_LOG\_FILE. The log file consists of eight or nine columns and each line corresponds to one iteration step. The used step length is written in the first column. In the second to fourth column the three test step lengths used for the step length estimation are saved. The corresponding misfit values for these test step lengthes and the test shots are written to column five to seven. Column eight corresponds to the total misfit for all shots and if you use freqeuncy filtering then the ninth column corresponds to the corner frequency of the lowpass filter used in the inversion step.\\
In general DENISE tries to minimize the misfit in the particle displacement between the observed data and the synthetic data. If you set the switch VELOCITY to 1 the misfit in the particle velocity seismograms is minimized.\\
The parameters INV\_RHO\_ITER, INV\_VP\_ITER and INV\_VS\_ITER define from which inversion step on an inversion for density, Vp and Vs, respectively, is applied. To invert for one parameter from the beginning of an inversion set it to 0 or 1.\\
+This section covers some general inversion parameters. The maximum number of iterations is defined by ITERMAX. The switch INVMAT controls if only the forward modeling code should be used (INVMAT=10), e.\,g. to calculate synthetic seismograms or a complete FWT run (INVMAT=0). The seismic sections of the real data need to be located in DATA\_DIR and should have the ending \_x.su.shot for the x-component and so on. As noted in section \ref{model parametrizations} the gradients can be expressed for different model parameterizations. The switch INVMAT1 defines which parameterization should be used, seismic velocities and density (Vp,Vs,rho, INVMAT1=1), seismic impedances (Zp,Zs,rho, INVMAT1=2) or Lam$\rm{\acute{e}}$ parameters ($\rm{\lambda,\mu,\rho}$, INVMAT1=3). If models are read from binary files appropriate file extensions are required for the different models (see section \ref{gen_of_mod}). Depending on the data different components of the seismic sections can be backpropagated. For two component data (x- and y-component) set QUELLTYPB=1, only the y-component (QUELLTYPB=2) and only the x-component (QUELLTYPB=3). For acoustic modelling pressure seismograms and QUELLTYPB=4 have to be used.
+
+During the inversion the misfit values are saved in a log file specified in MISFIT\_LOG\_FILE. The log file consists of eight or nine columns and each line corresponds to one iteration step. The used step length is written in the first column. In the second to fourth column the three test step lengths used for the step length estimation are saved. The corresponding misfit values for these test step lengths and the test shots are written to column five to seven. Column eight corresponds to the total misfit for all shots and if you use frequency filtering then the ninth column corresponds to the corner frequency of the low-pass filter used in the inversion step.
% The parameters TAPER, TAPERLENGTH and INVTYPE are debug parameters and should not be changed.
+In general DENISE tries to minimize the misfit in the particle displacement between the observed data and the synthetic data. If you set the switch VELOCITY to 1 the misfit in the particle velocity seismograms is minimized.
+
+The parameters INV\_RHO\_ITER, INV\_VP\_ITER and INV\_VS\_ITER define from which inversion step on an inversion for density, Vp and Vs, respectively, is applied. To invert for one parameter from the beginning of an inversion set it to 0 or 1.
\section{Output of inversion results}
@@ -503,11 +480,14 @@ Default values are:
\end{verbatim}}}
With the use of a workflow file, you can define different FWI stages. For instance, one FWI stage could refer to one corner frequency of a lowpass filter or to a specific time window. Every line in the workflow file corresponds to one FWI stage. To use a workflow file the switch USE\_WORKFLOW have to be set to 1. The algorithm will automatically change to the next line of the workflow file if the abort criterium of the current line is reached or if no step length could be found which reduces the misfit. The structure of the variables inside the workflow file is as follow: \\
+\noindent
\begin{tabular}{llllllllllll}
\# & INV\_VS & INV\_VP & INV\_RHO & PRO & TIME\_FILT & FC & WAVETYPE & 0 & 0 & EPRECOND & EPSILON\_WE\\
\end{tabular}
\\ \\
The first column is the number of the line. With INV\_* etc. you can activate the inversion for VS, VP or RHO, respectively. The abort criterium in percent for this FWI stage will be the declared in the variable PRO. With TIME\_FILT you can activate the frequency filtering with the corner frequency FC. WAVETYPE can be used to switch between PSV and SH modeling, however it is recommended to use PSV modeling (WAVETYPE==1) due to the current development on SH modeling. The following to zeros are placeholders for an upcoming update. With EPRECOND and EPSILON\_WE you can control the approx. Hessian. Please note, that all features which are used eg. TIME\_FILT within the workflow have to be activated in the .JSON file.\\
+
+\ \\
+The first column is the number of the line. With INV\_* etc. you can activate the inversion for VS, VP or RHO, respectively. The abort criterium in percent for this FWI stage will be declared in the variable PRO. With TIME\_FILT you can activate the frequency filtering with the corner frequency FC. WAVETYPE can be used to switch between PSV and SH modeling, however it is recommended to use PSV modeling (WAVETYPE==1) due to the current development on SH modeling. The following two zeros are placeholders for an upcoming update. With EPRECOND and EPSILON\_WE you can control the approx. Hessian. Please note, that all features which are used, e.g. TIME\_FILT (see section \ref{sec:filtering}), within the workflow have to be activated in the .JSON file.
+
For an example of a workflow file, have a look in the par/ folder.
@@ -553,22 +533,21 @@ The preconditioned conjugate gradient (PCG), which can be used by GRAD\_METHOD==
The LBFGS method is a quasinewton method, which will implicitly approximate the Hessian and can be chosen by setting GRAD\_METHOD to 2. Therefore the differences of the models and the gradients of a N\_LBFGS last iterations will be stored. First tests showed that N\_LBFGS=10 should be enough. The LBFGS algorithm is implemented after the book \cite{nocedal:1999}, which is recommended to be read prior to using the LBFGS method. Prior to starting the LBFGS method one update with steepest decent have to be done to get a first set of model and gradient differences which are necessary to start LBFGS.
The ensure a stable convergence when LBFGS is used the step length has to satisfy the wolf condition. A detailed description of the wolfe condition can be also found in \cite{nocedal:1999}. The first wolfe condition, which is also called sufficient decrease condition, requires that the misfit for a chosen step length will be decreased at least linear with the step length. Let $f(x)$ be the objective function, $x$ the current model, $\alpha$ the step length and $p$ the desired model update, so the first wolfe condition reads \citep{nocedal:1999}:
+To ensure a stable convergence when LBFGS is used the step length has to satisfy the Wolfe condition. A detailed description of the Wolfe condition can also be found in \cite{nocedal:1999}. The first Wolfe condition, which is also called sufficient decrease condition, requires that the misfit for a chosen step length will be decreased at least linearly with the step length. Let $f(x)$ be the objective function, $x$ the current model, $\alpha$ the step length and $p$ the desired model update, so the first Wolfe condition reads \citep{nocedal:1999}:
\begin{align}
f(x+\alpha \cdot p) \le f(x) + c_1 \cdot \alpha \cdot\bigtriangledown f(x)^{T} \cdot p
\end{align}
 The second one, which is called curvature condition, requires that the slope of the misfit function will be higher than the initial slope. This ensures that the next misfit minimum will be reached fast enough.
 The second criterium is defined as \citep{nocedal:1999}:
 \begin{align}
 \bigtriangledown f(x+\alpha \cdot p)^{T} \cdot p \geq c_2 \cdot\bigtriangledown f(x)^{T} \cdot p
 \end{align}
 To verify this condition the full gradient (with all shots!) of the updated model have to be calculated, so the verification of this condition is very expensive in terms of computation time. In theory always a step length of one should be tried first, however to save time it is possible to chose the step length from the iteration before which satisfied the wolfe condition. This behavior can be controlled with WOLFE\_TRY\_OLD\_STEPLENGTH, if this is set to 1 always the old step length will be tried first. In practice the experience has shown that at the beginning of a FWI stage a couple of step lengths will be tested and then the algorithm converges to a step length which will be accepted by the wolfe condition until the misfit cannot be reduced further. With the variable WOLFE\_NUM\_TEST a maximum number of test calculations can be defined. If after WOLFE\_NUM\_TEST tests no step length could be found, the algorithm takes the step length of the tests which reduced the misfit most, even if this step length does not satisfy the wolfe condition. If no step length was found which reduces the misfit, the algorithm will switch to the next FWI stage in the workflow file or abort the inversion.
 The variable $c_1$ will be $10^{7}\cdot max(f(x)^{1}$ and $c_2$ is set to 0.9. With WOLFE\_C1\_SL and WOLFE\_C2\_SL it is possible to manipulate $c_1$ and $c_2$ with the JSON input file. If the parameters are not set, they will be set to default values, so in most cases it should be the best to do not specify theses parameters in the JSON file.
 A step length search which is based on the wolfe condition can be used which the switch WOLFE\_CONDITION. Please note that this step length search is only available if GRAD\_METHOD==2.


 However, it is possible to chose a second step length search which the LBFGS method. This one is available by the switch LBFGS\_STEP\_LENGTH=0. If you use LBFGS\_STEP\_LENGTH the wolfe condition based step length search will be deactivated automatically. Whit this search the step length 0.1, 0.5 and 1.0 will be tried and with a parabolic fit the best step length will be estimated. This search algorithm is implemented to save computation time, however the wolfe condition will not be checked, so the LBFGS stability will not be satisfied. It is highly recommended to use the step length search based on the wolfe condition. It is likely that this option will be removed in a future releas.
+The second one, which is called curvature condition, requires that the slope of the misfit function will be higher than the initial slope. This ensures that the next misfit minimum will be reached fast enough.
+The second criterium is defined as \citep{nocedal:1999}:
+\begin{align}
+ \bigtriangledown f(x+\alpha \cdot p)^{T} \cdot p \geq c_2 \cdot\bigtriangledown f(x)^{T} \cdot p
+\end{align}
+To verify this condition the full gradient (with all shots!) of the updated model has to be calculated, so the verification of this condition is very expensive in terms of computation time. In theory always a step length of one should be tried first, however to save time it is possible to choose the step length from the iteration before which satisfied the Wolfe condition. This behavior can be controlled with WOLFE\_TRY\_OLD\_STEPLENGTH, if this is set to 1 always the old step length will be tried first. In practice the experience has shown that at the beginning of a FWI stage a couple of step lengths will be tested and then the algorithm converges to a step length which will be accepted by the Wolfe condition until the misfit cannot be reduced further. With the variable WOLFE\_NUM\_TEST a maximum number of test calculations can be defined. If after WOLFE\_NUM\_TEST tests no step length could be found, the algorithm takes the step length of the tests which reduced the misfit most, even if this step length does not satisfy the Wolfe condition. If no step length was found which reduces the misfit, the algorithm will switch to the next FWI stage in the workflow file or abort the inversion.
+The variable $c_1$ will be $10^{-7}\cdot \max(f(x))^{-1}$ and $c_2$ is set to 0.9. With WOLFE\_C1\_SL and WOLFE\_C2\_SL it is possible to manipulate $c_1$ and $c_2$ with the JSON input file. If the parameters are not set, they will be set to default values, so in most cases it is best not to specify these parameters in the JSON file.
+A step length search which is based on the Wolfe condition can be used with the switch WOLFE\_CONDITION. Please note that this step length search is only available if GRAD\_METHOD==2.
+
+However, it is possible to choose a second step length search with the LBFGS method. This one is available by the switch LBFGS\_STEP\_LENGTH=0. If you use LBFGS\_STEP\_LENGTH the Wolfe condition based step length search will be deactivated automatically. With this search the step lengths 0.1, 0.5 and 1.0 will be tried and with a parabolic fit the best step length will be estimated. This search algorithm is implemented to save computation time, however the Wolfe condition will not be checked, so the LBFGS stability will not be satisfied. It is highly recommended to use the step length search based on the Wolfe condition. It is likely that this option will be removed in a future release.
\section{Misfit definition}
@@ 626,8 +605,7 @@ EPS\_SCALE += EPS\_SCALE/SCALEFAC.\\
If a corresponding value epst(3) can be found after STEPMAX forward modellings, DENISE can fit a parabola through the 3 points (L2t(i),epst(i)) and estimates an optimum step length at the minimum of the parabola. If the L2value L2t(3) after STEPMAX forward models is still smaller than L2t(2) the optimum steplength estimated by parabolic fitting will be not larger than epst(3).\\
Please note: This step length search is only available wenn PCG method (GRAD\_METHOD==1) is used and will be automatically selected.
\newpage
+Please note: This step length search is only available when the PCG method (GRAD\_METHOD==1) is used and will be automatically selected.
\section{Abort criterion}
@@ 640,8 +618,88 @@ Additionally to the parameter ITERMAX a second abort criterion is implemented in
the current iteration step and the misfit of the second to last iteration step is calculated with regard to the misfit of the second to last iteration step. If this relative change is
smaller than PRO the inversion aborts or in case of using frequency filtering (TIME\_FILT==1) it increases the corner frequency of the low pass filter and therefore switches to next higher bandwidth.
+\newpage
+
+\section{Source wavelet inversion}
+To remove the contribution of the unknown source time function (STF) from the waveform residuals, it is necessary to design a filter which minimizes the misfit to the field recordings and raw synthetics. Therefore, a second forward simulation is applied. The first one is done with the wavelet specified in QUELLART and the second one with the optimized source wavelet saved in SIGNAL\_FILE (see Section~\ref{sec:sources}). This optimized source wavelet is kept constant within N\_STF or within a frequency range (see below).\\
+
+{\color{blue}{\begin{verbatim}
+"Definition of inversion for source time function" : "comment",
+ "INV_STF" : "0",
+ "PARA" : "fdlsq:exp=1.0",
+ "N_STF" : "10",
+ "N_STF_START" : "1",
+ "TAPER_STF" : "0",
+ "TRKILL_STF" : "0",
+ "TRKILL_FILE_STF" : "./trace_kill/trace_kill.dat",
+\end{verbatim}}}
+
+{\color{red}{\begin{verbatim}
+Default values are:
+ INV_STF=0
+\end{verbatim}}}
+
+INV\_STF should be switched to 1 if you want to invert for the source time function.
+\newline
+
+An example for the parameter string provided in PARA is:
+\begin{itemize}
+ \item To select frequency domain least squares (fdlsq), apply offset dependent weights and a waterlevel use\\
+ \textit{fdlsq:exp=1.0:waterlevel=0.01}
+\end{itemize}
+
+In most cases the frequency domain least squares engine is the best approach to find a suitable wavelet. There are also other possibilities; if you want to use those, a detailed look at the libraries and the accompanying documentation provided in the folder /contrib is recommended. Here only the two main parameters for the fdlsq approach are described.
+
+Due to the amplitude decay with offset to the source, signals from receivers at larger offsets would contribute less to the optimization criterion for which the source wavelet correction filter is constructed. The option exp=k provides means to add a weight factor $(r/1\text{m})^k$ to each signal, where r is the receiver to source offset. This is used to compensate the decrease in signal amplitude.
+
+The procedure is implemented in the Fourier domain. Fourier coefficients for the correction filter are constructed such that the residual between Fourier coefficients of recorded signals and Fourier coefficients of synthetic signals after application of the correction filter are minimized in a leastsquares sense. The leastsquares solution is subject to a damping constraint. If the weighted power spectral density of the synthetics at a given frequency drops below the lth fraction (waterlevel parameter) of the average (over all frequencies) power spectral density, the filter coefficients are damped (artificially made smaller) by the damping constraint (i.e. waterlevel as seen from the perspective of deconvolution).
+
+Start with l=0.01 as a reasonable initial value for the waterlevel. The best fitting waterlevel must be found by trial and error and depends on signal noise and on how well the synthetics describe the actually observed wave propagation.
+
+The theory behind the Fourier domain least squares procedure is outlined by Lisa Groos (2013, Appendix F, page 146). She also describes a way to find an appropriate waterlevel by application of the L-curve criterion (Groos, 2013, Appendix G, page 148).
+\newline
+
+N\_STF is the increment between the iteration steps. N\_STF\_START defines at which iteration step the inversion for STF should start. This parameter has to be set at least to 1, NOT(!) 0. When using TAPER\_STF = 1, the source signal is tapered. See \texttt{src/taper.c} for the taper definition. With TRKILL\_STF = 1 it is possible to apply a trace killing for the estimation of the source wavelet correction filter.
+\newline
+
+Please note: If you additionally switch on frequency filtering during the inversion (TIME\_FILT=1 or TIME\_FILT=2), the parameters N\_STF and N\_STF\_START will be ignored. But the optimal source time function will be inverted for the first iteration and after every change of the frequency range.
+
+For more information see chapter \ref{cha:STFInversion}.
+
+
+\section{Frequency filtering}
+\label{sec:filtering}
+{\color{blue}{\begin{verbatim}
+"Frequency filtering during inversion" : "comment",
+ "TIME_FILT" : "0",
+ "F_HP" : "1",
+ "FC_START" : "10.0",
+ "FC_END" : "75.0",
+ "FC_INCR" : "10.0",
+ "ORDER" : "2",
+ "ZERO_PHASE" : "0",
+ "FREQ_FILE" : "frequencies.dat",
+ "WRITE_FILTERED_DATA" : "0",
+\end{verbatim}}}
+
+{\color{red}{\begin{verbatim}
+Default values are:
+ TIME_FILT=0
+ ZERO_PHASE=0
+\end{verbatim}}}
+
+TIME\_FILT = 1 can be set to use frequency filtering. The parameter FC\_START defines the corner frequency of the Butterworth low pass filter at the beginning of the inversion. The parameter FC\_END defines the maximum corner frequency used in the inversion. The parameter FC\_INCR controls in which steps the bandwidth is increased during the inversion.
+
+If TIME\_FILT = 2 individual frequencies for each step can be read from FREQ\_FILE. In this file the first entry must be the number of frequencies used for filtering. Each frequency in Hz has to be specified in a row. The example file frequencies.dat can be found in \texttt{trunk/par}.
+
+The parameter ORDER defines the order of the Butterworth low pass filter. If the variable ZERO\_PHASE is set to one, a zero phase filter is applied. It is realized by filtering the traces in both forward and reverse direction with the defined Butterworth low pass filter. Therefore, the effective order of the low pass filter is doubled.
+
+With F\_HP an additional high pass filter can be applied, where F\_HP is the corner frequency in Hz.
+
+With the parameter PRO (see~\ref{json:abort_criterion}) one has to adjust the criterion that defines at which points the bandwidth of the signals is increased.
+
+With the parameter WRITE\_FILTERED\_DATA it is possible to write the time filtered measured data to disk which are filtered with the same filter as the synthetic data. Therefore this output can be used to visualize the residuals between synthetic and measured data.
\section{Minimum number of iteration per frequency}
{\color{blue}{\begin{verbatim}
"Minimum number of iteration per frequency" : "comment",
"MIN_ITER" : "10",
@@ 654,7 +712,43 @@ MIN_ITER=0
If you are using frequeny filtering (TIME\_FILT==1) during the inversion, you can set a minimum number of iterations per frequency. Within this minimum number of iteration per frequency the abort criterion PRO will receive no consideration.
\section{Definition of the gradient taper geometry}
+\section{Data manipulation}
+\subsection{Time windowing}
+{\color{blue}{\begin{verbatim}
+"Time windowing and damping" : "comment",
+ "TIMEWIN" : "0",
+ "TW_IND" : "0",
+ "PICKS_FILE" : "./picked_times/picks",
+ "TWLENGTH_PLUS" : "0.01",
+ "TWLENGTH_MINUS" : "0.01",
+ "GAMMA" : "100000",
+\end{verbatim}}}
+
+{\color{red}{\begin{verbatim}
+Default values are:
+ TIMEWIN=0
+\end{verbatim}}}
+
+To apply time windowing in a time series the parameter TIMEWIN must be set to 1. An automatic picker routine is not integrated at the moment. The point in time (picked time) for each source must be specified in separate files. The folder and file name can be set with the parameter PICKS\_FILE. The files must be named like this PICKS\_FILE\_.dat. So the number of sources in (SRCREC) must be equal to the number of files. Each file must contain the picked times for every receiver.\
+The parameters TWLENGTH\_PLUS and TWLENGTH\_MINUS specify the length of the time window after (PLUS) and before (MINUS) the picked time. The unit is seconds. The damping factor GAMMA must be set individually. When using TW\_IND = 1 three columns are expected in PICK\_FILE for each trace with the picked time, time window length plus and time window length minus. In this case TWLENGTH\_PLUS and TWLENGTH\_MINUS are ignored.
+
+\subsection{Trace killing}
+{\color{blue}{\begin{verbatim}
+"Trace killing" : "comment",
+ "TRKILL" : "0",
+ "TRKILL_FILE" : "./trace_kill/trace_kill.dat",
+\end{verbatim}}}
+
+{\color{red}{\begin{verbatim}
+Default values are:
+ TRKILL=0
+\end{verbatim}}}
+
+If you don't want to use single traces or a subset of your data, the parameter TRKILL can be used. If it is set to 1, trace killing is in use. The necessary file can be selected with the parameter TRKILL\_FILE. The file should include a kill table, where the number of rows is the number of receivers and the number of columns reflects the number of sources. Now, a 1 (ONE) means, YES, please kill the trace. The trace is NOT used, it should be killed. A 0 (ZERO) means, NO, this trace should NOT be killed.
+
+
+\section{Gradient manipulation}
+\subsection{Definition of a gradient taper}
\label{sec:Definition_of_gradient_taper_geometry}
{\color{blue}{\begin{verbatim}
"Definition of gradient taper geometry" : "comment",
@@ 689,8 +783,7 @@ To apply an externally defined taper on the gradients in DENISE, the parameter S
It is also possible to apply externally defined taper files to the gradients of the single shots before summation of these gradients. This can be done by setting SWS\_TAPER\_FILE\_PER\_SHOT to one. DENISE expects the tapers in TAPER\_FILE\_NAME.shot for the vp gradients, in TAPER\_FILE\_NAME\_U.shot for the vs gradients and in TAPER\_FILE\_NAME\_RHO.shot for the density gradients.

\section{Definition of spatial filtering of the gradients}
+\subsection{Spatial filtering of the gradients}
{\color{blue}{\begin{verbatim}
"Definition of smoothing (spatial filtering) of the gradients" : "comment",
"SPATFILTER" : "0",
@@ 706,8 +799,7 @@ Default values are:
One can apply a spatial Gaussian filter to the gradients that acts in horizontal direction (SPATFILTER=1). Have a look at the function \texttt{spat\_filt.c} for more information.

\section{Smoothing the gradients}
+\subsection{Smoothing the gradients}
{\color{blue}{\begin{verbatim}
"Definition of smoothing the gradients with a 2DGaussian filter" : "comment",
"GRAD_FILTER" : "0",
@@ 730,7 +822,8 @@ For GRAD\_FILT\_WAVELENGTH = 1 (and TIME\_FILT=1) a new wavelength dependent fil
where FC is the corner frequency of TIME\_FILT and A is a weighting factor.
\section{Limits for the model parameters}
+\section{Model manipulation}
+\subsection{Limits for the model parameters}
{\color{blue}{\begin{verbatim}
"Upper and lower limits for model parameters" : "comment",
"VPUPPERLIM" : "3500",
@@ 753,8 +846,7 @@ RHOLOWERLIM=0.0
The six limits for the model parameters specify the minimum and maximum values which may be achieved by the inversion. Here, known a priori information can be used. Depending on the choice of the parameter INVMAT1, either vp and vs or lambda and mu is meant.

\section{Limited update of model parameters}
+\subsection{Limited update of model parameters}
{\color{blue}{\begin{verbatim}
"Limited model update in reference to the starting model" : "comment",
"S" : "0",
@@ 768,8 +860,7 @@ Default values are:
\end{verbatim}}}
For S=1 individual limited updates of the elastic model parameters are considered. S\_VS, S\_VP and S\_RHO are the maximum updates of the model parameters at each grid point (given in \%) in reference to the model parameters of their starting models for Swave and Pwave velocity as well as density, respectively. For example S\_VS = 30.0 means that the model parameter update of the Swave velocity model is limited to 30\,\% at each grid point in reference to the starting model of the Swave velocity model. Zero (S\_VS=0.0, S\_VP=0.0 or S\_RHO=0.0) means that the model parameter won't be updated at all. S\_VS=100.0, S\_VP=100.0 or S\_RHO=100.0 equal to S=0.

\section{Vp/Vs ratio}
+\subsection{Vp/Vs ratio}
{\color{blue}{\begin{verbatim}
"Minimum Vp/Vsratio" : "comment",
"VP_VS_RATIO" : "1.5",
@@ 780,69 +871,7 @@ Default values are:
\end{verbatim}}}
With VP\_VS\_RATIO = 1.5 (e.g.) it is possible to ensure a minimum Vp/Vs ratio during the inversion. This is done by checking the Vp/Vs ratio at every grid point after every model update and if it is less than (e.g.) 1.5 the Pwave velocity will be increased. For smaller values than one (1) this criterion is disregarded.

\section{Time windowing and damping}
{\color{blue}{\begin{verbatim}
"Time windowing and damping" : "comment",
 "TIMEWIN" : "0",
 "TW_IND" : "0",
 "PICKS_FILE" : "./picked_times/picks"
 "TWLENGTH_PLUS" : "0.01",
 "TWLENGTH_MINUS" : "0.01",
 "GAMMA" : "100000",
\end{verbatim}}}

{\color{red}{\begin{verbatim}
Default values are:
 TIMEWIN=0
\end{verbatim}}}

To apply time windowing in a time series the paramter TIMEWIN must set to 1. A automatic picker routine is not integrated at the moment. The point in time (picked time) for each source must be specified in seperate files. The folder and file name can be set with the parameter PICKS\_FILE. The files must be named like this PICKS\_FILE\_.dat. So the number of sources in (SRCREC) must be equal to the number of files. Each file must contain the picked times for every receiver.\
The parameters TWLENGTH\_PLUS and TWLENGTH\_MINUS specify the length of the time window after (PLUS) and before (MINUS) the picked time. The unit is seconds. The damping factor GAMMA must be set individually. When usig TW\_IND = 1 three columns are expected in PICK\_FILE for each trace with the picked time, time window length plus and time window length minus. In this case TWLENGTH\_PLUS and TWLENGTH\_MINUS are ignored.


\section{Source wavelet inversion}
To remove the contribution of the unknown source time function (STF) from the waveform residuals,
it is necessary to design a filter which minimizes the misfit to the field recordings and raw synthetics. Therefore, a second forward simulation is applied. The first one is done with the wavelet specified in QUELLART and the second one with the optimized source wavelet saved in SIGNAL\_FILE (see Section~\ref{sec:sources}). This optimized source wavelet is kept constant within N\_STF or within a frequency range (see below).\\

{\color{blue}{\begin{verbatim}
"Definition of inversion for source time function" : "comment",
 "INV_STF" : "0",
 "PARA" : "fdlsq:tshift=0.0",
 "N_STF" : "10",
 "N_STF_START" : "1",
 "TAPER_STF" : "0",
 "TRKILL_STF" : "0",
 "TRKILL_FILE_STF" : "./trace_kill/trace_kill.dat",
\end{verbatim}}}

{\color{red}{\begin{verbatim}
Default values are:
 INV_STF=0
\end{verbatim}}}

INV\_STF should be switched to 1 if you want to invert for the source time function. %How to construct parameter strings PARA see doxygen documentation.

Examples for the parameter PARA are:
\begin{itemize}
\item To select frequency domain least squares (fdlsq) and shift the returned source time function by 0.4s, pass the following parameter string:\\
 \textit{fdlsq:tshift=0.4}

\item To select fdlsq, apply offset dependent weights and use a power of two to speed up the FFT\\
 \textit{fdlsq:exp=1.4:pow2}

\end{itemize}
N\_STF is the increment between the iteration steps. N\_STF\_START defines at which iterationstep the inversion for STF should start. This parameter has to be set at least to 1 NOT(!) 0.
When using TAPER\_STF = 1, the source signal is tapered. See \texttt{src/taper.c} for the taper definition.
With TRKILL\_STF = 1 it is possible to apply a trace killing for the estimation of the source wavelet correction filter.
\newline

Please note: If you additionally switch on frequency filtering during the inversion (TIME\_FILT=1 or TIME\_FILT=2), the parameters N\_STF and N\_STF\_START will be ignored. But the optimal source time function will be inverted for the first iteration and after every change of the frequency range.

For more information see \ref{cha:STFInversion}


\section{Smoothing the models}
+\subsection{Smoothing the models}
{\color{blue}{\begin{verbatim}
"Definition of smoothing the models vp and vs" : "comment",
"MODEL_FILTER" : "0",
@@ 855,87 +884,3 @@ Default values are:
\end{verbatim}}}
If MODEL\_FILTER = 1 vp and vsmodels are smoothed with a 2D median filter after every iterationstep. With FILT\_SIZE you can set the filter length in gridpoints.


\section{Frequency filtering}
{\color{blue}{\begin{verbatim}
"Frequency filtering during inversion" : "comment",
 "TIME_FILT" : "0",
 "F_HP" : "1",
 "FC_START" : "10.0",
 "FC_END" : "75.0",
 "FC_INCR" : "10.0",
 "ORDER" : "2",
 "ZERO_PHASE" : "0",
 "FREQ_FILE" : "freqeuncies.dat",
 "WRITE_FILTERED_DATA" : "0",
\end{verbatim}}}

{\color{red}{\begin{verbatim}
Default values are:
 TIME_FILT=0
 ZERO_PHASE=0
\end{verbatim}}}

TIME\_FILT = 1 can be set to use frequency filtering. The parameter FC\_START defines the corner frequency of the Butterworth low pass filter at the beginning of the inversion. The parameter FC\_END defines the maximum corner frequency used in the inversion. The parameter FC\_INCR controls in which steps the bandwidth is increased during the inversion.

If TIME\_FILT = 2 individual frequencies for each step can be read from FREQ\_FILE. In this file the first entry must be the number of frequencies used for filtering. Each frequency in Hz has to be specified in a row. The example file freqeuncies.dat can be found in \texttt{trunk/par}.

The parameter ORDER defines the order of the Butterworth low pass filter. If the variable ZERO\_PHASE is set to one a zero phase filter is applied. It is realized by filtering the the traces in both forward and reverse direction with the defined Butterworth low pass filter. Therefore, the effective order of the low pass filter is doubled.

With F\_HP an additional high pass filter can be applied, where F\_HP is the corner frequency in Hz.

With the parameter PRO (see~\ref{json:abort_criterion}) one has to adjust the criterion that defines at which points the bandwidth of the signals are increased.

With the parameter WRITE\_FILTERED\_DATA it is possible to write the time filtered measured data to disk which are filtered with the same filter as the synthetic data. Therefore this output can be used to visualize the residuals between synthetic and measured data.

\section{Trace killing}
{\color{blue}{\begin{verbatim}
"Trace killing" : "comment",
 "TRKILL" : "0",
 "TRKILL_FILE" : "./trace_kill/trace_kill.dat",
\end{verbatim}}}

{\color{red}{\begin{verbatim}
Default values are:
 TRKILL=0
\end{verbatim}}}

For not using all traces, the parameter TRKILL is introduced. If TRKILL is set to 1, trace killing is in use. The necessary file can be selected with the parameter TRKILL\_FILE. The file should include a kill table, where
the number of rows is the number of receivers and the number of columns reflects the number of sources. Now, a 1 (ONE) means, YES, please kill the trace. The trace is NOT used, it should be killed. A 0 (ZERO) means, NO, this trace should NOT be killed.

% \newpage

% \section{Receiver array}
% {\color{blue}{\begin{verbatim}
% "Receiver array" : "comment",
% "REC_ARRAY" : "0",
% "REC_ARRAY_DEPTH" : "70.0",
% "REC_ARRAY_DIST" : "40.0",
% "DRX" : "4",
% \end{verbatim}}}
%
% {\color{red}{\begin{verbatim}
% Default values are:
% REC_ARRAY=0
% \end{verbatim}}}
%
% These options are obsolete. Receiver arrays have to be defined directly in receiver.dat.
%
%
% \section{Checkpointing}
% {\color{blue}{\begin{verbatim}
% "Checkpoints" : "comment",
% "CHECKPTREAD" : "0",
% "CHECKPTWRITE" : "0",
% "CHECKPTFILE" : "tmp/checkpoint_fdveps",
% \end{verbatim}}}
%
% {\color{red}{\begin{verbatim}
% Default values are:
% CHECKPTREAD=0
% CHECKPTWRITE=0
% CHECKPTFILE="tmp/checkpoint_fdveps"
% \end{verbatim}}}
%
% These options are obsolete and will not be supported in the current version of DENISE.
\ No newline at end of file
diff git a/doc/latex/7_Examples.tex b/doc/latex/7_Examples.tex
index b6b08322e096a7bdc9c7dddb4306dae1d9b763b2..95fcdd37fd015c0f4324d43bed56008143ee5b3a 100644
 a/doc/latex/7_Examples.tex
+++ b/doc/latex/7_Examples.tex
@@ 1,235 +1,6 @@
% 
\chapter{Examples} %2  the elastic Marmousi2 model}
% \label{The Marmousi2 model}

% 

% % 
%
% \chapter{Example 3  inversion of ultrasonic data}
% \label{Example 3}
%
% % 
%
% \section{Homogenous plexiglas bloc model}
% The second example consists of real data which was recorded at a Plexiglas bloc with size of 10*15*20 cm. Using a simple regression a Rayleighwavevelocity of 1277.2 m/s could be estimated. A waveform inversion with DENISE can be achieved in XX steps. The necessary MATLAB scripts are located in the \textit{/mfiles/US} directory.
%
% \begin{figure}[bh]
% \includegraphics[width=15cm]{figures/ultrasonic/Plexiglasblock.jpg}
% \caption{Photo of the homogenous plexiglas bloc with ultrasonic acquisition geometry.}
% \label{Plexiglasblock.jpg}
% \end{figure}
%
% \clearpage
%
% \begin{enumerate}
% \item First we have to find some initial guess for the elastic parameters of plexiglas. The parameters in \cite{zerwer:2000} seem to be in good agreement with the measured Rayleigh wave velocity: $\rm{Vp=2700\; m/s}$, $\rm{Vs=1370\; m/s}$ and $\rm{\rho=1190\; kg/m^3}$ leads to a Rayleigh wave velocity of $\rm{VR=1280\; m/s}$. Assuming a maximum frequency $\rm{f_{max} = 600\; kHz}$ and using the grid disperson, Courant criterion and the extension of the acquisition geometry the following FDmodel parameters are required:
% \begin{itemize}
% \item DH = 2.0e4 m
% \item DT = 5.2e8 s
% \item NX = 860 gridpoints, NY = 200 gridpoints
% \item NT = 3077 time steps
% \end{itemize}
% The necessary files for the input parameters DENISE\_US.inp, source\_ricker.dat, source\_spike.dat and source\_wavelet.dat, receiver\_US.dat and model definition half\_space.c are located in the directory DENISE/genmod/US. Copy these files to the DENISE/par and DENISE/src directories, respectively. Set
% {\color{blue}{\begin{verbatim}
% QUELLART=1
% SOURCE_FILE = ./source/source_ricker.dat
% SNAP=3
% INVMAT=10
% \end{verbatim}}}
% in the parameter file \textit{DENISE$\_$US.inp} and start a first test model run:
% {\color{blue}{\begin{verbatim}
% bash2.05b$:~/DENISE/par> mpirun np 4 ../bin/denise DENISE_US.inp
% \end{verbatim}}}
% Merge the resulting wavefield snapshots with
% {\color{blue}{\begin{verbatim}
% bash2.05b$:~/DENISE/par> snapmerge ../bin/denise < DENISE_US.inp
% \end{verbatim}}}
% Copy the file snap/waveform\_forward.bin.rot to the snapdirectory in mfiles/US. With the MATLABscript US\_snap you can visualize the propagation of the surface wave. The script is selfexplanatory. Set
% {\color{blue}{\begin{verbatim}
% 14 % name of the snap file
% 15 file_div='snap/waveform_forward.bin.rot';
% [...]
% 20 % optimize clipping values
% 21 caxis_value_vp1=5e1;
% 22 caxis_value_vp2=5e1;
% [...]
% 24 % define first and last frame to display
% 25 firstframe=1;
% 26 lastframe=50;
% [...]
% 28 % gridsize and grid spacing (as specified in parameterfile)
% 29 NX1glob=1; NX2glob=860;
% 30 NY1glob=1; NY2glob=200;
% 31 IDX=1; IDY=1;
% 32 dh_glob=2e4;
% [...]
% 34 % time increment for snapshots:
% 35 TSNAPINC=3e6; TSNAP1=3e6;
% \end{verbatim}}}
% and run the script. A few representive snapshots of the Rayleighwave propagation are shown in \FIG{Rayleighwavepropagation}
% \begin{figure}[bh]
% \includegraphics[width=8.5cm]{figures/ultrasonic/US_snap_6.pdf}\includegraphics[width=8.5cm]{figures/ultrasonic/US_snap_10.pdf}\\
% \includegraphics[width=8.5cm]{figures/ultrasonic/US_snap_18.pdf}\includegraphics[width=8.5cm]{figures/ultrasonic/US_snap_27.pdf}\\
% \includegraphics[width=8.5cm]{figures/ultrasonic/US_snap_36.pdf}\includegraphics[width=8.5cm]{figures/ultrasonic/US_snap_44.pdf}\\
% \caption{A few snapshots of the Rayleighwave propagating through the homogenous Plexiglas block.}
% \label{Rayleighwavepropagation}
% \end{figure}
% \clearpage
% \item In the next step we take a look at the raw data and convert the ASCII data to SU format. Open the MATLABscript ASCII2SU.m. Define the location of the plexiglas data:
% {\color{blue}{\begin{verbatim}
% 83 file_name = ['plexiglas_profil_data/',int2str(i),'mm.txt'];
% \end{verbatim}}}
% Each trace of the ultrasonic data is located in a file $\bf{mm.txt}$, the acquistion geometry is defined in $\bf{readme.txt}$. The range of the offset values, the increment and the sample interval:
% {\color{blue}{\begin{verbatim}
% 63 ntr1=62; % minimum offset
% 64 ntr2=162; % maximum offset
% 65 dntr1=10; % offset interval
% ...
% 69 DT=1.0e7; % sampling interval of the US data
% \end{verbatim}}}
% Activate the interpolation of the data and define the sampling interval and number of time steps in DENISE:
% {\color{blue}{\begin{verbatim}
% 41 INTERP=1;
% 42 DT1=5.2e8; % sampling rate for DENISE
% 43 nt_DENISE=3462; % number of time steps in DENISE
% \end{verbatim}}}
% Define a time delay, which makes the later source wavelet inversion much easier:
% {\color{blue}{\begin{verbatim}
% 23 DELAY=1;
% 24 delay=2.8e5; % time delay for real data
% \end{verbatim}}}
% and finally activate the output of the SU file and enter a name for the SU file:
% {\color{blue}{\begin{verbatim}
% 55 WRITE=1;
% 56 % name of output SU file
% 57 imfile=['plexiglas_profil_data/DENISE_plexiglas_y.su.shot1'];
% \end{verbatim}}}
% After running the script you get the following seismic section:
% \begin{figure}[bh]
% \centering
% \includegraphics[width=15cm]{figures/ultrasonic/plexiglas_step1.png}
% \caption{Elastic waveform inversion of ultrasonic data  step 1 (raw data).}
% \label{Plexiglas_step_1}
% \end{figure}
% \clearpage
% \item The data looks a bit noisy, therefore activate the bandpass frequency filter, where f1, f2, f3 and f4 define the corner frequencies:
% {\color{blue}{\begin{verbatim}
% 47 FILTER=1;
% 48 f1=35000;
% 49 f2=40000;
% 50 f3=450000;
% 51 f4=500000;
% \end{verbatim}}}
% We want to restrict the inversion to the wavelet of the first arrival. Therefore activate the time window:
% {\color{blue}{\begin{verbatim}
% 28 TWIN=1;
% 29 twinc=5.36e5+delay; % center of time window
% 30 twinp=1e5; % twinc + twinp
% 31 twinm=1e5; % twinc  twinm
% 32 ad=1e12; % damping parameter
% 33 vnmo=1280; % velocity of the Rayleigh wave
% \end{verbatim}}}
% The parameter twinc should be approximately at the center of the first arrival wavelet. twinp and twinm define the length of the time window, ad the damping parameter and vnmo the normal moveout of the Rayleigh wave. Optimize these parameters until the window is well defined. For each trace the value of twinc will be written to
% {\color{blue}{\begin{verbatim}
% 278 imfile=['picks_1.dat'];
% \end{verbatim}}}
% An optimized seismic section is shown in \FIG{Plexiglas_step_2}. The yellow dots denote the time twinc, the blue dots the time twinc+twinp and the red dots the time twinctwinp, respectively.
% \begin{figure}[bh]
% \centering
% \includegraphics[width=15cm]{figures/ultrasonic/plexiglas_step2.png}
% \caption{Elastic waveform inversion of ultrasonic data  step 2 (optimized seismic section with time window).}
% \label{Plexiglas_step_2}
% \end{figure}
% \clearpage
% \item Copy the generated SUfile \textit{DENISE$\rm{\_}$plexiglas$\rm{\_}$y.su.shot1} to the data directory \textit{DENISE/par/su/plexiglas} and the file with the definition of the time window \textit{picks$\_$1.dat} to \textit{DENISE/par/picked$\_$times}.
% \item Define
% {\color{blue}{\begin{verbatim}
% QUELLART=6
% SOURCE_FILE = ./source/source_spike.dat
% SNAP=0
% INVMAT=10
% TWIN=0
% INV_STF=0
% \end{verbatim}}}
% in the parameter file \textit{DENISE$\_$US.inp}, set the time delay td in the sourcefile \textit{source$\_$spike.dat} to 0.0 s and run a forward model.
% \item Copy su/DENISE\_US\_y.su.shot1.it1 to the FD\_seis directory and open the Matlab script Plot\_US\_Residuals.m. Be sure that the parameters delay, twinc, twinp, twinm, ad, vnmo, f1, f2, f3, f4, ntr, ntr1, ntr2, dntr1, DT1 and in\_data are equal to the definitions in ASCII2SU.m. Turn the time window off (TWIN=0). After running the script you should get the following seismic sections. On the left side a comparison of the seismic section between the model data (red) and the field data (black) and on the right the data residuals:
% \begin{figure}[bh]
% \centering
% \includegraphics[width=18cm]{figures/ultrasonic/plexiglas_step5.png}
% \caption{Elastic waveform inversion of ultrasonic data  step 5 (first model result).}
% \label{Plexiglas_step_5}
% \end{figure}
% \clearpage
% \item Zoom into the first trace of the data comparison and pick time values of the measured t1 and modelled wavelets t2, respectively (\FIG{Plexiglas_step_6}). From this values calculate the time delay td between the wavelets.
% In this example, t1=8.05e5 s, t2=4.93e5 s and td=t1t2=3.12e5 s.
% \begin{figure}[bh]
% \centering
% \includegraphics[width=16cm]{figures/ultrasonic/plexiglas_step6.pdf}
% \caption{Elastic waveform inversion of ultrasonic data  step 6 (measure time delay).}
% \label{Plexiglas_step_6}
% \end{figure}
% \clearpage
% \item Change the time delay td in source\_spike.dat to the measured value, run a new model, copy the resulting DENISE\_US\_y.su.shot1.it1 file to the FD\_seis directory again and check if there is still a significant time delay present. Optimize it further until the wavelets approximately fit (\FIG{Plexiglas_step_7}).
% \begin{figure}[bh]
% \centering
% \includegraphics[width=16cm]{figures/ultrasonic/plexiglas_step7.pdf}
% \caption{Elastic waveform inversion of ultrasonic data  step 7 (optimum wavelet fit).}
% \label{Plexiglas_step_7}
% \end{figure}
% \item After fitting the arrival times of the wavelets approximately we proceed with the source wavelet inversion. Copy the initial source wavelet from start/half\_space\_source\_signal.0.su.shot1 to US/wavelet. Open the matlab script est\_source\_wavelet.m and check if the parameters delay, twinc, twinp, twinm, ad, vnmo, f1, f2, f3, f4, ntr, ntr1, ntr2, dntr1, DT1, in\_data and in\_model are consistent with those in the other two matlab scripts and additionally define in\_source. Set the MarquardtLevenber damping factor epsilon=0.0 and run the script. With no regularization the data fit looks nearly perfect (\FIG{Plexiglas_step_8a}), but the resulting source wavelet looks very unrealistic and contains a lot of spikes. When increasing the damping factor epsilon (f.e. to epsilon=1e1) the solutions are damped and become more plausible, while the data misfit increases (\FIG{Plexiglas_step_8b}). Optimize epsilon until you find a good compromise between plausible source wavelet and minimum data misfit.
% \begin{figure}[bh]
% \centering
% \includegraphics[width=16cm]{figures/ultrasonic/plexiglas_data_step8_lam_0.pdf}
% \includegraphics[width=16cm]{figures/ultrasonic/plexiglas_wavelet_step8_lam_0.pdf}
% \caption{Elastic waveform inversion of ultrasonic data  step 8. Resulting seismic sections for the estimated source wavelet (top) with no regularization ($\epsilon=0.0$) and the estimated source wavelet (bottom).}
% \label{Plexiglas_step_8a}
% \end{figure}
% \clearpage
% \begin{figure}[bh]
% \centering
% \includegraphics[width=16cm]{figures/ultrasonic/plexiglas_data_step8_lam_1e1.pdf}
% \includegraphics[width=16cm]{figures/ultrasonic/plexiglas_wavelet_step8_lam_1e1.pdf}
% \caption{Elastic waveform inversion of ultrasonic data  step 8. Resulting seismic sections for the estimated source wavelet (top) with regularization ($\epsilon=1e1$) and the estimated source wavelet (bottom).}
% \label{Plexiglas_step_8b}
% \end{figure}
% \clearpage
% \item Copy wavelet\_US\_opt.dat to /par/wavelet, edit DENISE\_US.inp
% {\color{blue}{\begin{verbatim}
% SOURCE_FILE = ./source/source_wavelet.dat
% QUELLART = 3
% SIGNAL_FILE = ./wavelet/wavelet_US_opt.dat
% \end{verbatim}}}
% set the time delay td in the sourcefile \textit{source$\_$wavelet.dat} to 0.0 and run DENISE again.
% \item Copy DENISE/su/DENISE\_US\_y.su.shot1.it1 to the FD\_seis directory and run the Matlab script Plot\_US\_Residuals.m. You should get the following seismic sections:
% \begin{figure}[bh]
% \centering
% \includegraphics[width=16cm]{figures/ultrasonic/plexiglas_step9.pdf}
% \caption{Elastic waveform inversion of ultrasonic data  step 9 (model result with estimated source wavelet).}
% \label{Plexiglas_step_9}
% \end{figure}
% \clearpage
% \item Similar to step 6 measure the time delay td in Matlab. In this case the first arrival of the measured data is earlier than the modelled arrival. Therefore the time delay is negative, td=5.875e5 s:
% \begin{figure}[bh]
% \centering
% \includegraphics[width=16cm]{figures/ultrasonic/plexiglas_step10.pdf}
% \caption{Elastic waveform inversion of ultrasonic data  step 10 (measure time delay).}
% \label{Plexiglas_step_10}
% \end{figure}
% \clearpage
% \item Change the time delay td in source\_wavelet.dat according to the measured time delay and run DENISE again. Copy the resulting DENISE/su/DENISE\_US\_y.su.shot1.it1 to the FD\_seis directory and run the Matlab script Plot\_US\_Residuals.m. You should get the following seismic sections with an approximately well fitted first arrival wavelet:
% \begin{figure}[bh]
% \centering
% \includegraphics[width=16cm]{figures/ultrasonic/plexiglas_step11.pdf}
% \caption{Elastic waveform inversion of ultrasonic data  step 11 (model result with estimated source wavelet and corrected time delay).}
% \label{Plexiglas_step_11}
% \end{figure}
%
% \end{enumerate}

% \chapter{Example 4  shallow seismics}
% \label{example4}
+\chapter{Examples}
\section{Toy Example Shallow Seismics: Inversion of Viscoelastic Observations}
You can find the input files for the small toy example in the directory \texttt{par/in\_and\_out/toy\_example}. To run the example you can use the shell script \texttt{run\_toy\_example.sh} in the directory \texttt{par}. It is adjusted for a PC with at least 4 CPUs. If you have less CPUs you have to adjust the number of processors in the input files as well as in the call of DENISE in the shell script. The shell script includes all relevant steps. First all libraries and DENISE are compiled. (Do not get nervous about the huge output during compiling.) If you run into problems during this step you maybe have to adjust the variables in \texttt{Makefile\_var} in the directory \texttt{contrib}. Afterwards DENISE starts to simulate observed data for the inversion. The simulated seismograms are renamed for the inversion and DENISE is again compiled with another model function which creates the initial models for the inversion on the fly (see section~\ref{gen_of_mod}). The last step in the shell script is the call of DENISE to start the inversion.\\
@@ 242,7 +13,6 @@ You will need less than 1~GB working memory to run the inversion and approximate
You can use the script \texttt{mfiles/Plot\_results\_toy\_example.m} to visualize the results of the toy example. This script can be used with Matlab or Octave.

\begin{figure}[ht]
\centering
\subfloat[][]{%
@@ 258,7 +28,6 @@ You can use the script \texttt{mfiles/Plot\_results\_toy\_example.m} to visualiz
\label{Rheinstetten_true_model}
\end{figure}

\begin{figure}[ht]
\centering
\includegraphics[width=10cm]{figures/Rheinstetten/start_vs_model}
@@ 280,120 +49,3 @@ You can use the script \texttt{mfiles/Plot\_results\_toy\_example.m} to visualiz
\end{figure}
\clearpage

\section{The complex Marmousi2 model}
\label{complex Marmousi model}
Developed in the 1990s by the French Petroleum Institute (IFP) (\cite{versteeg:94}), the Marmousi model is a widely used test problem for seismic imaging techniques. Besides the original acoustic version of the model an elastic version was developed by \cite{martin:06}. This model consists of parts with simple (approximately 1D) and complex geological situations. In the following two sections the performance of the FWT code will be tested for the complex and the simple part of the Marmousi2 model, respectively.

The Marmousi2 model (Fig. \ref{marmousi_geology}) consists of a 500 m thick water layer above an elastic subseafloor model. The sediment model is very simple near the left and right boundaries but rather complex in the centre. At both sides, the subseafloor is approximately horizontally layered, while steep thrust faults are disturbing the layers in the centre of the model. Embedded in the thrust fault system and layers are small hydrocarbon reservoirs (\FIG{marmousi_geology}, \cite{martin:06}).
\begin{itemize}
\item One shallow gas sand in a simple structural area (A).
\item One relatively shallow oil sand in a structural simple area (B).
\item Four faulted trap gas sands at varying depths (C1,C2,C3,C4).
\item Two faulted trap oil sands at medium to deep depths (D1,D2).
\item One deep oil and gas sand anticlinal trap (E1,E2).
\item Water wet sand.
\end{itemize}
The deeper parts of the model consist of salt and reef structures. The thrust fault system and the reef structures are not easy to resolve by conventional first arrival tomography, so it is an ideal test model for the FWT. Due to computational restrictions the original MarmousiII model could not be used, because the very low Swave velocities in the sediments would require a too small spatial sampling of the model. Therefore new Swave velocities are calculated using the empirical scaling relation for hard rocks. Additionally the size of the Marmousi2 model is reduced from $\rm{17 \times 3.5\;km}$ to $\rm{10 \times 3.48\; km}$ (\FIG{marmousi_II_true_model}).
\begin{figure}[!bh]
\centering
\includegraphics[width=14cm]{figures/marmousi/marmousi_geology.pdf}
\caption{Marmousi2 model  geology.}
\label{marmousi_geology}
\end{figure}
\begin{figure}[!ht]
\centering
\includegraphics[width=14cm]{figures/marmousi/marmousi_complete_caption.pdf}
\caption{The reduced complex Marmousi2 model used for the elastic FWT.}
\label{marmousi_II_true_model}
\end{figure}
\clearpage\subsection{Acquisition geometry and FD model}
\label{marmousi_acq}
The acquisition geometry consists of a fixed streamer located 40 m below the free surface in the water layer. The streamer contains 400 two component geophones recording the displacements $\rm{u_i}$. For the synthetic dataset 100 airgun shots are recorded. The sources are located at the same depth as the receivers. The source signature is a 10 Hz Ricker wavelet. The model has the dimensions $\rm{10 \times 3.48\; km}$. Using an 8th order spatial FD operator the model can be discretized with $\rm{500 \times 174}$ gridpoints in x and ydirection with a spatial gridpoint distance of 20.0 m. The time is discretized using DT = 2.7 ms, thus for a recording time of T = 6.0 s 2222 time steps are needed.
\subsection{Elastic wave propagation in the complex Marmousi model}
\label{marmousi_complex}
Fig. \ref{wavefield_marmousi_true} shows the development of the pressure wavefield excited by shot 50 for the central part of the complex elastic Marmousi2 model at 6 different time steps. The P wave is traveling from the source through the water column (T=100.0 ms) and is reflected at the seafloor (T=400.0 ms). In the elastic subseafloor medium the wavefield becomes very complex. The layers in the steep thrust fault system produce numerous reflections and internal multiples (T=600.0 ms). Additionally strong diffracted waves are generated at the sharp corners of the thrust faults between the disturbed high velocity sediment blocks within the thrust faults and the surrounding low velocity sediments. At the free surface strong multiple reflections occur (T=800.0 ms). The wavefront of the direct wave is quite deformed due to strong velocity contrasts within the thrust fault system. After 1500 ms nearly all kinds of waves which can be found in the literature are present: Reflections, refractions, diffractions, (internal) multiples or interface waves. The trapped gas sand reservoirs C1, C2 and C3 produce strong reflections and mode conversions. This complexity is also visible in the seismic section, recorded by the streamer in the water column. As an example Fig. \ref{marmousi_seis}, (f) shows the seismic section of the ycomponent for shot 50. Beside the direct wave and a strong reflection from the seafloor numerous small reflection events from the thrust fault system are dominating the seismic section.
\begin{figure}
\begin{center}
\includegraphics[width=7.5cm]{figures/marmousi/shot93_snap_2.pdf}\includegraphics[width=7.5cm]{figures/marmousi/shot93_snap_5.pdf}\\
\includegraphics[width=7.5cm]{figures/marmousi/shot93_snap_7.pdf}\includegraphics[width=7.5cm]{figures/marmousi/shot93_snap_9.pdf}\\
\includegraphics[width=7.5cm]{figures/marmousi/shot93_snap_14.pdf}\includegraphics[width=7.5cm]{figures/marmousi/shot93_snap_16.pdf}\\
% \caption{Pressure wavefield excited by shot 50 for the elastic Marmousi2 model at 6 different time steps.}
\label{wavefield_marmousi_true}
\end{center}
\end{figure}
\clearpage
\subsection{FWT of the complex Marmousi model}
\label{marmousi_complex_FWT}
Due to the results of the last section, I choose the seismic velocities as model parameters for the inversion. To generate a starting model which describes the long wavelength part of the material parameters correctly the true model $\rm{\mathbf{m}=[V_p,\;V_s,\;\rho]}$ was filtered using a spatial 2DGaussian filter
\begin{equation}
\rm{\mathbf{m}_{smooth}(x,y) = \frac{1}{2 \pi \lambda_c^2} \int_{-\lambda_c}^{\lambda_c} \int_{-\lambda_c}^{\lambda_c} dx'\, dy'\, \mathbf{m}(x-x',y-y') \exp\biggl(-\frac{(x-x')^2+(y-y')^2}{2 \lambda_c^2}\biggr)}
\label{gaussian}
\end{equation}
with a correlation length $\rm{\lambda_c = 200.0\;m}$. As a result all the small scale structures vanished and only the large scale structures are present (Fig. \ref{marmousi_II_start}).
\begin{figure}[!bth]
\begin{center}
\includegraphics[width=12cm]{figures/marmousi/marmousi_II_start.pdf}
\caption{Starting models for the MarmousiII model.}
\label{marmousi_II_start}
\end{center}
\end{figure}
\clearpage
As in the case of the spherical low velocity anomaly the application of a preconditioning operator is vital to suppress the large gradient values near the source and receiver positions. Additionally strong artefacts are present near the free surface (\FIG{marmousi_II_precond}, top) which are a few orders of magnitude larger than the gradient of the material parameters. This problem is also known in case of the acoustic inversion problem (\cite{benhadj:2008}). To suppress these effects a spatial preconditioning operator is applied on the gradient, which is defined as (\FIG{taper_grad_marm})
\EQ{marmousi_prec:1}{\rm{P=}
\begin{cases}
\rm{0} & \text{if } \rm{y_0 = 0.0\; m} \le y \le \rm{y_{gradt1} = 380.0\; m}\\
\rm{exp(-\frac{1}{2}(a\frac{y-y_{gradt1}-\Delta l}{\Delta l/2})^2)} & \text{if } \rm{y_{gradt1} = 380.0\; m} \le y \le \rm{y_{gradt2} = 480.0\; m}\\
\rm{\frac{y}{y_{gradt2}}} & \text{if } \rm{y_{gradt2} = 480.0\; m} < y \\
\end{cases}}
with $\rm{a=3.0}$, $\rm{\Delta l=100.0\;m}$. \\
\begin{figure}[!ht]
\begin{center}
\includegraphics[width=12cm]{figures/marmousi/taper_grad_refl.pdf}
\caption{The preconditioning operator for the reflection geometry.}
\label{taper_grad_marm}
\end{center}
\end{figure}
The preconditioning operator sets the gradient near the free surface and the sources/receivers to zero. In a transition zone between 380.0 m and 480.0 m depth a Gaussian taper is applied. Beyond a depth of 480 m the operator scales the gradient linearly with depth. This is a very crude correction for the amplitude loss in larger depths due to geometrical spreading and reflections in the upper parts of the model. Before the application of the preconditioning operator no subsurface structures are visible at all \FIG{marmousi_II_precond} (top), while strong reflectors are visible after its application.
\clearpage
\begin{figure}[!bth]
\begin{center}
\includegraphics[width=15cm]{figures/marmousi/marmousi_II_precond.pdf}
\caption{The influence of the preconditioning operator P for the elastic Marmousi2 model. The Gradient $\rm{\delta V_p}$ (iteration no. 1) before (top) and after the application of the preconditioning operator (bottom).}
\label{marmousi_II_precond}
\end{center}
\end{figure}
To achieve a smooth transition from the long wavelength starting model to the inversion result with short wavelength structures the application of a frequency filter with variable bandwidth on the data residuals $\rm{\delta \mathbf{u}}$ is vital, to avoid the convergence into a local minimum. In this case the inversion is separated into two parts. In part I only frequencies below 10 Hz are inverted, while in part II the full spectral content up to 20 Hz is inverted. The inversion results after 350 iterations are shown in Fig. \ref{marmousi_II_result}. Additionally depth profiles at $\rm{x_{p1}=3.5\;km}$ and $\rm{x_{p2}=6.4\;km}$ of the starting model and inversion result are compared with the true model in Fig. \ref{marmousi_II_result_profile}. The results contain a lot of small details. All fine layers which are completely absent in the starting model could be resolved. The thrust faults and the reef structures in the deeper part of the model are also imaged very well. Besides the deep hydrocarbon reservoirs E1 and E2, the oil and gas reservoirs C1, C2, C3, C4, D1 and D2 are all clearly visible.
\begin{figure}
\centering
\includegraphics[width=15cm]{figures/marmousi/marmousi_II_result_1.pdf}
\caption{Results of the elastic FWT for the MarmousiII model. The dashed lines denote the positions of the depth profiles 1 and 2 shown in Fig. \ref{marmousi_II_result_profile}.}
\label{marmousi_II_result}
\end{figure}
\clearpage
It is quite surprising, that the shear wave velocity model could also be resolved very well, even though only streamer data and therefore mainly Pwave information is used. Even the density, a parameter which can hardly be estimated from seismic data, could be recovered from the seismic wavefield. Keep in mind though, that the density image is based not only on density information, but also contains $\rm{V_p}$ and $\rm{V_s}$ information due to the ambiguity investigated by the CTS test problem \citep[chapter 5.2]{koehn:11}. The depth profiles show the strong deviation of the starting model from the true Marmousi model, in some parts up to $\rm{\pm 500-1000\;m/s}$ for the velocity models and $\rm{\pm 250-500\;kg/m^3}$ for the density models. Nevertheless, the FWT could reconstruct the velocity and density structures quantitatively fairly well. In Fig. \ref{marmousi_seis} the seismic section of shot 50 is plotted for the starting model (a), the inversion result (b), the true model (c), the initial residuals (d), the final residuals (e) and the evolution of the residual energy (f). Notice the good fit of the first arrivals for the starting model, but the lack of small details beyond the first arrivals. Only the direct wave, the reflection from the ocean bottom and a few multiples are present. The inversion result fits most of the phases and amplitudes of the later small scale reflections from within the thrust fault system and therefore the data residuals are very small. The misfit function decreases smoothly for the first 20 iteration steps, but exhibits strong fluctuations for the later iterations, due to the strong nonlinear character of the inversion problem for this complex geology.
The performance of the FWT code was benchmarked on an Altix 4700 using the Marmousi2 model. Fig.~\ref{bench_time} shows the computation time (top) and the memory requirements for up to 50 CPUs (bottom). On a single CPU the elastic timedomain FWT is not efficient at the moment. For 150 iteration steps a total calculation time of 85 days and about 10 GB of RAM would be required. When using 50 CPUs the computation
time is reduced to roughly 2 days. Note the linear speedup of the FWT code due to the optimized parallelization of the forward modelling FD code.\\
In conclusion the results look very impressive, but you should not forget that the starting models for the seismic velocities and density are unrealistically accurate and are not easy to derive from the seismic data for such a complex model.
\begin{figure}
\centering
\includegraphics[width=16cm]{figures/marmousi/marmousi_II_result_profile_1.pdf}
\caption{Depth profiles at $\rm{x_{p1}=3.5\;km}$ (top) and $\rm{x_{p2}=6.4\;km}$ (bottom) of the starting model and FWT result are compared with the true model for the MarmousiII model: Pwave velocity (left), Swave velocity (center) and density (right).}
\label{marmousi_II_result_profile}
\end{figure}
\clearpage
\begin{figure}
\centering
\includegraphics[width=12.5 cm]{figures/marmousi/marmousi_seis_results_WIT.pdf}
\caption{Seismic sections (shot 50, y-component) for the MarmousiII model: (a) starting model, (b) FWT result, (c) true model, (d) initial residuals, (e) final residuals and (f) evolution of the residual energy.}
\label{marmousi_seis}
\end{figure}
\clearpage
\begin{figure}
\centering
\includegraphics[width=11cm]{figures/marmousi/cpu_altix_DENISE.pdf}\\
\includegraphics[width=11cm]{figures/marmousi/mem_altix_DENISE.pdf}
\caption{Benchmark results for the Marmousi2 model. The absolute calculation times (top) and the memory requirements (bottom) for up to 50 CPUs on an Altix 4700.}
\label{bench_time}
\end{figure}
\clearpage
\ No newline at end of file
diff git a/doc/latex/manual_DENISE.tex b/doc/latex/manual_DENISE.tex
index 6be6a291a3fb57fbf1860c6daa38403d98f91081..e90e24511cbd27aa143917f1fa415c434d93f2c0 100644
 a/doc/latex/manual_DENISE.tex
+++ b/doc/latex/manual_DENISE.tex
@@ 75,17 +75,6 @@
% biblio GJI
\bibliographystyle{abbrvnat}
% \newcommand{\toall}[1]{\textbf{*** All: #1 ***}}
% \newcommand{\tojeroen}[1]{\textbf{*** Jeroen: #1 ***}}
% \newcommand{\tobrian}[1]{\textbf{*** Brian: #1 ***}}
% \newcommand{\tovala}[1]{\textbf{*** Vala: #1 ***}}
% \newcommand{\tovalabrian}[1]{\textbf{*** Vala \& Brian: #1 ***}}
% \newcommand{\tovalaqinya}[1]{\textbf{*** Vala \& Qinya: #1 ***}}
% \newcommand{\toqinya}[1]{\textbf{*** Qinya: #1 ***}}
% \newcommand{\tomin}[1]{\textbf{*** Min: #1 ***}}
% \newcommand{\toalessia}[1]{\textbf{*** Alessia: #1 ***}}
% \newcommand{\todimitri}[1]{\textbf{*** Dimitri: #1 ***}}

\newcommand{\nexxi}{\mbox{\texttt{NEX\_XI\/}}}
\newcommand{\nexeta}{\mbox{\texttt{NEX\_ETA\/}}}
\newcommand{\nprocxi}{\mbox{\texttt{NPROC\_XI\/}}}
@@ 112,14 +101,6 @@
\input{0_Title.tex}
% \begin{center}
% \thispagestyle{empty}%
% \vspace*{1.8truecm}
% \noindent\makebox[\textwidth]{%
% \noindent\includegraphics[width=0.83\paperwidth]{figures/DENISEcover.png}
% }
% \end{center}

\title{\textbf{DENISE}\\
\textbf{User Manual}}
@@ 165,7 +146,8 @@ Martin Sch\"afer,\\
(since 2014) \\
Laura Ga\ss ner,\\
Tilman Metz,\\
Niklas Thiel.\\
+Niklas Thiel,\\
+Florian Wittkamp.\\
% (add other developers here in the future).
\newline
@@ 197,29 +179,8 @@ If you use this code for your own research, please cite at least one article
written by the developers of the package, for instance:\\
\newline
\bibentry{koehn:11}\\
% \cite{koehn:11}\\
% \newline
% or\\
% \newline

% (XX add more references here)\\
% \newline
% and/or other articles from \urlwithparentheses{http://www.geophysik.unikiel.de/~dkoehn/publications.htm}
% \newline
%
% \noindent
% The corresponding Bib\TeX{} entries may be found
% in file \texttt{doc/USER\_MANUAL/thesis.bib}.


% %%
%
% \chapter*{Contact}
%
% %%
%
% Please email your feedback, questions, comments, and suggestions
% to\\ Daniel K\"ohn \urlwithparentheses{dkoehnATgeophysik.unikiel.de}.
+
+\noindent\bibentry{kohn2012influence}\\
\newpage{}
diff git a/doc/latex/thesis.bib b/doc/latex/thesis.bib
index 237c8880cb3b6d8bfc21b71afa5a39029a508994..2582e02ececa4bb2095d088ca94e81a7995128b2 100644
 a/doc/latex/thesis.bib
+++ b/doc/latex/thesis.bib
@@ 35,26 +35,26 @@
@string{SEG02 = {73rd Ann. Internat. Mtg., Soc. Expl. Geophys., Expanded Abstracts}}
@string{SEG03 = {74th Ann. Internat. Mtg., Soc. Expl. Geophys., Expanded Abstracts}}
@article{shin2001efficient,
 title={Efficient calculation of a partialderivative wavefield using reciprocity for seismic imaging and inversion},
 author={Shin, Changsoo and Yoon, Kwangjin and Marfurt, Kurt J and Park, Keunpil and Yang, Dongwoo and Lim, Harry Y and Chung, Seunghwan and Shin, Sungryul},
 journal={Geophysics},
 volume={66},
 number={6},
 pages={18561863},
 year={2001},
 publisher={Society of Exploration Geophysicists}
}
@article{plessix2004frequency,
 title={Frequencydomain finitedifference amplitudepreserving migration},
 author={Plessix, RE and Mulder, WA},
 journal={Geophysical Journal International},
 volume={157},
 number={3},
 pages={975987},
 year={2004},
 publisher={Oxford University Press}
}
+@article{shin2001efficient,
+ title={Efficient calculation of a partialderivative wavefield using reciprocity for seismic imaging and inversion},
+ author={Shin, Changsoo and Yoon, Kwangjin and Marfurt, Kurt J and Park, Keunpil and Yang, Dongwoo and Lim, Harry Y and Chung, Seunghwan and Shin, Sungryul},
+ journal={Geophysics},
+ volume={66},
+ number={6},
+ pages={18561863},
+ year={2001},
+ publisher={Society of Exploration Geophysicists}
+}
+@article{plessix2004frequency,
+ title={Frequencydomain finitedifference amplitudepreserving migration},
+ author={Plessix, RE and Mulder, WA},
+ journal={Geophysical Journal International},
+ volume={157},
+ number={3},
+ pages={975987},
+ year={2004},
+ publisher={Oxford University Press}
+}
@book{aki:80,
AUTHOR = {Aki, K. and Richards, P.G. },
@@ 1384,6 +1384,17 @@ author = {Inazaki, T. and Isahai, H. and Kawamura, S. and Kuruhashi, T. and Haya
SCHOOL = {{K}iel {U}niversity},
YEAR = {2011} }
+@article{kohn2012influence,
+ title={On the influence of model parametrization in elastic full waveform tomography},
+ author={K{\"o}hn, D and De Nil, D and Kurzmann, A and Przebindowska, A and Bohlen, T},
+ journal={Geophysical Journal International},
+ volume={191},
+ number={1},
+ pages={325345},
+ year={2012},
+ publisher={Oxford University Press}
+}
+
@article{kolsky:56,
AUTHOR = {Kolsky, H.},
TITLE = {The propagation of stress pulses in viscoelastic solids},