Commit abdeb652 authored by Florian Wittkamp

Merge branch 'release/Release_2.0.2'

parents d3d7b0ac 22a7975e
......@@ -10,7 +10,7 @@ The [**manual**](https://git.scc.kit.edu/GPIAG-Software/IFOS2D/wikis/home) is in
# Download and Newsletter
You can download the [**latest Release**](https://git.scc.kit.edu/GPIAG-Software/IFOS2D/tags/Release_2.0.1) or the current [**Beta-Version**](https://git.scc.kit.edu/GPIAG-Software/IFOS2D/tree/develop).
You can download the [**latest Release 2.0.2**](https://git.scc.kit.edu/GPIAG-Software/IFOS2D/tags/Release_2.0.2) or the current [**Beta-Version**](https://git.scc.kit.edu/GPIAG-Software/IFOS2D/tree/develop).
To receive news and updates please [register](http://www.gpi.kit.edu/Software-FWI.php) on the email list [IFOS@lists.kit.edu](http://www.gpi.kit.edu/Software-FWI.php).
Please use this list also to ask questions or to report problems or bugs.
\ No newline at end of file
......@@ -2,6 +2,7 @@
# Created by https://www.gitignore.io/api/latex
### LaTeX ###
.texpadtmp/
*.acn
*.acr
*.alg
......
......@@ -3,7 +3,7 @@
\chapter{Theoretical Background}
%------------------------------------------------------------------------------------------------%
This chapter is mainly based on the dissertation of Daniel Köhn (\cite{koehn:11}), who originally wrote this manual.
\section{Equations of motion for an elastic medium}\label{elastic_fd_model}
The propagation of waves in a general elastic medium can be described by a system of coupled linear partial differential equations. They consist of the equations of motion
\EQ{m:1}{\begin{split}
......@@ -27,7 +27,6 @@ This expression is called {\bf{Stress-Displacement}} formulation. Another common
\rm{\frac{\partial \epsilon_{ij}}{\partial t}}&\rm{=\frac{1}{2}\biggl(\frac{\partial v_i}{\partial x_j}+\frac{\partial v_j}{\partial x_i}\biggr)}
\end{split}}
This expression is called {\bf{Stress-Velocity}} formulation. For simple cases \ER{2:20} and \ER{2:20:1} can be solved analytically. More complex problems require numerical solutions. One possible approach for a numerical solution is described in the next section.
\newpage
\section{Solution of the elastic wave equation by finite differences}\label{elastic_FD_Code}
\subsection{Discretization of the wave equation}
For the numerical solution of the elastic equations of motion, Eqs. \ER{2:20} have to be discretized in time and space on a grid. The
......@@ -182,7 +181,7 @@ A comparison between the exponential damping and the PML boundary is shown in Fi
\begin{figure}[ht]
\begin{center}
\includegraphics[width=13cm]{figures/ABS_PML_comp_shots.pdf}
\caption{\label{comp_EXP_PML} Comparison between exponential damping (left column) and PML (right column) absorbing boundary conditions for a homogeneous full space model.}
\caption{\label{comp_EXP_PML} Comparison between exponential damping (left column) and PML (right column) absorbing boundary conditions for a homogeneous full space model. (\cite{koehn:11})}
\end{center}
\end{figure}
\end{enumerate}
......@@ -251,7 +250,7 @@ Holberg) of FD operators. For the Holberg coefficients n is calculated for a min
\epsfig{file=figures/homogenous_grid_n_16_5.pdf, width=6 cm}\epsfig{file=figures/homogenous_grid_n_16_10.pdf, width=6 cm}
\epsfig{file=figures/homogenous_grid_n_4_5.pdf, width=6 cm}\epsfig{file=figures/homogenous_grid_n_4_10.pdf, width=6 cm}
\epsfig{file=figures/homogenous_grid_n_2_5.pdf, width=6 cm}\epsfig{file=figures/homogenous_grid_n_2_10.pdf, width=6 cm}
\caption{\label{grid_disp_pics} The influence of grid dispersion in FD modeling: Spatial sampling of the wavefield using n=16 (top), n=4 (center) and n=2 gridpoints (bottom) per minimum wavelength $\rm{\lambda_{min}}$.}
\caption{\label{grid_disp_pics} The influence of grid dispersion in FD modeling: Spatial sampling of the wavefield using n=16 (top), n=4 (center) and n=2 gridpoints (bottom) per minimum wavelength $\rm{\lambda_{min}}$. (\cite{koehn:11})}
\end{center}
\end{figure}
\clearpage
......@@ -284,7 +283,7 @@ FDORDER & h (Taylor) & h (Holberg) \\ \hline
\begin{center}
\epsfig{file=figures/courandt_1.pdf, width=7 cm}\epsfig{file=figures/courandt_2.pdf, width=7 cm}
\epsfig{file=figures/courandt_3.pdf, width=7 cm}\epsfig{file=figures/courandt_4.pdf, width=7 cm}
\caption{\label{courandt_pics} Temporal evolution of the Courant instability. In the colored areas the wave amplitudes are extremely large.}
\caption{\label{courandt_pics} Temporal evolution of the Courant instability. In the colored areas the wave amplitudes are extremely large. (\cite{koehn:11})}
\end{center}
\end{figure}
\clearpage
% ------------------------------------
\chapter{The adjoint problem}
% ------------------------------------
This chapter is mainly based on the dissertation of Daniel Köhn (\cite{koehn:11}), who originally wrote this manual.\\
\newline
The aim of full waveform tomography is to find an ``optimum'' model which explains the data well. It should not only explain the
first arrivals of specific phases of the seismic wavefield, such as refractions or reflections, but also the amplitudes, which contain information
on the distribution of the elastic material parameters in the subsurface. To achieve this goal three problems have to be solved:
......@@ -31,7 +32,7 @@ has a special physical meaning. It represents the residual elastic energy contai
\begin{figure}[!bh]
\begin{center}
\includegraphics[width=15 cm]{figures/data_res_sketch_1.pdf}
\caption{Definition of data residuals $\rm{\mathbf{\delta u}}$.}
\caption{Definition of data residuals $\rm{\mathbf{\delta u}}$. (\cite{koehn:11})}
\label{sketch_data_res}
\end{center}
\end{figure}
......@@ -49,7 +50,7 @@ along the search direction $\rm{\mathbf{\delta m}_{1}}$ with the step length $\r
\begin{figure}[!bh]
\begin{center}
\includegraphics[width=12.5cm]{figures/sketch_grad_1.pdf}
\caption{Schematic sketch of the residual energy at one point in space as a function of two model parameters $\rm{m_1}$ and $\rm{m_2}$. The blue dot denotes the starting point in the parameter space, while the red cross marks a minimum of the objective function.}
\caption{Schematic sketch of the residual energy at one point in space as a function of two model parameters $\rm{m_1}$ and $\rm{m_2}$. The blue dot denotes the starting point in the parameter space, while the red cross marks a minimum of the objective function. (\cite{koehn:11})}
\label{sketch_grad}
\end{center}
\end{figure}
......@@ -105,7 +106,7 @@ Eq. \ER{dL2_dm} can be related to the mapping of small changes from the data to
\begin{figure}[ht]
\begin{center}
\includegraphics[width=10cm]{figures/mapping_data_model.pdf}
\caption{Mapping between model and data space and vice versa.}
\caption{Mapping between model and data space and vice versa. (\cite{koehn:11})}
\label{mapping_data_model}
\end{center}
\end{figure}
......@@ -424,7 +425,7 @@ The aim is to find the minimum of this function loacted at the point [1,1] which
\begin{center}
\includegraphics[width=15cm]{figures/Rosenbrock_1.pdf}\\
\includegraphics[width=15cm]{figures/Rosenbrock_2.pdf}\\
\caption{Results of the convergence test for the Rosenbrock function. The minimum is marked with a red cross, the starting point with a blue point. The maximum number of iterations is 16000. The step length $\rm{\mu_n}$ varies between $\rm{2e-3}$ (top) and $\rm{6.1e-3}$ (bottom).}
\caption{Results of the convergence test for the Rosenbrock function. The minimum is marked with a red cross, the starting point with a blue point. The maximum number of iterations is 16000. The step length $\rm{\mu_n}$ varies between $\rm{2e-3}$ (top) and $\rm{6.1e-3}$ (bottom). (\cite{koehn:11})}
\label{Rosenbrock_constant}
\end{center}
\end{figure}
......@@ -432,7 +433,7 @@ gradient of the Rosenbrock function is large. After reaching the narrow valley t
\begin{figure}[ht]
\begin{center}
\includegraphics[width=15cm]{figures/sl_case1_final}
\caption{Line search algorithm to find the optimum step length $\rm{\mu_{opt}}$: The true misfit function (yellow line) is approximated by a parabola fitted by 3 points.}
\caption{Line search algorithm to find the optimum step length $\rm{\mu_{opt}}$: The true misfit function (yellow line) is approximated by a parabola fitted by 3 points. (\cite{koehn:11})}
\label{sl_case1_final}
\end{center}
\end{figure}
......@@ -502,7 +503,7 @@ This approach leads to a smoother decrease of the objective function, but also i
\begin{figure}[ht]
\begin{center}
\includegraphics[width=16cm]{figures/Rosenbrock_3}
\caption{Results of the convergence test for the Rosenbrock function. The minimum is marked by a red cross, the starting point by a blue point. The maximum number of iterations is 4000. The optimum step length is calculated at each iteration by the parabola fitting algorithm. Note the criss-cross pattern of the updates in the narrow valley near the minimum.}
\caption{Results of the convergence test for the Rosenbrock function. The minimum is marked by a red cross, the starting point by a blue point. The maximum number of iterations is 4000. The optimum step length is calculated at each iteration by the parabola fitting algorithm. Note the criss-cross pattern of the updates in the narrow valley near the minimum. (\cite{koehn:11})}
\label{Rosenbrock_variable}
\end{center}
\end{figure}
......@@ -544,7 +545,7 @@ I use the very popular choice $\rm{\beta_n = max[0,\beta_n^{PR}]}$ which provide
\begin{figure}[ht]
\includegraphics[width=17cm]{figures/Rosenbrock_4}\\
\caption{Results of the convergence test for the Rosenbrock function using the conjugate gradient method, where the optimum step length is calculated with the parabolic fitting algorithm. The minimum is marked by a red cross, the starting point by a blue point. The maximum number of iterations is 2000.}
\caption{Results of the convergence test for the Rosenbrock function using the conjugate gradient method, where the optimum step length is calculated with the parabolic fitting algorithm. The minimum is marked by a red cross, the starting point by a blue point. The maximum number of iterations is 2000. (\cite{koehn:11})}
\label{Rosenbrock_cg}
\end{figure}
......
......@@ -230,7 +230,7 @@ Default value is:
With this option pure acoustic modelling and/or inversion can be performed (ACOUSTIC = 1). Only a P-wave velocity and a density model need to be provided. Acoustic modelling and inversion can provide a quick estimate, especially for marine environments.
For acoustic modelling the option VELOCITY is not available and only PARAMETERIZATION = 1 is possible.
For acoustic modelling only PARAMETERIZATION = 1 is possible.
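As a minimal sketch (the comment line and the grouping are only illustrative, assuming the parameter names are used as JSON keys in the same way as elsewhere in the parameter file), a purely acoustic inversion would be requested with:
{\color{blue}{\begin{verbatim}
"Acoustic inversion" : "comment",
"ACOUSTIC" : "1",
"PARAMETERIZATION" : "1",
\end{verbatim}}}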
\section{PSV and SH modelling}
{\color{blue}{\begin{verbatim}
......@@ -487,12 +487,12 @@ Default values are:
With the use of a workflow file, you can define different FWI stages. For instance, one FWI stage could refer to one corner frequency of a low-pass filter or to a specific time window. Every line in the workflow file corresponds to one FWI stage. To use a workflow file the switch USE\_WORKFLOW has to be set to 1. The algorithm will automatically change to the next line of the workflow file if the abort criterion of the current line is reached or if no step length could be found which reduces the misfit. The structure of the variables inside the workflow file is as follows: \\
\noindent
\begin{tabular}{llllllllllll}
\# & INV\_VS & INV\_VP & INV\_RHO & PRO & TIME\_FILT & FC & WAVETYPE & 0 & 0 & EPRECOND & EPSILON\_WE\\
\begin{tabular}{lllllllllllll}
\# & INV\_VS & INV\_VP & INV\_RHO & PRO & TIME\_FILT & F\_HIGH\_PASS & F\_LOW\_PASS & WAVETYPE & 0 & 0 & EPRECOND & EPSILON\_WE\\
\end{tabular}
\ \\
The first column is the number of the line. With INV\_* etc. you can activate the inversion for VS, VP or RHO, respectively. The abort criterion in percent for this FWI stage is declared in the variable PRO. With TIME\_FILT you can activate the frequency filtering with the corner frequency FC. WAVETYPE can be used to switch between PSV and SH modeling; however, it is recommended to use PSV modeling (WAVETYPE==1) due to the current development on SH modeling. The following two zeros are placeholders for an upcoming update. With EPRECOND and EPSILON\_WE you can control the approximate Hessian. Please note that all features used within the workflow, e.g. TIME\_FILT (see section \ref{sec:filtering}), have to be activated in the .JSON file.
The first column is the number of the line. With INV\_* etc. you can activate the inversion for VS, VP or RHO, respectively. The abort criterion in percent for this FWI stage is declared in the variable PRO. With TIME\_FILT you can activate the frequency filtering with the corner frequency F\_HIGH\_PASS of the high-pass and F\_LOW\_PASS of the low-pass filter. WAVETYPE can be used to switch between PSV and SH modeling; however, it is recommended to use PSV modeling (WAVETYPE==1) due to the current development on SH modeling. The following two zeros are placeholders for an upcoming update. With EPRECOND and EPSILON\_WE you can control the approximate Hessian. Please note that all features used within the workflow, e.g. TIME\_FILT (see section \ref{sec:filtering}), have to be activated in the .JSON file.
For an example of a workflow file, have a look in the par/ folder.
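For illustration only, a single workflow line with the column order given above could look as follows (all values are exemplary: one stage inverting for VS only, an abort criterion PRO of 0.01, low-pass filtering with a corner frequency of 10 Hz, no high-pass, PSV modelling and no approximate Hessian preconditioning):
\begin{verbatim}
1 1 0 0 0.01 1 0 10.0 1 0 0 0 0.005
\end{verbatim}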
......@@ -549,11 +549,11 @@ The second criterium is defined as \citep{nocedal:1999}:
\begin{align}
\bigtriangledown f(x+\alpha \cdot p)^{T} \cdot p \geq c_2 \cdot\bigtriangledown f(x)^{T} \cdot p
\end{align}
To verify this condition the full gradient (with all shots!) of the updated model has to be calculated, so the verification of this condition is very expensive in terms of computation time. In theory a step length of one should always be tried first; however, to save time it is possible to choose the step length from the previous iteration which satisfied the wolfe condition. This behavior can be controlled with WOLFE\_TRY\_OLD\_STEPLENGTH; if this is set to 1, the old step length will always be tried first. In practice, experience has shown that at the beginning of an FWI stage a couple of step lengths will be tested and the algorithm then converges to a step length which will be accepted by the wolfe condition until the misfit cannot be reduced further. With the variable WOLFE\_NUM\_TEST a maximum number of test calculations can be defined. If after WOLFE\_NUM\_TEST tests no step length could be found, the algorithm takes the step length of the test which reduced the misfit most, even if this step length does not satisfy the wolfe condition. If no step length was found which reduces the misfit, the algorithm will switch to the next FWI stage in the workflow file or abort the inversion.
To verify this condition the full gradient (with all shots!) of the updated model has to be calculated, so the verification of this condition is very expensive in terms of computation time. In theory a step length of one should always be tried first; however, to save time it is possible to choose the step length from the previous iteration which satisfied the Wolfe condition. This behavior can be controlled with WOLFE\_TRY\_OLD\_STEPLENGTH; if this is set to 1, the old step length will always be tried first. In practice, experience has shown that at the beginning of an FWI stage a couple of step lengths will be tested and the algorithm then converges to a step length which will be accepted by the Wolfe condition until the misfit cannot be reduced further. With the variable WOLFE\_NUM\_TEST a maximum number of test calculations can be defined. If after WOLFE\_NUM\_TEST tests no step length could be found, the algorithm takes the step length of the test which reduced the misfit most, even if this step length does not satisfy the Wolfe condition. If no step length was found which reduces the misfit, the algorithm will switch to the next FWI stage in the workflow file or abort the inversion.
The variable $c_1$ is set to $10^{-7}\cdot \max(f(x))^{-1}$ and $c_2$ is set to 0.9. With WOLFE\_C1\_SL and WOLFE\_C2\_SL it is possible to adjust $c_1$ and $c_2$ via the JSON input file. If the parameters are not set, they will be set to default values, so in most cases it is best not to specify these parameters in the JSON file.
A step length search which is based on the wolfe condition can be used with the switch WOLFE\_CONDITION. Please note that this step length search is only available if GRAD\_METHOD==2.
A step length search which is based on the Wolfe condition can be used with the switch WOLFE\_CONDITION. Please note that this step length search is only available if GRAD\_METHOD==2.
However, it is possible to choose a second step length search with the L-BFGS method. This one is available by the switch LBFGS\_STEP\_LENGTH=0. If you use LBFGS\_STEP\_LENGTH the wolfe condition based step length search will be deactivated automatically. With this search the step lengths 0.1, 0.5 and 1.0 will be tried and the best step length will be estimated with a parabolic fit. This search algorithm is implemented to save computation time; however, the wolfe condition will not be checked, so the L-BFGS stability will not be guaranteed. It is highly recommended to use the step length search based on the wolfe condition. It is likely that this option will be removed in a future release.
However, it is possible to choose a second step length search while the L-BFGS method is used. This one is available by the switch LBFGS\_STEP\_LENGTH=0. If you use LBFGS\_STEP\_LENGTH=0 the Wolfe condition based step length search will be deactivated automatically. With this search the step lengths 0.1, 0.5 and 1.0 will be tried and the best step length will be estimated with a parabolic fit. This search algorithm is implemented to save computation time; however, the Wolfe condition will not be checked, so the L-BFGS stability will not be guaranteed. It is \textbf{highly} recommended to use the step length search based on the Wolfe condition (LBFGS\_STEP\_LENGTH=1 and WOLFE\_CONDITION=1). Most likely the option LBFGS\_STEP\_LENGTH=0 will be removed in a future release.
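A minimal sketch of the corresponding .JSON entries for the recommended combination of L-BFGS with the Wolfe condition based step length search could look like this (the values for WOLFE\_NUM\_TEST and WOLFE\_TRY\_OLD\_STEPLENGTH are only exemplary):
{\color{blue}{\begin{verbatim}
"GRAD_METHOD" : "2",
"WOLFE_CONDITION" : "1",
"LBFGS_STEP_LENGTH" : "1",
"WOLFE_NUM_TEST" : "10",
"WOLFE_TRY_OLD_STEPLENGTH" : "1",
\end{verbatim}}}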
\section{Step length estimation}
......@@ -637,8 +637,14 @@ To remove the contribution of the unknown source time function (STF) from the wa
"N_STF" : "10",
"N_STF_START" : "1",
"TAPER_STF" : "0",
"TRKILL_STF" : "0",
"TRKILL_FILE_STF" : "./trace_kill/trace_kill",
"TRKILL_STF_OFFSET" : "0",
"TRKILL_STF_OFFSET_LOWER" : "10",
"TRKILL_STF_OFFSET_UPPER" : "20",
"TRKILL_STF_OFFSET_INVERT" : "0",
\end{verbatim}}}
{\color{red}{\begin{verbatim}
......@@ -661,12 +667,12 @@ Due to the amplitude decay with offset to the source signals from receivers at l
The procedure is implemented in the Fourier domain. Fourier coefficients for the correction filter are constructed such that the residual between the Fourier coefficients of the recorded signals and the Fourier coefficients of the synthetic signals after application of the correction filter is minimized in a least-squares sense. The least-squares solution is subject to a damping constraint. If the weighted power spectral density of the synthetics at a given frequency drops below the l-th fraction (water-level parameter) of the average (over all frequencies) power spectral density, the filter coefficients are damped (artificially made smaller) by the damping constraint (i.e. a water-level as seen from the perspective of deconvolution).
Start with l=0.01 as a reasonable initial value for the waterlevel. The best fitting waterlevel must be searched by trial and error and depends on signal noise and on how well the synthetics describe the actually observed wave propagtion.
Start with l=0.01 as a reasonable initial value for the water-level. The best fitting water-level must be searched by trial and error and depends on signal noise and on how well the synthetics describe the actually observed wave propagation.
The theory behind the Fourier domain least squares procedure is outlined by Lisa Groos (2013, Appendix F, page 146). She also describes a way to find an approrpiate waterlevel by application of the L-curve criterion (Groos, 2013, Appendix G, page 148).
The theory behind the Fourier domain least squares procedure is outlined by Lisa Groos (2013, Appendix F, page 146). She also describes a way to find an appropriate water-level by application of the L-curve criterion (Groos, 2013, Appendix G, page 148).
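Schematically, and only as a sketch rather than the exact expression derived by Groos (2013), the correction filter coefficients $q(\omega)$ can be thought of as a water-level damped deconvolution of the recorded data $\tilde{d}(\omega)$ by the synthetics $\tilde{s}(\omega)$:
\begin{equation}
q(\omega) \approx \frac{\tilde{d}(\omega)\,\tilde{s}^{*}(\omega)}{\max\bigl(|\tilde{s}(\omega)|^{2},\;l\,\overline{|\tilde{s}|^{2}}\bigr)},
\end{equation}
where $\overline{|\tilde{s}|^{2}}$ denotes the power spectral density averaged over all frequencies and $l$ is the water-level parameter.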
\newline
N\_STF is the increment between the iteration steps. N\_STF\_START defines at which iterationstep the inversion for STF should start. This parameter has to be set at least to 1 NOT(!) 0. When using TAPER\_STF = 1, the source signal is tapered. See \texttt{src/taper.c} for the taper definition. With TRKILL\_STF = 1 it is possible to apply a trace killing for the estimation of the source wavelet correction filter (see section \ref{sec:trace_killing}).
N\_STF is the increment between the iteration steps. N\_STF\_START defines at which iteration step the inversion for the STF should start. This parameter has to be set to at least 1, NOT(!) 0. When using TAPER\_STF = 1, the source signal is tapered. See \texttt{src/taper.c} for the taper definition. With TRKILL\_STF = 1 it is possible to apply a trace killing for the estimation of the source wavelet correction filter. Similar to the normal trace killing (see section \ref{sec:trace_killing}) it is possible to use an offset based trace kill. To use it, TRKILL\_STF\_OFFSET and TRKILL\_STF have to be set to one. Then all traces with offsets between TRKILL\_STF\_OFFSET\_LOWER (in meters) and TRKILL\_STF\_OFFSET\_UPPER (in meters) will be killed. The offset is calculated internally for each source-receiver combination. If TRKILL\_STF\_OFFSET is set to two, the trace kill file and the offset based trace kill will be combined. Additionally, the option TRKILL\_STF\_OFFSET\_INVERT allows inverting the sense of the offset based trace kill for the STF inversion. If TRKILL\_STF\_OFFSET\_INVERT is set to one, all offsets which do not lie inside the specified range will be killed, i.e. the specified offset range works like a band-pass. However, it is not possible to combine a trace kill file and the offset based trace kill if you invert the sense of the offset based trace kill. \textit{Example}: If you only want to use an offset range from 10 to 20 meters for the STF inversion, set TRKILL\_STF and TRKILL\_STF\_OFFSET to one, TRKILL\_STF\_OFFSET\_LOWER to 10, TRKILL\_STF\_OFFSET\_UPPER to 20 and TRKILL\_STF\_OFFSET\_INVERT to one.
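As a sketch, the settings of this example would read in the .JSON file:
{\color{blue}{\begin{verbatim}
"TRKILL_STF" : "1",
"TRKILL_STF_OFFSET" : "1",
"TRKILL_STF_OFFSET_LOWER" : "10",
"TRKILL_STF_OFFSET_UPPER" : "20",
"TRKILL_STF_OFFSET_INVERT" : "1",
\end{verbatim}}}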
\newline
Please note: If you additionally switch on frequency filtering during the inversion (TIME\_FILT=1 or TIME\_FILT=2), the parameters N\_STF and N\_STF\_START will be ignored. However, the optimal source time function will still be inverted at the first iteration and after every change of the frequency range.
......@@ -680,10 +686,10 @@ For more information see chapter \ref{cha:STF-Inversion}.
{\color{blue}{\begin{verbatim}
"Frequency filtering during inversion" : "comment",
"TIME_FILT" : "0",
"F_HP" : "1",
"FC_START" : "10.0",
"FC_END" : "75.0",
"FC_INCR" : "10.0",
"F_HIGH_PASS" : "1",
"F_LOW_PASS_START" : "10.0",
"F_LOW_PASS_END" : "75.0",
"F_LOW_PASS_INCR" : "10.0",
"ORDER" : "2",
"ZERO_PHASE" : "0",
"FREQ_FILE" : "frequencies.dat",
......@@ -700,13 +706,13 @@ Default values are:
MIN_ITER=0
\end{verbatim}}}
TIME\_FILT = 1 can be set to use frequency filtering. The parameter FC\_START defines the corner frequency of the Butterworth low pass filter at the beginning of the inversion. The parameter FC\_END defines the maximum corner frequency used in the inversion. The parameter FC\_INCR controls in which steps the bandwidth is increased during the inversion.
TIME\_FILT = 1 can be set to use frequency filtering. The parameter F\_LOW\_PASS\_START defines the corner frequency of the Butterworth low pass filter at the beginning of the inversion. The parameter F\_LOW\_PASS\_END defines the maximum corner frequency used in the inversion. The parameter F\_LOW\_PASS\_INCR controls in which steps the bandwidth is increased during the inversion.
If TIME\_FILT = 2 individual frequencies for each step can be read from FREQ\_FILE. In this file the first entry must be the number of frequencies used for filtering. Each frequency in Hz has to be specified in a row. The example file frequencies.dat can be found in \texttt{trunk/par}.
The parameter ORDER defines the order of the Butterworth low pass filter. If the variable ZERO\_PHASE is set to one a zero phase filter is applied. It is realized by filtering the traces in both forward and reverse direction with the defined Butterworth low pass filter. Therefore, the effective order of the low pass filter is doubled.
With F\_HP an additional high pass filter can be applied, where F\_HP is the corner frequency in Hz.
With F\_HIGH\_PASS an additional high pass filter can be applied, where F\_HIGH\_PASS is the corner frequency in Hz.
With the parameter PRO (see~\ref{json:abort_criterion}) one has to adjust the criterion that defines at which point the bandwidth of the signals is increased.
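For illustration, a FREQ\_FILE with three corner frequencies could look as follows (the values are exemplary; the first entry is the number of frequencies, followed by one frequency in Hz per row):
\begin{verbatim}
3
10.0
20.0
40.0
\end{verbatim}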
......@@ -734,7 +740,7 @@ To apply time windowing in a time series the paramter TIMEWIN must set to 1. A a
The parameters TWLENGTH\_PLUS and TWLENGTH\_MINUS specify the length of the time window after (PLUS) and before (MINUS) the picked time. The unit is seconds. The damping factor GAMMA must be set individually.
When usig TW\_IND = 1 three columns are expected in PICK\_FILE for each trace with the picked time, time window length plus and time window length minus. In this case TWLENGTH\_PLUS and TWLENGTH\_MINUS are ignored.
When using TW\_IND = 1 three columns are expected in PICK\_FILE for each trace with the picked time (PT), time window length plus (TWLP) and time window length minus (TWLM). In this case TWLENGTH\_PLUS and TWLENGTH\_MINUS are ignored. It is also possible to use two time windows for each trace which can be utilized with TW\_IND = 2. PICK\_FILE must then contain six columns with values for PT\_1, TWLP\_1, TWLM\_1, PT\_2, TWLP\_2 and TWLM\_2 for each trace.
If you use the option ``Workflow'' (section \ref{sec:workflow}) it is possible to define separate files for each workflow stage. They have to be named PICKS\_FILE\_<sourcenumber>\_<workflowstage>.dat. If you don't provide separate files for some or all stages, the standard file (PICKS\_FILE\_<sourcenumber>.dat) is used. This means a standard file should exist in any case.
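As an illustrative sketch, a PICK\_FILE for TW\_IND = 1 would contain one row per trace with three columns (picked time, time window length plus and time window length minus, all in seconds; the values are exemplary):
\begin{verbatim}
0.12 0.05 0.02
0.15 0.05 0.02
0.19 0.05 0.02
\end{verbatim}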
......@@ -759,7 +765,8 @@ If you don't want to use single traces or a subset of your data, the parameter T
If you use the option ``Workflow'' (section \ref{sec:workflow}) it is possible to define separate files for each workflow stage. They have to be named TRKILL\_FILE\_<workflowstage>.dat. If you don't provide separate files for some or all stages, the standard file (TRKILL\_FILE.dat) is used. This means a standard file should exist in any case.
Moreover, you can directly define a offset range that should be killed. Simply set TRKILL\_OFFSET and TRKILL to one and all traces with a offset between TRKILL\_OFFSET\_LOWER and TRKILL\_OFFSET\_UPPER will be killed.
Moreover, you can directly define an offset range that should be killed. Simply set TRKILL\_OFFSET and TRKILL to one and all traces with an offset between TRKILL\_OFFSET\_LOWER (in meters) and TRKILL\_OFFSET\_UPPER (in meters) will be killed. The offset is calculated internally for each source-receiver combination. In this case the trace kill file will be ignored. To use both the trace kill file and the offset based trace kill, you have to set TRKILL\_OFFSET to two. If you use a workflow, the same naming convention as described above applies.
\textit{Example}: You want to kill the near offsets for each source, let's say you only want to use traces with an offset greater than ten meters. Therefore, you would have to set TRKILL\_OFFSET and TRKILL to one, TRKILL\_OFFSET\_LOWER to zero and TRKILL\_OFFSET\_UPPER to ten. If you additionally want to use a trace kill file, switch TRKILL\_OFFSET to two and the trace kill file given in TRKILL\_FILE will be applied as well.
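As a sketch, the corresponding .JSON entries for this example would be (shown here with one key per parameter; see the default parameter file in par/ for the exact formatting):
{\color{blue}{\begin{verbatim}
"TRKILL" : "1",
"TRKILL_OFFSET" : "1",
"TRKILL_OFFSET_LOWER" : "0",
"TRKILL_OFFSET_UPPER" : "10",
\end{verbatim}}}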
\section{Gradient manipulation}
\subsection{Definition of a gradient taper}
......@@ -833,7 +840,7 @@ For GRAD\_FILT\_WAVELENGTH = 1 (and TIME\_FILT=1) a new wavelength dependent fil
\begin{equation}
\mbox{FILT\_SIZE\_GRAD} = \frac{V_{s,\text{average}}\cdot \text{A}}{\text{FC}}
\end{equation}
where FC is the corner frequency of TIME\_FILT and A is a weighting factor.
where FC denotes the current corner frequency F\_LOW\_PASS of TIME\_FILT and A is a weighting factor.
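For example, with purely illustrative numbers, an average S-wave velocity of $V_{s,\text{average}}=1000\,$m/s, a weighting factor $\text{A}=1$ and a current low-pass corner frequency of 20 Hz give $\mbox{FILT\_SIZE\_GRAD} = 1000 \cdot 1 / 20\,\mbox{m} = 50\,$m.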
\section{Model manipulation}
......
......@@ -112,10 +112,12 @@
\section*{Authors}
The IFOS2D code (formerly DENISE) was at first developed by Daniel K\"ohn, Denise De Nil and Andr$\rm{\acute{e}}$ Kurzmann at the Christian-Albrechts-Universit\"at Kiel and TU Bergakademie Freiberg (Germany) from 2005 to 2009.\\
\newline
This documentation was originally written by Daniel K\"ohn.\\ Large parts of the presented theory are extracted from his dissertation \cite{koehn:11}.\\
\newline
The forward code is based on the viscoelastic FD code fdveps (now SOFI2D) by \cite{bohlen:02}.\\
\newline
Different external libraries for timedomain filtering are used.\\
Different external libraries for time domain filtering are used.\\
The copyrights of the source codes are held by different persons:\\
\newline
cseife.c, cseife.h, lib\_stfinv, lib\_aff, lib\_fourier:\\
......@@ -160,13 +162,14 @@ The Authors of IFOS2D are listed in file \path{AUTHORS}.
%------------------------------------------------------------------------------------------------%
We thank the following people for constructive discussions and further code improvements:\\
\newline
\newline
Daniel K\"ohn (Christian-Albrechts-Universit\"at Kiel), \\
Anna Przebindowska (Karlsruhe Institute of Technology), \\
Olaf Hellwig (TU Bergakademie Freiberg), \\
Dennis Wilken and Wolfgang Rabbel (Christian-Albrechts-Universit\"at Kiel).
\newline
\noindent The development of the code was suppported by the Christian-Albrechts-Universität Kiel, TU Bergakademie Freiberg, Deutsche Forschungsgemeinschaft (DFG), Bundesministerium für Bildung und Forschung (BMBF), the Wave Inversion Technology (WIT) Consortium and the Verbundnetz-Gas AG (VNG).
\noindent The development of the code was supported by the Christian-Albrechts-Universität Kiel, TU Bergakademie Freiberg, Deutsche Forschungsgemeinschaft (DFG), Bundesministerium für Bildung und Forschung (BMBF), the Wave Inversion Technology (WIT) Consortium and the Verbundnetz-Gas AG (VNG).
\noindent The code was tested and optimized at the computing centres of Kiel University, TU Bergakademie Freiberg, TU Chemnitz, TU Dresden, the Karlsruhe Institute of Technology (KIT) and the Hochleistungsrechenzentrum Nord (HLRN 1+2).
\newline
......
......@@ -154,19 +154,21 @@
"N_STF" : "10",
"N_STF_START" : "1",
"TAPER_STF" : "0",
"TRKILL_STF" : "0",
"TRKILL_FILE_STF" : "./trace_kill/trace_kill",
"TRKILL_STF_OFFSET" : "0",
"TRKILL_STF_OFFSET_LOWER , TRKILL_STF_OFFSET_UPPER" : "0 , 10",
"TRKILL_FILE_STF" : "./trace_kill/trace_kill",
"TRKILL_STF_OFFSET_INVERT" : "0",
"Frequency filtering during inversion" : "comment",
"TIME_FILT" : "0",
"F_HP" : "1",
"FC_START" : "10.0",
"FC_END" : "75.0",
"FC_INCR" : "10.0",
"F_HIGH_PASS" : "1",
"F_LOW_PASS_START" : "10.0",
"F_LOW_PASS_END" : "75.0",
"F_LOW_PASS_INCR" : "10.0",
"ORDER" : "2",
"ZERO_PHASE" : "0",
"FREQ_FILE" : "frequencies.dat",
"WRITE_FILTERED_DATA" : "0",
......@@ -183,9 +185,10 @@
"Trace killing" : "comment",
"TRKILL" : "0",
"TRKILL_FILE" : "./trace_kill/trace_kill",
"TRKILL_OFFSET" : "0",
"TRKILL_OFFSET_LOWER , TRKILL_OFFSET_UPPER" : "0 , 10",
"TRKILL_FILE" : "./trace_kill/trace_kill",
"Definition of a gradient taper" : "comment",
"SWS_TAPER_GRAD_VERT" : "0",
......
......@@ -127,8 +127,8 @@
"Frequency filtering during inversion" : "comment",
"TIME_FILT" : "1",
"FC_START" : "10.0",
"FC_END" : "70.0",
"FC_INCR" : "10.0",
"F_LOW_PASS_END" : "70.0",
"F_LOW_PASS_INCR" : "10.0",
"ORDER" : "4",
"Minimum number of iteration per frequency" : "comment",
......
......@@ -139,9 +139,9 @@
"Frequency filtering during inversion" : "comment",
"TIME_FILT" : "0",
"FC_START" : "5.0",
"FC_END" : "5.0",
"FC_INCR" : "10.0",
"F_LOW_PASS_START" : "5.0",
"F_LOW_PASS_END" : "5.0",
"F_LOW_PASS_INCR" : "10.0",
"ORDER" : "4",
"Minimum number of iteration per frequency" : "comment",
......
......@@ -136,10 +136,10 @@
"Frequency filtering during inversion" : "comment",
"TIME_FILT" : "0",
"F_HP" : "1",
"FC_START" : "10.0",
"FC_END" : "70.0",
"FC_INCR" : "10.0",
"F_HIGH_PASS" : "1",
"F_LOW_PASS_START" : "10.0",
"F_LOW_PASS_END" : "70.0",
"F_LOW_PASS_INCR" : "10.0",
"ORDER" : "4",
"ZERO_PHASE" : "1",
......
# VS VP RHO PRO FIL FC WT J J EPRE EPSI
1 1 0 0 0.01 0 0 2 0 0 0 0.005
2 1 0 0 0.01 0 0 2 0 0 0 0.005
3 1 0 0 0.01 0 0 2 0 0 0 0.005
4 1 0 0 0.01 0 0 2 0 0 0 0.005
# VS VP RHO PRO F_FILT F_HIGH F_LOW WT J J EPRE EPSI
1 1 0 0 0.01 0 0 0 2 0 0 0 0.005
2 1 0 0 0.01 0 0 0 2 0 0 0 0.005
3 1 0 0 0.01 0 0 0 2 0 0 0 0.005
4 1 0 0 0.01 0 0 0 2 0 0 0 0.005
......@@ -17,7 +17,7 @@
-----------------------------------------------------------------------------------------*/
/* ----------------------------------------------------------------------
* This is program IFOS Version 2.0.1
* This is program IFOS Version 2.0.2
* Inversion of Full Observed Seismograms
*
* ----------------------------------------------------------------------*/
......@@ -58,7 +58,7 @@ int main(int argc, char **argv){
float L2_SH, L2sum_SH, L2_all_shots_SH, L2sum_all_shots_SH;
// Pointer for dynamic wavefields:
float ** psxx, ** psxy, ** psyy, ** psxz, ** psyz, **psp, ** ux, ** uy, ** uxy, ** uyx, ** Vp0, ** uttx, ** utty, ** Vs0, ** Rho0;
float ** psxx, ** psxy, ** psyy, ** psxz, ** psyz, **psp, ** ux, ** uy, ** uxy, ** uyx, ** u, ** Vp0, ** uttx, ** utty, ** Vs0, ** Rho0;
float ** pvx, ** pvy, ** pvz, **waveconv, **waveconv_lam, **waveconv_mu, **waveconv_rho, **waveconv_rho_s, **waveconv_u, **waveconvtmp, **wcpart, **wavejac,**waveconv_rho_s_z,**waveconv_u_z,**waveconv_rho_z;
float **waveconv_shot, **waveconv_u_shot, **waveconv_rho_shot, **waveconv_u_shot_z, **waveconv_rho_shot_z;
float ** pvxp1, ** pvyp1, ** pvzp1, ** pvxm1, ** pvym1, ** pvzm1;
......@@ -99,6 +99,9 @@ int main(int argc, char **argv){
int SOURCE_SHAPE_OLD=0;
int SOURCE_SHAPE_OLD_SH=0;
/* Variables for conjugate gradient */
int PCG_iter_start=1;
/* Variables for L-BFGS */
int LBFGS_NPAR=3;
int LBFGS_iter_start=1;
......@@ -149,8 +152,8 @@ int main(int argc, char **argv){
WORKFLOW_STAGE=1;
/* variable for time domain filtering */
float FC;
float *FC_EXT=NULL;
float F_LOW_PASS;
float *F_LOW_PASS_EXT=NULL;
int nfrq=0;
int FREQ_NR=1;
......@@ -222,9 +225,6 @@ int main(int argc, char **argv){
at the receiver positions to calculate the adjoint sources and to do
the backpropagation; look at function saveseis_glob.c to see that every
NDT sample for the forward modeled wavefield is written to su files*/
lsnap=iround(TSNAP1/DT); /* first snapshot at this timestep */
lsamp=NDT;
/* output of parameters to log-file or stdout */
if (MYID==0) write_par(FP);
......@@ -408,6 +408,8 @@ int main(int argc, char **argv){
uxz = matrix(-nd+1,NY+nd,-nd+1,NX+nd);
uyz = matrix(-nd+1,NY+nd,-nd+1,NX+nd);
}
}else{
u = matrix(-nd+1,NY+nd,-nd+1,NX+nd);
}
switch (WAVETYPE) {
......@@ -504,13 +506,7 @@ int main(int argc, char **argv){
cjm = vector(1,L);
}
/*nf=4;
nfstart=4;*/
NTST=20;
NTSTI=NTST/DTINV;
nxny=NX*NY;
nxnyi=(NX/IDXI)*(NY/IDYI);
/* Parameters for step length calculations */
......@@ -907,9 +903,9 @@ int main(int argc, char **argv){
RECINC=1;
switch(TIME_FILT){
case 1: FC=FC_START; break;
case 1: F_LOW_PASS=F_LOW_PASS_START; break;
/*read frequencies from file*/
case 2: FC_EXT=filter_frequencies(&nfrq); FC=FC_EXT[FREQ_NR]; break;
case 2: F_LOW_PASS_EXT=filter_frequencies(&nfrq); F_LOW_PASS=F_LOW_PASS_EXT[FREQ_NR]; break;
}
/* Save old SOURCE_SHAPE, which is needed for STF */
......@@ -927,7 +923,7 @@ int main(int argc, char **argv){
// At each iteration the workflow is applied
if(USE_WORKFLOW&&(FORWARD_ONLY==0)){
apply_workflow(workflow,workflow_lines,workflow_header,&iter,&FC,wavetype_start,&change_wavetype_iter,&LBFGS_iter_start);
apply_workflow(workflow,workflow_lines,workflow_header,&iter,&F_LOW_PASS,wavetype_start,&change_wavetype_iter,&LBFGS_iter_start);
}
......@@ -956,6 +952,8 @@ int main(int argc, char **argv){
gradient_optimization=1;
}
/* Reset fail status of parabolic step length search */
step3=0;
}
if (MYID==0){
......@@ -1084,7 +1082,7 @@ int main(int argc, char **argv){
if (TIME_FILT==0){
fprintf(FPL2,"opteps_vp \t epst1[1] \t epst1[2] \t epst1[3] \t L2t[1] \t L2t[2] \t L2t[3] \t L2t[4] \n");}
else{
fprintf(FPL2,"opteps_vp \t epst1[1] \t epst1[2] \t epst1[3] \t L2t[1] \t L2t[2] \t L2t[3] \t L2t[4] \t FC \n");
fprintf(FPL2,"opteps_vp \t epst1[1] \t epst1[2] \t epst1[3] \t L2t[1] \t L2t[2] \t L2t[3] \t L2t[4] \t F_LOW_PASS \n");
}
}
}
......@@ -1309,7 +1307,7 @@ int main(int argc, char **argv){
if(!ACOUSTIC) {
update_s_elastic_PML(1, NX, 1, NY, pvx, pvy, ux, uy, uxy, uyx, psxx, psyy, psxy, ppi, pu, puipjp, absorb_coeff, prho, hc, infoout, K_x, a_x, b_x, K_x_half, a_x_half, b_x_half, K_y, a_y, b_y, K_y_half, a_y_half, b_y_half, psi_vxx, psi_vyy, psi_vxy, psi_vyx);
} else {
update_p_PML(1, NX, 1, NY, pvx, pvy, psp, ppi, absorb_coeff, prho, hc, infoout, K_x, a_x, b_x, K_x_half, a_x_half, b_x_half, K_y, a_y, b_y, K_y_half, a_y_half, b_y_half, psi_vxx, psi_vyy, psi_vxy, psi_vyx);
update_p_PML(1, NX, 1, NY, pvx, pvy, psp, u, ppi, absorb_coeff, prho, hc, infoout, K_x, a_x, b_x, K_x_half, a_x_half, b_x_half, K_y, a_y, b_y, K_y_half, a_y_half, b_y_half, psi_vxx, psi_vyy, psi_vxy, psi_vyx);
}
}
if (WAVETYPE==2 || WAVETYPE==3) {
......@@ -1447,21 +1445,21 @@ int main(int argc, char **argv){
if(WAVETYPE==1 || WAVETYPE==3){
if ((ADJOINT_TYPE==1)|| (ADJOINT_TYPE==2)){
inseis(fprec,ishot,sectionvy_obs,ntr_glob,ns,2,iter);
timedomain_filt(sectionvy_obs,FC,ORDER,ntr_glob,ns,1);
timedomain_filt(sectionvy_obs,F_LOW_PASS,ORDER,ntr_glob,ns,1);
}
if (ADJOINT_TYPE==4){
inseis(fprec,ishot,sectionp_obs,ntr_glob,ns,9,iter);
timedomain_filt(sectionp_obs,FC,ORDER,ntr_glob,ns,1);
timedomain_filt(sectionp_obs,F_LOW_PASS,ORDER,ntr_glob,ns,1);
}
}
if(WAVETYPE==2 || WAVETYPE==3){
inseis(fprec,ishot,sectionvz_obs,ntr_glob,ns,10,iter);
timedomain_filt(sectionvz_obs,FC,ORDER,ntr_glob,ns,1);
timedomain_filt(sectionvz_obs,F_LOW_PASS,ORDER,ntr_glob,ns,1);
}
printf("\n ====================================================================================================== \n");
printf("\n Time Domain Filter is used for the inversion: lowpass filter, corner frequency of %.2f Hz, order %d\n",FC,ORDER);
printf("\n Time Domain Filter is used for the inversion: lowpass filter, corner frequency of %.2f Hz, order %d\n",F_LOW_PASS,ORDER);
printf("\n ====================================================================================================== \n");
if(iter==1){
......@@ -1475,15 +1473,15 @@ int main(int argc, char **argv){
if(WAVETYPE==1 || WAVETYPE==3){
if ((ADJOINT_TYPE==1)|| (ADJOINT_TYPE==2)){
stf(FP,fulldata_vy,sectionvy_obs,sectionvy_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,FC,0,nsrc_glob);
stf(FP,fulldata_vy,sectionvy_obs,sectionvy_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,F_LOW_PASS,0,nsrc_glob);
}
if (ADJOINT_TYPE==4){
stf(FP,fulldata_p,sectionp_obs,sectionp_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,FC,0,nsrc_glob);
stf(FP,fulldata_p,sectionp_obs,sectionp_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,F_LOW_PASS,0,nsrc_glob);
}
}
if(WAVETYPE==2 || WAVETYPE==3){
stf(FP,fulldata_vz,sectionvz_obs,sectionvz_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,FC,1,nsrc_glob);
stf(FP,fulldata_vz,sectionvz_obs,sectionvz_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,F_LOW_PASS,1,nsrc_glob);
}
......@@ -1513,15 +1511,15 @@ int main(int argc, char **argv){
}
if ((ADJOINT_TYPE==1)|| (ADJOINT_TYPE==2)){
stf(FP,fulldata_vy,sectionvy_obs,sectionvy_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,FC,0,nsrc_glob);
stf(FP,fulldata_vy,sectionvy_obs,sectionvy_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,F_LOW_PASS,0,nsrc_glob);
}
if (ADJOINT_TYPE==4){
stf(FP,fulldata_p,sectionp_obs,sectionp_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,FC,0,nsrc_glob);
stf(FP,fulldata_p,sectionp_obs,sectionp_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,F_LOW_PASS,0,nsrc_glob);
}
}
if(WAVETYPE==2 || WAVETYPE==3){
inseis(fprec,ishot,sectionvz_obs,ntr_glob,ns,10,iter);
stf(FP,fulldata_vz,sectionvz_obs,sectionvz_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,FC,1,nsrc_glob);
stf(FP,fulldata_vz,sectionvz_obs,sectionvz_conv,source_time_function,recpos,recpos_loc,ntr_glob,ntr,srcpos,ishot,ns,iter,nsrc_glob,F_LOW_PASS,1,nsrc_glob);
}
}
}
......@@ -1592,11 +1590,11 @@ int main(int argc, char **argv){
/*------------------------------------------------------------------------------*/
if (((TIME_FILT==1) || (TIME_FILT==2)) && (SOURCE_SHAPE!=6) && (INV_STF==0)){
fprintf(FP,"\n Time Domain Filter applied: Lowpass with corner frequency of %.2f Hz, order %d\n",FC,ORDER);
fprintf(FP,"\n Time Domain Filter applied: Lowpass with corner frequency of %.2f Hz, order %d\n",F_LOW_PASS,ORDER);
/*time domain filtering of the source signal */
if(WAVETYPE==1||WAVETYPE==3) timedomain_filt(signals,FC,ORDER,nsrc_loc,ns,1);
if(WAVETYPE==2||WAVETYPE==3) timedomain_filt(signals_SH,FC,ORDER,nsrc_loc,ns,1);
if(WAVETYPE==1||WAVETYPE==3) timedomain_filt(signals,F_LOW_PASS,ORDER,nsrc_loc,ns,1);
if(WAVETYPE==2||WAVETYPE==3) timedomain_filt(signals_SH,F_LOW_PASS,ORDER,nsrc_loc,ns,1);
}
/*------------------------------------------------------------------------------*/
......@@ -1752,7 +1750,7 @@ int main(int argc, char **argv){
if(!ACOUSTIC) {
update_s_elastic_PML(1, NX, 1, NY, pvx, pvy, ux, uy, uxy, uyx, psxx, psyy, psxy, ppi, pu, puipjp, absorb_coeff, prho, hc, infoout, K_x, a_x, b_x, K_x_half, a_x_half, b_x_half, K_y, a_y, b_y, K_y_half, a_y_half, b_y_half, psi_vxx, psi_vyy, psi_vxy, psi_vyx);
} else {
update_p_PML(1, NX, 1, NY, pvx, pvy, psp, ppi, absorb_coeff, prho, hc, infoout, K_x, a_x, b_x, K_x_half, a_x_half, b_x_half, K_y, a_y, b_y, K_y_half, a_y_half, b_y_half, psi_vxx, psi_vyy, psi_vxy, psi_vyx);
update_p_PML(1, NX, 1, NY, pvx, pvy, psp, u, ppi, absorb_coeff, prho, hc, infoout, K_x, a_x, b_x, K_x_half, a_x_half, b_x_half, K_y, a_y, b_y, K_y_half, a_y_half, b_y_half, psi_vxx, psi_vyy, psi_vxy, psi_vyx);
}
}
if (WAVETYPE==2 || WAVETYPE==3) {
......@@ -1842,7 +1840,11 @@ int main(int argc, char **argv){
}else{
for (j=1;j<=NY;j=j+IDYI){
for (i=1;i<=NX;i=i+IDXI){
forward_prop_p[j][i][hin]=psp[j][i];
if(VELOCITY==0){
forward_prop_p[j][i][hin]=psp[j][i];
}else{
forward_prop_p[j][i][hin]=u[j][i];
}
}
}
}
......@@ -1989,7 +1991,7 @@ int main(int argc, char **argv){
if((ADJOINT_TYPE==1)||(ADJOINT_TYPE==3)){ /* if ADJOINT_TYPE */
inseis(fprec,ishot,sectionread,ntr_glob,ns,1,iter);
if ((TIME_FILT==1 )|| (TIME_FILT==2)){
timedomain_filt(sectionread,FC,ORDER,ntr_glob,ns,1);
timedomain_filt(sectionread,F_LOW_PASS,ORDER,ntr_glob,ns,1);
}
h=1;
for(i=1;i<=ntr;i++){
......@@ -2012,7 +2014,7 @@ int main(int argc, char **argv){
if((ADJOINT_TYPE==1)||(ADJOINT_TYPE==2)){ /* if ADJOINT_TYPE */
inseis(fprec,ishot,sectionread,ntr_glob,ns,2,iter);
if ((TIME_FILT==1 )|| (TIME_FILT==2)){
timedomain_filt(sectionread,FC,ORDER,ntr_glob,ns,1);
timedomain_filt(sectionread,F_LOW_PASS,ORDER,ntr_glob,ns,1);
}
h=1;
for(i=1;i<=ntr;i++){
......@@ -2034,7 +2036,7 @@ int main(int argc, char **argv){
if(ADJOINT_TYPE==4){ /* if ADJOINT_TYPE */
inseis(fprec,ishot,sectionread,ntr_glob,ns,9,iter);
if ((TIME_FILT==1 )|| (TIME_FILT==2)){
timedomain_filt(sectionread,FC,ORDER,ntr_glob,ns,1);
timedomain_filt(sectionread,F_LOW_PASS,ORDER,ntr_glob,ns,1);
}
h=1;
for(i=1;i<=ntr;i++){
......@@ -2056,7 +2058,7 @@ int main(int argc, char **argv){
if(WAVETYPE==2 || WAVETYPE==3){
inseis(fprec,ishot,sectionread,ntr_glob,ns,10,iter);
if ((TIME_FILT==1 )|| (TIME_FILT==2)){
timedomain_filt(sectionread,FC,ORDER,ntr_glob,ns,1);
timedomain_filt(sectionread,F_LOW_PASS,ORDER,ntr_glob,ns,1);
}
h=1;
for(i=1;i<=ntr;i++){
......@@ -2260,7 +2262,7 @@ int main(int argc, char **argv){
update_s_elastic_PML_SH(1, NX, 1, NY, pvz,psxz,psyz,uxz,uyz,hc,infoout, K_x, a_x, b_x, K_x_half, a_x_half, b_x_half, K_y, a_y, b_y, K_y_half, a_y_half, b_y_half,psi_vzx, psi_vzy,puipjp,pu,prho);
}
} else {
update_p_PML(1, NX, 1, NY, pvx, pvy, psp, ppi, absorb_coeff, prho, hc, infoout, K_x, a_x, b_x, K_x_half, a_x_half, b_x_half, K_y, a_y, b_y, K_y_half, a_y_half, b_y_half, psi_vxx, psi_vyy, psi_vxy, psi_vyx);
update_p_PML(1, NX, 1, NY, pvx, pvy, psp, u, ppi, absorb_coeff, prho, hc, infoout, K_x, a_x, b_x, K_x_half, a_x_half, b_x_half, K_y, a_y, b_y, K_y_half, a_y_half, b_y_half, psi_vxx, psi_vyy, psi_vxy, psi_vyx);
}
}
......@@ -2424,11 +2426,13 @@ int main(int argc, char **argv){
muss = 0;
lamss = prho[j][i] * ppi[j][i] * ppi[j][i] - 2.0 * muss;
waveconv_lam[j][i] = (1.0/(4.0 * (lamss+muss) * (lamss+muss))) * waveconv_lam[j][i];
if(!ACOUSTIC)
waveconv_lam[j][i] = (1.0/(4.0 * (lamss+muss) * (lamss+muss))) * waveconv_lam[j][i];
else
waveconv_lam[j][i] = (1.0/((lamss+muss) * (lamss+muss))) * waveconv_lam[j][i];
/* calculate Vp gradient */
waveconv_shot[j][i] = 2.0 * ppi[j][i] * prho[j][i] * waveconv_lam[j][i];
// waveconv_shot[j][i] = waveconv_lam[j][i];
}
if(PARAMETERIZATION==2){
......@@ -2900,17 +2904,25 @@ int main(int argc, char **argv){
/* ----------------------------------------------------------------------*/
/* ----------- Preconditioned Conjugate Gradient Method (PCG) ----------*/
/* ----------------------------------------------------------------------*/
if((GRAD_METHOD==1)){
if(GRAD_METHOD==1){
if( (iter-PCG_iter_start) < 2 ) {
fprintf(FP,"\n\n ----- Conjugate Gradient Method -----");
fprintf(FP,"\n Will not use second last gradient for conjugate gradient");
if( (iter-PCG_iter_start) < 1 ) {
fprintf(FP,"\n Will not use last gradient for conjugate gradient");
}