\section*{Monte Carlo methods}
\input{src/mlmc}
\input{src/experimental_setup}
\input{src/numerical_results}
\section*{Stochastic Linear Transport problem}
\begin{frame}{Introduction and Example I}
\begin{itemize}
\item \underline{Problem:} Let $u(\omega)$ be the solution of a PDE with random
data over the probability space $\Omega$ and let $\goal(\omega)$ be some
functional of $u(\omega)$. Estimate $\EE[\goal(\omega)]$.
\item \underline{Example (1D Elliptic Model Problem):} Let $D = (0,1) \subset \RR$, $d = 1$.
Find $u \in V$ such that
\begin{align*}
- (\kappa(\omega,x) u'(\omega,x))' = 0, \quad
u'(0) = 1, \quad
u(1) = 0
\end{align*}
with $\kappa(\omega, x) = \exp(g(\omega, x))$, where $g$ is a Gaussian field,
i.e., $\kappa$ is lognormal
\end{itemize}
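\medskip
{\footnotesize \underline{Remark:} a quick derivation (using only the data above):
in 1D the flux is constant, $\kappa(\omega,x) u'(\omega,x) \equiv \kappa(\omega,0) u'(\omega,0) = \kappa(\omega,0)$,
so with $u(\omega,1) = 0$ the solution is explicit,
\begin{align*}
u(\omega,x) = -\kappa(\omega,0) \int_x^1 \frac{ds}{\kappa(\omega,s)},
\end{align*}
which makes this problem a convenient reference case for the estimators below.}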
\end{frame}
\begin{frame}{Assumptions}
\begin{itemize}
\item \underline{FEM (Finite Element Method):} Let $u_h(\omega, x) \in V_h$ be the
corresponding FEM approximation of $u(\omega, x)$ and let $\goal_h(\omega)$ be the
corresponding functional of $u_h$
\item \underline{Assumptions:} The FEM approximation converges with
rate $\alpha > 0$, i.e.
\begin{equation}
\label{eq:alpha-assumption}
\abs{\EE[\goal_h - \goal]} \lesssim h^\alpha, \quad
\abs{\EE[\goal_h - \goal]} \lesssim N^{-\alpha / d}, \quad
N = \dim(V_h)
\end{equation}
The cost of computing one sample can be bounded with $\gamma > 0$ by
\begin{equation}
\label{eq:gamma-assumption}
\cost(\goal_h(\omega_m)) \lesssim h^{-\gamma}, \quad
\cost(\goal_h(\omega_m)) \lesssim N^{\gamma / d}, \quad
\omega_m \in \Omega
\end{equation}
The variance of $\goal_l - \goal_{l-1}$ decays with $\beta > 0$:
\begin{equation}
\label{eq:beta-assumption}
\abs{\VV[\goal_l - \goal_{l-1}]} \lesssim h^\beta
\end{equation}
\end{itemize}
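\medskip
{\footnotesize \underline{Note:} as a rough orientation (an added remark, assuming
uniform refinement and a solver with cost linear in $N$): since $N \sim h^{-d}$,
one sample then costs $\cost(\goal_h(\omega_m)) \sim N \sim h^{-d}$ and hence
$\gamma \approx d$; a suboptimal solver yields $\gamma > d$.}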
\end{frame}
\begin{frame}{Monte Carlo Estimator}
\begin{itemize}
\item \underline{MC Estimator:} Draw $M$ samples $\omega_m \in \Omega$ and compute
\begin{align*}
\widehat{\goal}_{h,M}^{MC} = \frac{1}{M} \sum_{m=1}^M \goal_h(\omega_m)
\end{align*}
\item \underline{RMSE (Root Mean Square Error):}
\begin{align*}
e(\widehat{\goal}^{MC}_{h,M})^2 =
\EE \left[ (\widehat{\goal}^{MC}_{h,M} - \EE[\goal])^2 \right] =
\underbrace{\frac{1}{M} \VV[\goal_h]}_{\text{estimator error}} +
\underbrace{\left( \EE[\goal_h - \goal] \right)^2}_{\text{FEM error}}
\end{align*}
\end{itemize}
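\medskip
{\footnotesize \underline{Consequence:} to reach $e(\widehat{\goal}^{MC}_{h,M}) \le \epsilon$,
both error terms must be of order $\epsilon^2$: $M \gtrsim \epsilon^{-2}$ and, by
\eqref{eq:alpha-assumption}, $h \sim \epsilon^{1/\alpha}$. With
\eqref{eq:gamma-assumption} this gives the single-level cost
\begin{align*}
\cost(\widehat{\goal}^{MC}_{h,M}) \sim M h^{-\gamma} \lesssim \epsilon^{-2-\gamma/\alpha},
\end{align*}
the benchmark which MLMC improves on.}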
\end{frame}
\begin{frame}{Example II}
\begin{itemize}
\item \underline{Example (2D Elliptic Model Problem):} Let $D \subset \RR^2$, $d = 2$.
Find $u \in V$ such that
\begin{align*}
- \div(\kappa(\omega,x) \nabla u(\omega,x)) = 0, \quad
\nabla u(\omega,x) \cdot n = -1, \quad
u(\omega,x) = 0
\end{align*}
on complementary Neumann and Dirichlet parts of the boundary,
with $\kappa(\omega, x) = \exp(g(\omega, x))$, where $g$ is a Gaussian field
\end{itemize}
\end{frame}
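\begin{frame}[fragile]{Example II -- sampling $\kappa$ (sketch)}
A minimal sketch of how one sample of the lognormal coefficient could be drawn
on a 1D grid of points (the 2D case is analogous); the exponential covariance
and its parameters \texttt{ell}, \texttt{sigma} are illustrative assumptions,
not fixed by these slides.
\begin{verbatim}
import numpy as np

def sample_kappa(x, ell=0.1, sigma=1.0, rng=None):
    # kappa = exp(g), g zero-mean Gaussian field with (assumed)
    # exponential covariance C(s,t) = sigma^2 exp(-|s-t|/ell)
    rng = rng or np.random.default_rng()
    C = sigma**2 * np.exp(-np.abs(x[:, None] - x[None, :]) / ell)
    L = np.linalg.cholesky(C + 1e-12 * np.eye(len(x)))  # jitter: SPD
    return np.exp(L @ rng.standard_normal(len(x)))

kappa = sample_kappa(np.linspace(0.0, 1.0, 101))
\end{verbatim}
\end{frame}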
\begin{frame}{Multilevel Monte Carlo Methods I}
\begin{itemize}
\item \underline{Main idea:} Draw samples from several approximation levels
and balance the cost per sample $\cost_l$ on each level against the number of samples $M_l$
\item \underline{Telescoping sum:} By linearity of the expectation,
$\EE[\goal_L] = \EE[\goal_0] + \sum_{l=1}^L \EE[\goal_l - \goal_{l-1}]$,
so each correction $\dgoal_l$ can be estimated by a standard MC estimator
\begin{align*}
\widehat{\dgoal}^{MC}_{h,M_l} =
\frac{1}{M_l} \sum_{m=1}^{M_l} \left( \goal_l(\omega_m) - \goal_{l-1}(\omega_m) \right), \quad
\widehat{\dgoal}^{MC}_{h,M_0} =
\frac{1}{M_0} \sum_{m=1}^{M_0} \goal_0 (\omega_m)
\end{align*}
\item \underline{MLMC estimator:}
\begin{align*}
\widehat{\goal}^{MLMC}_{h,\{ M_l \}_{l=0}^L} =
\sum_{l=0}^L \widehat{\dgoal}^{MC}_{h,M_l} =
\sum_{l=0}^L \frac{1}{M_l} \sum_{m=1}^{M_l} \dgoal_l(\omega_m)
\end{align*}
\end{itemize}
\end{frame}
\begin{frame}{Multilevel Monte Carlo Methods II}
\begin{itemize}
\item \underline{RMSE (Root Mean Square Error):}
\begin{align*}
e(\widehat{\goal}^{MLMC}_{h,\{ M_l \}_{l=0}^L})^2 =
\underbrace{\sum_{l=0}^L \frac{1}{M_l} \VV[\dgoal_l]}_{\text{estimator error}} +
\underbrace{\left( \EE[\goal_L - \goal] \right)^2}_{\text{FEM error}}
\end{align*}
\item \underline{Total cost:}
\begin{align*}
\cost(\widehat{\goal}^{MLMC}_{h,\{ M_l \}_{l=0}^L}) \lesssim
\sum_{l=0}^L M_l \cost_l, \quad
\cost_{\epsilon}(\widehat{\goal}^{MLMC}_{h,\{M_l \}_{l=0}^L}) \lesssim
\epsilon^{-2-(\gamma - \beta)/\alpha}
\end{align*}
Here, we assumed $\beta < \gamma$; the general case and the precise
argument are given in the theorem below.
\end{itemize}
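\medskip
{\footnotesize \underline{Sketch of the sample-size choice} (a standard
Lagrange-multiplier argument, added for completeness): minimizing
$\sum_l M_l \cost_l$ subject to $\sum_l \VV[\dgoal_l]/M_l = \epsilon^2/2$
gives $\cost_l - \lambda \VV[\dgoal_l]/M_l^2 = 0$, hence
\begin{align*}
M_l = \sqrt{\lambda} \sqrt{\VV[\dgoal_l]/\cost_l}, \qquad
\sqrt{\lambda} = 2 \epsilon^{-2} \sum_{k=0}^L \sqrt{\VV[\dgoal_k] \cost_k},
\end{align*}
which is, up to rounding, the optimal $M_l$ used in the algorithm below.}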
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Multilevel Monte Carlo method}
\begin{theorem}[Multilevel Monte Carlo method]
\label{MLMC_theorem}
{\footnotesize Suppose that there are constants $\alpha, \beta, \gamma, c_1, c_2, c_3 > 0$ such that $\alpha \geq \frac{1}{2} \min(\beta, \gamma)$ and
\begin{enumerate}
{\footnotesize
\item $\left| \E[Q_{h_l} - Q] \right| \leq c_1 h_l^\alpha$
\item $\V[Q_{h_l} - Q_{h_{l-1}}] \leq c_2 h_l^\beta$
\item $\mathcal{C}_l \leq c_3 h_l^{- \gamma}$.}
\end{enumerate}
Then, for any $0 < \epsilon < \frac{1}{e}$, there exists an $L$ and a sequence $\{ M_l \}_{l=0}^L$, such that
\begin{equation*}
e(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L})^2 = \E\left[( \widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L} - \E[Q])^2 \right] < \epsilon^2
\end{equation*}
and
\begin{equation*}
\mathcal{C}_\epsilon(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L}) \lesssim \begin{cases}
\epsilon^{-2}, &\text{if } \beta > \gamma \\
\epsilon^{-2}\log(\epsilon)^2, &\text{if } \beta = \gamma \\
\epsilon^{-2-(\gamma - \beta)/\alpha}, &\text{if } \beta < \gamma
\end{cases},
\end{equation*}
where the hidden constant depends on $c_1, c_2, c_3$.}
\end{theorem}
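\medskip
{\footnotesize \underline{Interpretation:} for $\beta > \gamma$ the variance decays
faster than the cost grows, so most samples sit on the coarsest level and MLMC attains
the canonical MC rate $\epsilon^{-2}$ at essentially coarse-level cost; for
$\beta < \gamma$ the finest level dominates and only the
$\epsilon^{-(\gamma-\beta)/\alpha}$ overhead over plain $\epsilon^{-2}$ remains.}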
\end{frame}
\begin{frame}{Multilevel Monte Carlo method - Algorithm}
\begin{itemize}
\item The main challenge in applying the MLMC method is verifying that the assumptions hold
\item The MLMC approach is straightforward to parallelize
\end{itemize}
\vspace{-0.4cm}
\begin{algorithm}[H]
\caption{Multilevel Monte Carlo method}
\begin{algorithmic}[1]
\label{MLMC algorithm}
{\footnotesize
\STATE Set $l_0 = 3$, $L_0 = 5$ and the initial number of samples $M_0 = \{ 200, 100, 50 \}$
\STATE Set range of levels $\{l_0, \dots, L_0 \}$ and the number of needed samples $\{ \Delta M_l = M_0 \}_{l = 0}^{L}$
\WHILE {$ \Delta M_l > 0$ on any level}
\FOR {levels with needed samples}
\STATE Retrieve functionals and cost: $Y_l, \, \mathcal{C}_l \leftarrow \texttt{SubroutineEstimator}(\Delta M_l, l)$
\STATE Update statistics: $\mathcal{C}_l$, $|\E[Y_l]|$, $\V[Y_l]$ and set: $M_l = \Delta M_l$, $\Delta M_l = 0$
\ENDFOR
\STATE Estimate exponents $\alpha$, $\beta$, $\gamma$ with the assumptions of the previous Theorem
\STATE Estimate optimal $M_l$, $l = 0, \dots, L$ with $M_l = \left\lceil 2 \epsilon^{-2} \sqrt{\frac{\V[Y_l]}{\mathcal{C}_l}} \left( \sum_{l=0}^L \sqrt{\V[Y_l] \mathcal{C}_l} \right) \right\rceil$
\STATE Test for weak convergence with $|\E[Q_{h_L} - Q_{h_{L-1}}]| < (2^\alpha - 1) \frac{\epsilon}{\sqrt{2}}$
\STATE If not converged, increase range of levels by one level and initialize new $M_L$
\ENDWHILE}
\end{algorithmic}
\end{algorithm}
\end{frame}
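\begin{frame}[fragile]{Multilevel Monte Carlo method -- driver sketch}
A minimal Python sketch of the loop above; \texttt{sample\_level(l, n)} is an
assumed user hook returning $n$ fresh samples of $Y_l$ together with their costs
(with $Y_0 = Q_{h_0}$), and the adaptive extension of the level range is omitted.
\begin{verbatim}
import numpy as np

def mlmc(eps, sample_level, L=5, M_init=100):
    Y = [np.empty(0) for _ in range(L + 1)]  # stored samples of Y_l
    cost = np.ones(L + 1)                    # mean cost per sample, C_l
    dM = [M_init] * (L + 1)                  # samples still needed
    while any(dM):
        for l in range(L + 1):
            if dM[l]:
                y, c = sample_level(l, dM[l])
                Y[l] = np.concatenate([Y[l], y])
                cost[l] = np.mean(c)
                dM[l] = 0
        V = np.array([y.var() for y in Y])   # V[Y_l]
        S = np.sum(np.sqrt(V * cost))
        M_opt = np.ceil(2 / eps**2 * np.sqrt(V / cost) * S)
        dM = [max(0, int(m) - len(y)) for m, y in zip(M_opt, Y)]
        # weak-convergence test / new levels would be handled here
    return sum(y.mean() for y in Y)          # MLMC estimate
\end{verbatim}
\end{frame}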
\begin{frame}{Multilevel Monte Carlo method}
\begin{itemize}
\item \underline{Main idea of MLMC:} Sample from a hierarchy of approximations $Q_{h_l}$ on successively finer triangulations.
\item By linearity of the expectation operator it holds
\begin{equation*}
\E[Q_{h_L}] = \E[Q_{h_0}] + \sum_{l=1}^L \E[Q_{h_l} - Q_{h_{l-1}}] = \sum_{l=0}^L \E[Y_l], \quad Y_0 := Q_{h_0}, \; Y_l := Q_{h_l} - Q_{h_{l-1}}.
\end{equation*}
\item Now estimate each $Y_l$ with the classical MC method, thus
\begin{equation*}
\widehat{Y}^{MC}_{h,M_l} = \frac{1}{M_l} \sum_{i=1}^{M_l} \left( Q_{h_l}(\omega_i) - Q_{h_{l-1}}(\omega_i) \right), \quad
\widehat{Y}^{MC}_{h,M_0} = \frac{1}{M_0} \sum_{i=1}^{M_0} Q_{h_0}(\omega_i)
\end{equation*}
\item This gives the MLMC estimator
\begin{equation*}
\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L} = \sum_{l=0}^L \widehat{Y}^{MC}_{h,M_l} = \sum_{l=0}^L \frac{1}{M_l} \sum_{i=1}^{M_l} Y_l(\omega_{i}).
\end{equation*}
\end{itemize}
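\medskip
{\footnotesize \underline{Note:} by construction
$\E[\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L}] = \sum_{l=0}^L \E[Y_l] = \E[Q_{h_L}]$,
i.e., the MLMC estimator is unbiased with respect to the finest-level quantity;
only the FEM bias $\E[Q_{h_L} - Q]$ remains.}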
\end{frame}
\begin{frame}{Multilevel Monte Carlo method}
\begin{itemize}
\item The RMSE is then given by
\begin{equation*}
e(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L})^2 = \E\left[( \widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L} - \E[Q])^2 \right] = \underbrace{\sum_{l=0}^L \frac{1}{M_l} \V[Y_l]}_{\text{estimator error}} + \underbrace{\left( \E[Q_{h_L} - Q] \right)^2}_{\text{FEM error}}.
\end{equation*}
\item This leads to a better computational cost since:
\begin{itemize}
\item Assume $Q_h \rightarrow Q$; then $\V[Q_{h_l} - Q_{h_{l-1}}] \rightarrow 0$.
\item Samples of $Q_{h_0}$ do not become more expensive as the required accuracy increases.
\item The optimal choice for the sequence $M_l$ is given by
\begin{equation*}
M_l = \left\lceil 2 \epsilon^{-2} \sqrt{\frac{\V[Y_l]}{\mathcal{C}_l}} \left( \sum_{l=0}^L \sqrt{\V[Y_l] \mathcal{C}_l} \right) \right\rceil.
\end{equation*}
\end{itemize}
\item With the optimal $M_l$, this gives an overall cost of
\begin{equation*}
\mathcal{C}(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L}) = \sum_{l=0}^L M_l \mathcal{C}_l \lesssim \epsilon^{-2} \left( \sum_{l=0}^L \sqrt{\V[Y_l] \mathcal{C}_l} \right)^2.
\end{equation*}
\end{itemize}
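\medskip
{\footnotesize \underline{Worked step:} inserting the optimal $M_l$ (ignoring
the ceiling) gives $M_l \mathcal{C}_l = 2\epsilon^{-2} \sqrt{\V[Y_l] \mathcal{C}_l}
\sum_{k=0}^L \sqrt{\V[Y_k] \mathcal{C}_k}$, and summing over $l$ yields
$\mathcal{C} = 2\epsilon^{-2} \big( \sum_{l=0}^L \sqrt{\V[Y_l] \mathcal{C}_l} \big)^2$,
the bound above; the three cost regimes of the theorem follow from how
$\sqrt{\V[Y_l] \mathcal{C}_l} \sim h_l^{(\beta - \gamma)/2}$ behaves as $l$ grows.}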
\end{frame}