Commit b588c292 authored by niklas.baumgarten

worked on slides

parent e73467ab
% Statistical Operators
\newcommand{\EE}{\mathbb{E}}
\newcommand{\VV}{\mathbb{V}}
\DeclareMathOperator{\Cov}{Cov}
% Algebraic Operators
\renewcommand{\dim}{\operatorname{dim}}
\DeclareMathOperator{\diag}{diag}
\DeclareMathOperator{\spann}{span}
% Integration
\renewcommand{\d}{\, \mathrm{d}}
% Other
\newcommand{\abs}[1]{{\left|#1\right|}}
\newcommand{\norm}[1]{\lVert#1\rVert}
\newcommand{\mat}[1]{\begin{pmatrix}
#1
\end{pmatrix}}
\newcommand{\set}[1]{\{#1\}}
\newcommand{\sprod}[1]{{\langle#1\rangle}}
\newcommand{\defeq}{\mathrel{\vcenter{\baselineskip0.5ex \lineskiplimit0pt
\hbox{\scriptsize.}\hbox{\scriptsize.}}}=}
\newcommand{\eqdef}{=\mathrel{\vcenter{\baselineskip0.5ex \lineskiplimit0pt
\hbox{\scriptsize.}\hbox{\scriptsize.}}}}
\DeclareMathOperator{\Toe}{Toe}
\DeclareMathOperator{\BToe}{BToe}
\DeclareMathOperator{\ToeSym}{ToeSym}
\DeclareMathOperator{\BToeSym}{BToeSym}
\DeclareMathOperator{\Circ}{Circ}
\DeclareMathOperator{\BCirc}{BCirc}
\DeclareMathOperator{\DFT}{DFT}
\DeclareMathOperator{\IDFT}{IDFT}
\DeclareMathOperator{\FFT}{FFT}
\titlepage
\end{frame}
% \input{src/abstract}
\begin{frame}{Outline}
\tableofcontents
\end{frame}
% \input{src/experimental_setup}
\section{Introduction and Example I}
\input{src/introduction.tex}
\section{Assumptions}
\input{src/assumptions.tex}
\section{Monte Carlo Estimator and Example II}
\input{src/mc-estimator.tex}
% \input{src/alternative_acoustic}
\section{Multilevel Monte Carlo Estimator and Example III}
\input{src/mlmc-estimator.tex}
% \input{src/outlook}
\section{Example IV - WIP}
\input{src/example-iv.tex}
\section{Outlook and Conclusion}
\input{src/outlook.tex}
\section{References}
\input{src/references.tex}
% \begin{frame}{References}
% \bibliographystyle{acm}
% \tiny{\bibliography{lit}}
% \end{frame}
\section*{Backup}
% \input{src/the_random_field_model}
% \input{src/circulant_embedding}
% \input{src/weak_formulation}
% \input{src/existence}
\input{src/mlmc-algorithm.tex}
\input{src/random-field-creation.tex}
\input{src/transport-est-solution.tex}
% \input{src/old/regularity}
% \input{src/old/finite_element_error}
\end{document}
\documentclass[18pt]{beamer}
%----------------------------------------------------------------------------------------
%% SLIDE FORMAT
%----------------------------------------------------------------------------------------
\usepackage{templates/beamerthemekit} % 4:3
% \usepackage{templates/beamerthemekitwide} % (16:9)
%----------------------------------------------------------------------------------------
% PACKAGES
%----------------------------------------------------------------------------------------
\usepackage{amsmath,amssymb,amsthm}
\usepackage{stmaryrd}
\usepackage[utf8]{inputenc}
\usepackage{subfigure}
\usepackage[english]{babel}
\usepackage{latexsym}
\usepackage{mathtools}
\usepackage{grffile}
\usepackage{tabto}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{pythonhighlight}
\usepackage{color}
\usepackage{url}
%----------------------------------------------------------------------------------------
% USER DEFINED SYMBOLS, ENVIRONMENTS AND NUMBERING
%----------------------------------------------------------------------------------------
\setbeamertemplate{itemize/enumerate body begin}{\small}
\setbeamertemplate{itemize/enumerate subbody begin}{\footnotesize}
\newcommand{\C}{\mathbb{C}}
\newcommand{\K}{\mathbb{K}}
\newcommand{\R}{\mathbb{R}}
\newcommand{\Q}{\mathbb{Q}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\E}{\mathbb{E}}
\newcommand{\PP}{\mathbb{P}}
\newcommand{\V}{\mathbb{V}}
\newcommand{\amin}{a_{\text{min}}}
\newcommand{\amax}{a_{\text{max}}}
\DeclareMathOperator{\essinf}{essinf}
\DeclareMathOperator{\esssup}{esssup}
\DeclareMathOperator{\divergence}{div}
\DeclareMathOperator{\Cov}{Cov}
\DeclareMathOperator{\diag}{diag}
\DeclareMathOperator{\conv}{conv}
\DeclareMathOperator{\spann}{span}
\DeclareMathOperator{\diam}{diam}
\DeclareMathOperator{\Toe}{Toe}
\DeclareMathOperator{\BToe}{BToe}
\DeclareMathOperator{\ToeSym}{ToeSym}
\DeclareMathOperator{\BToeSym}{BToeSym}
\DeclareMathOperator{\Circ}{Circ}
\DeclareMathOperator{\BCirc}{BCirc}
\DeclareMathOperator{\MCE}{MCE}
\DeclareMathOperator{\MOCE}{MOCE}
\DeclareMathOperator{\DFT}{DFT}
\DeclareMathOperator{\IDFT}{IDFT}
\DeclareMathOperator{\FFT}{FFT}
%----------------------------------------------------------------------------------------
% START PRESENTATION
%----------------------------------------------------------------------------------------
\title{Multilevel Monte Carlo Applications}
\subtitle{Review and motivation}
\author{Niklas Baumgarten, Christian Wieners}
\institute{Institute for Applied and Numerical Mathematics}
\begin{document}
%title page
\begin{frame}
\titlepage
\end{frame}
%table of contents
\begin{frame}{Outline}
\tableofcontents
\end{frame}
%----------------------------------------%
\section{Elliptic model problem}
\input{model_problem}
\input{example_fields2}
%----------------------------------------%
%----------------------------------------%
\section{Monte Carlo methods}
\input{monte_carlo_methods}
\input{experimental_setup}
\input{numerical_results}
%----------------------------------------%
%----------------------------------------%
\section{Acoustic wave propagation}
\input{alternative_acoustic}
\section{Outlook and Conclusion}
\input{outlook}
\begin{frame}{References}
\bibliographystyle{acm}
\tiny{\bibliography{lit}}
\end{frame}
\section*{Backup}
\input{the_random_field_model}
\input{circulant_embedding}
\input{weak_formulation}
\input{existence}
\input{regularity}
\input{finite_element_error}
\input{mlmc_algorithm}
\end{document}
\begin{frame}{Circulant embedding}
\begin{itemize}
\item The circulant embedding approach is a tool to efficiently generate independent, identically distributed samples of the random field.
\item The covariance matrix corresponding to (\ref{covariance_function}) has symmetric block Toeplitz structure with (symmetric) Toeplitz blocks (BST(S)TB).
\begin{figure}
\centering
\subfigure{\includegraphics[width=0.35\linewidth]{./pictures/Matrices/Covariance matrix.png}}
\hspace{1cm}
\subfigure{\includegraphics[width=0.35\linewidth]{./pictures/Matrices/Embedded matrix.png}}
\end{figure}
\item This BST(S)TB matrix is fully determined by its first block row and its first block column.
\end{itemize}
\end{frame}
\begin{frame}{Circulant embedding}
\begin{itemize}
\item Goal: decompose the covariance matrix efficiently in order to sample
\begin{equation*}
X = RZ, \quad \text{where } A = RR^T \text{ and } Z \sim \mathcal{N}(0, I).
\end{equation*}
\item This is done by embedding the BST(S)TB matrix into a block circulant matrix with circulant blocks.
\item For block circulant matrices it holds that
\begin{equation*}
B = W_{N,N} \Lambda W^*_{N,N},
\end{equation*}
where $W_{N,N}$ is the two-dimensional Fourier matrix and $\Lambda = \diag(\lambda)$ with $\lambda$ the vector of eigenvalues. The eigenvalues can be computed as $\lambda = N W_{N,N} b^r$, where $b^r$ is the $N \times N$ array containing the first block row.
\end{itemize}
\end{frame}
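\begin{frame}[fragile]{Circulant embedding - diagonalization check}
A minimal 1D NumPy sketch of the diagonalization above; the matrix entries
and the unnormalized FFT convention are illustrative assumptions, not taken
from our implementation.
\begin{python}
import numpy as np

# first row of a symmetric circulant matrix (values chosen for illustration)
c = np.array([4.0, 1.0, 0.5, 1.0])
lam = np.fft.fft(c).real  # eigenvalues via a single FFT of the first row

# build the dense circulant matrix and compare with a direct eigensolver
C = np.array([np.roll(c, k) for k in range(len(c))])
assert np.allclose(sorted(lam), sorted(np.linalg.eigvalsh(C)))
\end{python}
\end{frame}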
\begin{frame}{Circulant embedding - Algorithm}
\begin{algorithm}[H]
\caption{Circulant embedding.}
\label{Circulant embedding algorithm}
\begin{algorithmic}[1]
{\footnotesize
\STATE Compute first rows and first columns of covariance matrix: $a^r$ and $a^c$ \hfill $\mathcal{O}(N^2)$
\STATE Embed $a^r$ and $a^c$ to create a circulant matrix: $b^r \leftarrow \texttt{MOCE}(a^r, a^c)$
\STATE Compute eigenvalues: $\lambda \leftarrow \texttt{FFT}(b^r)$ \hfill $\mathcal{O}(\tilde{N}^2 \log \tilde{N})$
\IF{all $\lambda \geq 0$}
\STATE Compute and save square root: $\sqrt{\Lambda}$
\WHILE{sample needed}
\STATE Generate complex random matrix: $\underline{Z} \leftarrow \texttt{Rnd}(\text{seed})$
\STATE Compute complex field: $\underline{X} \leftarrow \texttt{FFT}(\sqrt{\Lambda} \odot \underline{Z})$ \hfill $\mathcal{O}(\tilde{N}^2 \log \tilde{N})$
\STATE Yield the two independent samples: $X_1 = \text{Re}\{\underline{X}[0:N, 0:N]\}$ and \\ \hspace{1cm} $X_2 = \text{Im}\{\underline{X}[0:N, 0:N]\}$
\STATE Add $c(x)$ and compute log-normal field if desired
\ENDWHILE
\ELSE
\STATE Compute padding: $\overline{a}^r, \overline{a}^c \leftarrow \texttt{Pad}(a^r, a^c)$
\STATE Set $a^r, a^c = \overline{a}^r, \overline{a}^c$ and go back to line 2.
\ENDIF
}
\end{algorithmic}
\end{algorithm}
\end{frame}
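\begin{frame}[fragile]{Circulant embedding - sampling step (sketch)}
A minimal NumPy sketch of the sampling loop of the algorithm, assuming the
square roots of the (nonnegative) eigenvalues of the embedded matrix are
already available; the function name and the FFT normalization are
illustrative assumptions.
\begin{python}
import numpy as np

def sample_pair(sqrt_lam, N, rng):
    """One FFT yields two independent Gaussian fields (real/imaginary part)."""
    shape = sqrt_lam.shape
    Z = rng.standard_normal(shape) + 1j * rng.standard_normal(shape)
    X = np.fft.fft2(sqrt_lam * Z) / np.sqrt(sqrt_lam.size)
    # restrict the embedded (padded) field to the original N x N block
    return X.real[:N, :N], X.imag[:N, :N]
\end{python}
\end{frame}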
\begin{frame}{Existence}
\begin{lemma}[Ellipticity]
{\footnotesize For almost all $\omega \in \Omega$, the bilinear form $b_\omega(u,v)$ is bounded and coercive in $H_0^1(D)$ with respect to the norm $\vert \cdot \vert_{H^1(D)}$ with the constants $\amax(\omega)$ and $\amin(\omega)$, respectively. Moreover, there exists a unique solution $u(\omega, \cdot) \in H_0^1(D)$ to the variational problem (\ref{weak_formulation}) and
\begin{equation*}
\vert u(\omega, \cdot) \vert_{H^1(D)} \lesssim \frac{\Vert f \Vert_{H^{t-1}(D)}}{\amin(\omega)}.
\end{equation*}}
\end{lemma}
\begin{itemize}
\item Proof idea: straightforward calculation using the assumptions and the Lax-Milgram theorem.
\end{itemize}
\begin{theorem}[Existence]
{\footnotesize The weak solution $u$ of (\ref{eq:model_problem}-\ref{boundary_conditions2}) is unique and belongs to $L^p(\Omega, H_0^1(D))$ for all $p < p_*$.}
\end{theorem}
\begin{itemize}
\item Proof idea: extend the lemma above to Bochner spaces using Hölder's inequality and the assumptions.
\end{itemize}
\end{frame}
\begin{frame}{Introduction and Example I}
\begin{itemize}
\item \underline{Problem:} Let $u(\omega, x) \in V$ be a random
PDE solution on $\Omega$ and $D$.
Let $\goal(\omega)$ be some functional of $u(\omega, x)$.
Estimate $\EE[\goal(\omega)]$
\item \underline{Example (1D Elliptic Model Problem):} Let $D \subset \RR^{d=1}$.
Search for $u \in V$, such that
\begin{align*}
- (\kappa(\omega,x) u'(\omega,x))' = 0, \quad
u'(0) = 1, \quad
u(1) = 0
\end{align*}
with the log-normal coefficient $\kappa(\omega, x) = \exp(g(\omega, x))$, where $g$ is a Gaussian field
\end{itemize}
\end{frame}
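\begin{frame}[fragile]{Example I - sketch of one sample}
A minimal NumPy sketch of one sample of the 1D problem: since
$-(\kappa u')' = 0$, the flux $\kappa u'$ is constant and fixed by
$u'(0) = 1$, so $u$ follows by quadrature. The random-walk stand-in for the
Gaussian field $g$ is an illustrative assumption (the covariance model used
in the experiments appears in Example II).
\begin{python}
import numpy as np

rng = np.random.default_rng(0)
n = 200
x = np.linspace(0.0, 1.0, n)

# crude stand-in for a Gaussian field; kappa = exp(g) is log-normal
g = np.cumsum(rng.standard_normal(n)) / np.sqrt(n)
kappa = np.exp(g)

du = kappa[0] / kappa                        # u'(x) = kappa(0) / kappa(x)
inc = 0.5 * (du[1:] + du[:-1]) * np.diff(x)  # trapezoidal increments
F = np.concatenate(([0.0], np.cumsum(inc)))  # F(x) = int_0^x u'(s) ds
u = F - F[-1]                                # enforces u(1) = 0

Q = u[0]  # one possible functional: Q(omega) = u(omega, 0)
\end{python}
\end{frame}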
\begin{frame}{Assumptions}
\begin{itemize}
\item \underline{FEM (Finite Element Method):} Let $u_h(\omega, x) \in V_h$ be the
corresponding FEM solution to $u(\omega, x)$ and $\goal_h(\omega)$ be the
functional
\item \underline{Assumptions:} The FEM is convergent with rate
$\alpha > 0$, i.e.
\begin{equation}
\label{eq:alpha-assumption}
\abs{\EE[\goal_h - \goal]} \lesssim h^\alpha, \quad
\abs{\EE[\goal_h - \goal]} \lesssim N^{-\alpha / d}, \quad
N = \dim(V_h)
\end{equation}
The cost for one sample can be bounded with $\gamma > 0$ by
\begin{equation}
\label{eq:gamma-assumption}
\cost(\goal_h(\omega_m)) \lesssim h^{-\gamma}, \quad
\cost(\goal_h(\omega_m)) \lesssim N^{\gamma / d}, \quad
\omega_m \in \Omega
\end{equation}
The variance of $\goal_l - \goal_{l-1}$ decays with rate $\beta > 0$
\begin{equation}
\label{eq:beta-assumption}
\abs{\VV[\goal_l - \goal_{l-1}]} \lesssim h^\beta, \quad
\abs{\VV[\goal_l - \goal_{l-1}]} \lesssim N^{-\beta / d}
\end{equation}
\end{itemize}
\end{frame}
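\begin{frame}[fragile]{Assumptions - estimating the rates (sketch)}
In practice $\alpha$, $\beta$, $\gamma$ are estimated from per-level data by
a least-squares fit (cf.\ the MLMC algorithm later); a minimal sketch,
assuming $h_l \propto 2^{-l}$ and purely hypothetical per-level values.
\begin{python}
import numpy as np

def estimate_rate(levels, values):
    """Fit |values| ~ C * 2**(-rate * level) by linear regression in log2."""
    A = np.vstack([levels, np.ones_like(levels, dtype=float)]).T
    rate, _ = np.linalg.lstsq(A, -np.log2(np.abs(values)), rcond=None)[0]
    return rate

levels = np.arange(1, 6)
# hypothetical decays of |E[Q_l - Q_{l-1}]| and V[Q_l - Q_{l-1}]
alpha = estimate_rate(levels, np.array([1.1e-1, 5.2e-2, 2.7e-2, 1.3e-2, 6.4e-3]))
beta = estimate_rate(levels, np.array([3.0e-2, 8.1e-3, 1.9e-3, 5.1e-4, 1.2e-4]))
\end{python}
\end{frame}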
\begin{frame}{Monte Carlo Estimator}
\begin{itemize}
\item \underline{MC Estimator:} Draw $\omega_m \in\Omega$ and compute
\begin{align*}
\widehat{\goal}_{h,M}^{MC} = \frac{1}{M} \sum_{m=1}^M \goal_h(\omega_m)
\end{align*}
\item \underline{RMSE (Root Mean Square Error):}
\begin{align*}
e(\widehat{\goal}^{MC}_{h,M})^2 =
\EE \left[ (\widehat{\goal}^{MC}_{h,M} - \EE[\goal])^2 \right] =
\underbrace{M^{-1} \VV[\goal_h]}_{\text{Estimator error}} +
\underbrace{\left( \EE[\goal_h - \goal] \right)^2}_{\text{FEM error}}
\end{align*}
\item \underline{Total Cost:}
\begin{align*}
\cost(\widehat{\goal}^{MC}_{h,M}) \lesssim M \cdot N^{\gamma / d}, \quad
\cost_{\epsilon}(\widehat{\goal}^{MC}_{h,M}) \lesssim
\epsilon^{-2-\frac{\gamma}{\alpha}}
\end{align*}
Here, $\cost_\epsilon(\widehat{\goal}_h)$ is the cost to achieve
$e(\widehat{\goal}_h) < \epsilon$
\end{itemize}
\end{frame}
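\begin{frame}[fragile]{Monte Carlo Estimator - sketch}
A minimal sketch of the MC estimator above; \texttt{sample\_Q} is a
hypothetical placeholder for one FEM solve plus evaluation of the functional.
\begin{python}
import numpy as np

def mc_estimate(sample_Q, M, rng):
    samples = np.array([sample_Q(rng) for _ in range(M)])
    estimate = samples.mean()
    # the sample variance over M estimates the 'estimator error' part of the
    # RMSE; the FEM bias E[Q_h - Q] is not visible from the samples alone
    estimator_error = samples.var(ddof=1) / M
    return estimate, estimator_error
\end{python}
\end{frame}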
\begin{frame}{Example II}
\begin{itemize}
\item \underline{Example (2D Elliptic Model Problem):} Let $D \subset \RR^{d=2}$.
Search for $u \in V$, such that
\begin{align*}
- \div(\kappa(\omega,x) \nabla u(\omega,x)) \overset{\text{on }D}{=} 0, \quad
\nabla u(x) \cdot n \overset{\text{on }\Gamma_N}{=} -1, \quad
u(x) \overset{\text{on }\Gamma_D}{=} 0
\end{align*}
with the log-normal coefficient $\kappa(\omega, x) = \exp(g(\omega, x))$, where $g$ is a Gaussian field
with the covariance function
$C(x, y) = \sigma^2 \exp(- \norm{x- y}_2^s / \lambda^s)$
\end{itemize}
\begin{figure}
\label{fig:2d-elliptic-model-problem}
\centering
\subfigure{\includegraphics[width=0.99\linewidth]{img/perm_potential.png}}
\end{figure}
\end{frame}
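\begin{frame}[fragile]{Example II - covariance function}
The covariance function above as code; the default values for $\sigma$,
$\lambda$ and $s$ are illustrative assumptions.
\begin{python}
import numpy as np

def cov(x, y, sigma=1.0, lam=0.1, s=2.0):
    """C(x, y) = sigma^2 * exp(-||x - y||_2^s / lambda^s)"""
    return sigma**2 * np.exp(-np.linalg.norm(x - y) ** s / lam**s)
\end{python}
\end{frame}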
\begin{frame}{Multilevel Monte Carlo Estimator I}
\begin{itemize}
\item \underline{Main Idea:} Draw samples from several levels
$l \in \{0, \dots, L\}$
and balance $\cost_l$ with $M_l$
\item Set $\dgoal_l \defeq \goal_l - \goal_{l-1}$ and $\dgoal_0 \defeq \goal_0$:
\begin{align*}
\EE[\goal_L] = \EE[\goal_0] + \sum_{l=1}^L \EE[\goal_l - \goal_{l-1}] =
\sum_{l=0}^L \EE[\dgoal_l]
\end{align*}
\item Estimate each $\dgoal_l$ with the MC method:
\begin{align*}
\widehat{\dgoal}^{MC}_{h,M_l} =
\frac{1}{M_l} \sum_{m=1}^{M_l} \left( \goal_l(\omega_m) -
\goal_{l-1} (\omega_m) \right), \quad
\widehat{\dgoal}^{MC}_{h,M_0} =
\frac{1}{M_0} \sum_{m=1}^{M_0} \goal_0 (\omega_m)
\end{align*}
\item \underline{MLMC Estimator:}
\begin{align*}
\widehat{\goal}^{MLMC}_{h,\{ M_l \}_{l=0}^L} =
\sum_{l=0}^L \widehat{\dgoal}^{MC}_{h,M_l} =
\sum_{l=0}^L \frac{1}{M_l} \sum_{m=1}^{M_l} \dgoal_l(\omega_{m})
\end{align*}
\end{itemize}
\end{frame}
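\begin{frame}[fragile]{Multilevel Monte Carlo Estimator - sketch}
A minimal sketch of the MLMC estimator above; \texttt{sample\_dQ(l, rng)} is
a hypothetical placeholder returning one sample of $\goal_l - \goal_{l-1}$
evaluated on the \emph{same} $\omega_m$ (and of $\goal_0$ for $l = 0$).
\begin{python}
import numpy as np

def mlmc_estimate(sample_dQ, M, rng):
    """Sum of per-level MC estimates; M[l] samples on level l."""
    return sum(
        np.mean([sample_dQ(l, rng) for _ in range(M_l)])
        for l, M_l in enumerate(M)
    )
\end{python}
\end{frame}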
\begin{frame}{Multilevel Monte Carlo Estimator II}
\begin{itemize}
\item \underline{RMSE (Root Mean Square Error):}
\begin{align*}
e(\widehat{\goal}^{MLMC}_{h,\{ M_l \}_{l=0}^L})^2 =
\underbrace{\sum_{l=0}^L
\frac{1}{M_l} \VV[\dgoal_l]}_{\text{Estimator error}} +
\underbrace{\left( \EE[\goal_L - \goal] \right)^2}_{\text{FEM error}}
\end{align*}
\item \underline{Total Cost:}
\begin{align*}
\cost(\widehat{\goal}^{MLMC}_{h,\{ M_l \}_{l=0}^L}) \lesssim
\sum_{l=0}^L M_l \cost_l, \quad
\cost_{\epsilon}(\widehat{\goal}^{MLMC}_{h,\{M_l \}_{l=0}^L}) \lesssim
\epsilon^{-2-(\gamma - \beta)/\alpha}
\end{align*}
Here, we assumed $\beta < \gamma$
\end{itemize}
\end{frame}
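\begin{frame}{Multilevel Monte Carlo Estimator - cost example}
Purely illustrative rate values to make the bounds concrete: for
$\alpha = 1$, $\beta = 1$, $\gamma = 2$ (so $\beta < \gamma$),
\begin{align*}
\cost_{\epsilon}(\widehat{\goal}^{MLMC}_{h,\{M_l\}_{l=0}^L}) \lesssim
\epsilon^{-2-(2-1)/1} = \epsilon^{-3}, \quad \text{whereas} \quad
\cost_{\epsilon}(\widehat{\goal}^{MC}_{h,M}) \lesssim
\epsilon^{-2-2/1} = \epsilon^{-4}
\end{align*}
\end{frame}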
\begin{frame}{Multilevel Monte Carlo Estimator III}
\begin{algorithm}[H]
\caption{Multilevel Monte Carlo Estimator}
\begin{algorithmic}[1]
\label{alg:mlmc}
{\footnotesize
\STATE Choose $h_{l=0}^\text{init}, h_{l=1}^\text{init}, \dots,
h_{l=L}^\text{init}$ and
$M_{l=0}^\text{init}, M_{l=1}^\text{init}, \dots, M_{l=L}^\text{init}$
\STATE Set $\{{\vartriangle} M_l = M_l^\text{init}\}_{l = 0}^{L}$
and $\{M_l = 0\}_{l = 0}^{L}$
\WHILE {${\vartriangle} M_l > 0$ on any level}
\FOR {levels with ${\vartriangle} M_l > 0$}
\STATE $\dgoal_l, \, \mathcal{C}_l \leftarrow \texttt{MonteCarlo}
({\vartriangle} M_l, l)$
\STATE Update $\mathcal{C}_l$, $|\mathbb{E}[\dgoal_l]|$
and $\mathbb{V}[\dgoal_l]$
\STATE Set $M_l \leftarrow M_l + {\vartriangle} M_l$,
${\vartriangle} M_l = 0$
\ENDFOR
\STATE Estimate $\alpha$, $\beta$, $\gamma$
with~\eqref{eq:alpha-assumption}, ~\eqref{eq:gamma-assumption}
and~\eqref{eq:beta-assumption}
\STATE Estimate $\{M_l^\text{opt}\}_{l = 0}^{L}$ with %~\eqref{eq:optimal-Ml}
$M_l^\text{opt} \approx \left\lceil 2 \varepsilon^{-2}
\sqrt{\frac{\VV[\dgoal_l]}{\cost_l}} \left( \sum_{l=0}^L
\sqrt{\VV[\dgoal_l] \cost_l} \right) \right\rceil$
\STATE Update $\{{\vartriangle} M_l\}_{l = 0}^{L}
= \{ M_l^\text{opt} - M_l\}_{l = 0}^{L}$
\STATE Test for weak convergence %~\eqref{eq:convergence-test}
%\begin{equation} \label{eq:convergence-test}
$|\EE[{\goal}_L - {\goal}_{L-1}]| \lesssim
(2^\alpha - 1) \frac{\varepsilon}{\sqrt{2}}$
%\end{equation}
\IF {not converged}
\STATE Set $L \leftarrow L + 1$ and update $\{{\vartriangle} M_l\}_{l = 0}^{L}$
\ENDIF
\ENDWHILE}
\end{algorithmic}
\end{algorithm}
\end{frame}
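\begin{frame}[fragile]{Multilevel Monte Carlo Estimator - sample numbers (sketch)}
A minimal sketch of the $M_l^\text{opt}$ update in the algorithm above;
the array contents are hypothetical.
\begin{python}
import numpy as np

def optimal_M(var, cost, eps):
    """M_l ~ 2 eps^-2 sqrt(V_l / C_l) * sum_k sqrt(V_k C_k), rounded up."""
    total = np.sum(np.sqrt(var * cost))
    return np.ceil(2.0 / eps**2 * np.sqrt(var / cost) * total).astype(int)

var = np.array([1.0e-1, 2.5e-2, 6.0e-3])  # hypothetical V[dQ_l]
cost = np.array([1.0, 4.0, 16.0])         # hypothetical C_l
print(optimal_M(var, cost, eps=1e-1))
\end{python}
\end{frame}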
\begin{frame}{Example III}
\begin{itemize}
\item \underline{Example (2D Darcy Transport):} (...)
\end{itemize}
\end{frame}
\begin{frame}{Multilevel Monte Carlo method}
\begin{itemize}
\item Main idea of MLMC: sample from several approximations $Q_{h_l}$ of $Q$ on successively finer triangulations.
\item With the linearity of the expectation operator it holds that
\begin{equation*}
\E[Q_{h_L}] = \E[Q_{h_0}] + \sum_{l=1}^L \E[Q_{h_l} - Q_{h_{l-1}}] = \sum_{l=0}^L \E[Y_l], \quad Y_0 := Q_{h_0}, \quad Y_l := Q_{h_l} - Q_{h_{l-1}}.
\end{equation*}
\item Now estimate each $Y_l$ with the classical MC method, thus
\begin{equation*}
\widehat{Y}^{MC}_{h,M_l} = \frac{1}{M_l} \sum_{i=1}^{M_l} \left( Q_{h_l}(\omega_i) - Q_{h_{l-1}}(\omega_i) \right), \quad
\widehat{Y}^{MC}_{h,M_0} = \frac{1}{M_0} \sum_{i=1}^{M_0} Q_{h_0}(\omega_i)
\end{equation*}
\item This gives the MLMC estimator
\begin{equation*}
\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L} = \sum_{l=0}^L \widehat{Y}^{MC}_{h,M_l} = \sum_{l=0}^L \frac{1}{M_l} \sum_{i=1}^{M_l} Y_l(\omega_{i}).
\end{equation*}
\end{itemize}
\end{frame}
\begin{frame}{Multilevel Monte Carlo method}
\begin{itemize}
\item The RMSE is then given by
\begin{equation*}
e(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L})^2 = \E\left[( \widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L} - \E[Q])^2 \right] = \underbrace{\sum_{l=0}^L \frac{1}{M_l} \V[Y_l]}_{\text{estimator error}} + \underbrace{\left( \E[Q_{h_L} - Q] \right)^2}_{\text{FEM error}}.
\end{equation*}
\item This leads to a lower computational cost since:
\begin{itemize}
\item Assuming $Q_{h_l} \rightarrow Q$, the variance $\V[Q_{h_l}(\omega_i) - Q_{h_{l-1}}(\omega_i)] \rightarrow 0$ as $l \rightarrow \infty$.
\item Samples of $Q_{h_0}$ do not become more expensive as the required accuracy increases.
\item The optimal choice for the sequence $M_l$ is given by
\begin{equation*}
M_l = \left\lceil 2 \epsilon^{-2} \sqrt{\frac{\V[Y_l]}{\mathcal{C}_l}} \left( \sum_{l=0}^L \sqrt{\V[Y_l] \mathcal{C}_l} \right) \right\rceil.
\end{equation*}
\end{itemize}
\item This gives an overall cost of
\begin{equation*}
\mathcal{C}(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L}) = \sum_{l=0}^L M_l \mathcal{C}_l, \quad \mathcal{C}_\epsilon(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L}) \lesssim \epsilon^{-2} \left( \sum_{l=0}^L \sqrt{\V[Y_l] \mathcal{C}_l} \right)^2.
\end{equation*}
\end{itemize}
\end{frame}
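\begin{frame}{Multilevel Monte Carlo method - optimal $M_l$}
Sketch of where the optimal $M_l$ comes from: treat the $M_l$ as continuous
and minimize the total cost subject to a fixed estimator variance,
\begin{equation*}
\min_{M_0, \dots, M_L} \sum_{l=0}^L M_l \mathcal{C}_l
\quad \text{subject to} \quad
\sum_{l=0}^L \frac{\V[Y_l]}{M_l} = \frac{\epsilon^2}{2}.
\end{equation*}
Setting the gradient of the Lagrangian
$\sum_{l} M_l \mathcal{C}_l + \mu \sum_{l} \V[Y_l] / M_l$ to zero gives
$M_l \propto \sqrt{\V[Y_l] / \mathcal{C}_l}$; the constraint then fixes the
constant and, after rounding up, yields the formula above.
\end{frame}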
\begin{frame}{Multilevel Monte Carlo method}
\begin{theorem}[Multilevel Monte Carlo method]
\label{MLMC_theorem}
{\footnotesize Suppose that there are positive constants $\alpha, \beta, \gamma, c_1, c_2, c_3 > 0$ such that $\alpha \geq \frac{1}{2} \min(\beta, \gamma)$ and
\begin{enumerate}
{\footnotesize
\item $\left| \E[Q_{h_l} - Q] \right| \leq c_1 h_l^\alpha$
\item $\V[Q_{h_l} - Q_{h_{l-1}}] \leq c_2 h_l^\beta$
\item $\mathcal{C}_l \leq c_3 h_l^{- \gamma}$.}
\end{enumerate}
Then, for any $0 < \epsilon < \frac{1}{e}$, there exists an $L$ and a sequence $\{ M_l \}_{l=0}^L$, such that
\begin{equation*}
e(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L})^2 = \E\left[( \widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L} - \E[Q])^2 \right] < \epsilon^2
\end{equation*}
and
\begin{equation*}
\mathcal{C}_\epsilon(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L}) \lesssim \begin{cases}
\epsilon^{-2}, &\text{if } \beta > \gamma \\
\epsilon^{-2} (\log \epsilon)^2, &\text{if } \beta = \gamma \\
\epsilon^{-2-(\gamma - \beta)/\alpha}, &\text{if } \beta < \gamma
\end{cases},
\end{equation*}
where the hidden constant depends on $c_1, c_2, c_3$.}
\end{theorem}
\end{frame}
\begin{frame}{Multilevel Monte Carlo method - Algorithm}
\begin{itemize}
\item The main challenge in applying the MLMC method is verifying that the assumptions actually hold
\item The MLMC approach is straightforward to parallelize
\end{itemize}
\vspace{-0.4cm}
\begin{algorithm}[H]
\caption{Multilevel Monte Carlo method}
\begin{algorithmic}[1]
\label{MLMC algorithm}
{\footnotesize