Commit 4a071a53 by niklas.baumgarten

### worked on slides

parent 9382fe7f
 ... ... @@ -54,12 +54,17 @@ \newcommand{\da}{\, \mathrm{d} a} % Naming convcention \newcommand{\goal}{\, \mathcal{Q}} \newcommand{\cost}{\, \mathcal{C}} \newcommand{\goal}{\mathcal{Q}} \newcommand{\dgoal}{\mathcal{Y}} \newcommand{\cost}{\mathcal{C}} % Other \newcommand{\abs}[1]{{\left|#1\right|}} \newcommand{\norm}[1]{\lVert#1\rVert} \newcommand{\mat}[1]{\begin{pmatrix}#1\end{pmatrix}} \newcommand{\set}[1]{\{#1\}} \newcommand{\sprod}[1]{{\langle#1\rangle}} \ No newline at end of file \newcommand{\sprod}[1]{{\langle#1\rangle}} \newcommand{\defeq}{\mathrel{\vcenter{\baselineskip0.5ex \lineskiplimit0pt \hbox{\scriptsize.}\hbox{\scriptsize.}}}=} \newcommand{\eqdef}{=\mathrel{\vcenter{\baselineskip0.5ex \lineskiplimit0pt \hbox{\scriptsize.}\hbox{\scriptsize.}}}} \ No newline at end of file
 ... ... @@ -3,12 +3,12 @@ \item Goal: Estimate the expectation $\EE[\goal(\omega)]$, where $\goal$ is some functional of the random solution $u(\omega, x)$. \item Assume: $u_h(\omega, x)$ is the corresponding FEM solution with the \item Assume: $u_h(\omega, x)$ is the corresponding FEM solution with the convergence rate $\alpha > 0$, i.e. \label{eq:alpha-assumption} \abs{\EE[\goal_h - \goal]} \lesssim h^\alpha, \quad \abs{\EE[\goal_h - \goal]} \lesssim N^{-\alpha}, \quad \abs{\EE[\goal_h - \goal]} \lesssim N^{-\alpha / d}, \quad N = \dim(V_h) and that the cost for one sample can be bounded with $\gamma > 0$ by ... ... @@ -18,61 +18,72 @@ \cost(\goal_h) \lesssim N^{\gamma / d} and the variance of the difference $\goal_{h_l} - \goal_{h_{l-1}}$ decays with and the variance of the difference $\goal_l - \goal_{l-1}$ decays with \label{eq:beta-assumption} \abs{\mathbb{V}[\goal_{h_l} - \goal_{h_{l-1}}]} \lesssim h^\beta, \quad \abs{\mathbb{V}[\goal_{h_l} - \goal_{h_{l-1}}]} \lesssim N^{-\beta / d} \abs{\VV[\goal_l - \goal_{l-1}]} \lesssim h^\beta, \quad \abs{\VV[\goal_l - \goal_{l-1}]} \lesssim N^{-\beta / d} \end{itemize} \end{frame} \begin{frame}{Multilevel Monte Carlo Method I} \begin{frame}{Multilevel Monte Carlo Method II} \begin{itemize} \item The Monte Carlo estimator for the approximated functional is \begin{equation*} \item Monte Carlo (MC) estimator: \begin{align*} \widehat{\goal}_{h,M}^{MC} = \frac{1}{M} \sum_{m=1}^M \goal_h(\omega_m) \end{equation*} \item The root mean square error (RMSE) is then given by \begin{equation*} \end{align*} \item Root mean square error (RMSE): \begin{align*} e(\widehat{\goal}^{MC}_{h,M})^2 = \EE \left[ (\widehat{\goal}^{MC}_{h,M} - \EE[\goal])^2 \right] = \underbrace{M^{-1} \VV[\goal_h]}_{\text{estimator error}} + \underbrace{\left( \EE[Q_h - Q] \right)^2}_{\text{FEM error}}. 
\end{equation*} \item This yields a total cost of ($\cost_\epsilon(\widehat{Q}_h)$ is the cost to \item achieve $e(\widehat{Q}_h) < \epsilon$) \begin{equation*} \cost(\widehat{Q}^{MC}_{h,M}) \lesssim M \cdot N^\gamma, \quad \cost_{\epsilon}(\widehat{Q}^{MC}_{h,M}) \lesssim \epsilon^{-2 -\frac{\gamma}{\alpha}}. \end{equation*} \underbrace{\left( \EE[\goal_h - \goal] \right)^2}_{\text{FEM error}} \end{align*} \item Total cost: \begin{align*} \cost(\widehat{\goal}^{MC}_{h,M}) \lesssim M \cdot N^\gamma, \quad \cost_{\epsilon}(\widehat{\goal}^{MC}_{h,M}) \lesssim \epsilon^{-2-\frac{\gamma}{\alpha}} \end{align*} Here, $\cost_\epsilon(\widehat{\goal}_h)$ is the cost to achieve $e(\widehat{\goal}_h) < \epsilon$ \end{itemize} \end{frame} \begin{frame}{Multilevel Monte Carlo Methods I} \begin{frame}{Multilevel Monte Carlo Methods III} \begin{itemize} \item The multilevel Monte Carlo (MLMC) method finds a way to balance the cost of individual samples and the total amount (confer \cite{charrier2013finite, giles2008multilevel, teckentrup2013further}). \item Main idea MLMC: Sample from several approximation $Q_{h_l}$. \item With the linearity of the expectation operator it holds \begin{equation*} \mathbb{E}[Q_{h_L}] = \mathbb{E}[Q_{h_0}] + \sum_{l=1}^L \mathbb{E}[Q_{h_l} - Q_{h_{l-1}}] = \sum_{l=0}^L \mathbb{E}[Y_l]. \end{equation*} \item Now estimate each $Y_l$ with the classical MC method, thus \begin{equation*} \widehat{Y}^{MC}_{h,M_l} = \frac{1}{M_l} \sum_{i=1}^{M_l} \left( Q_{h_l}(\omega_i) - Q_{h_{l-1}}(\omega_i) \right), \quad \widehat{Y}^{MC}_{h,M_0} = \frac{1}{M_0} \sum_{i=1}^{M_0} Q_{h_0}(\omega_i). \end{equation*} \item This gives the MLMC estimator \begin{equation*} \widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L} = \sum_{l=0}^L \widehat{Y}^{MC}_{h,M_l} = \sum_{l=0}^L \frac{1}{M_l} \sum_{i=1}^{M_l} Y_l(\omega_{i}). 
\end{equation*} \item Main idea: Sample from several approximation levels and balance cost per level $\cost_l$ with total sample amount per level $M_l$ \item Set $\goal_l - \goal_{l-1} \defeq \dgoal_l$ and $\goal_0 \defeq \dgoal_0$: \begin{align*} \EE[\goal_L] = \EE[\goal_0] + \sum_{l=1}^L \EE[\goal_l - \goal_{l-1}] = \sum_{l=0}^L \EE[\dgoal_l] \end{align*} \item Estimate each $\dgoal_l$ with the MC method: \begin{align*} \widehat{\dgoal}^{MC}_{h,M_l} = \frac{1}{M_l} \sum_{m=1}^{M_l} \left( \goal_l(\omega_m) - \goal_{l-1} (\omega_m) \right), \quad \widehat{\dgoal}^{MC}_{h,M_0} = \frac{1}{M_0} \sum_{m=1}^{M_0} \goal_0 (\omega_m) \end{align*} \item MLMC estimator: \begin{align*} \widehat{\goal}^{MLMC}_{h,\{ M_l \}_{l=0}^L} = \sum_{l=0}^L \widehat{\dgoal}^{MC}_{h,M_l} = \sum_{l=0}^L \frac{1}{M_l} \sum_{m=1}^{M_l} \dgoal_l(\omega_{m}) \end{align*} \end{itemize} \end{frame} \begin{frame}{Multilevel Monte Carlo Methods II} \begin{itemize} \item The RMSE is then given by \item Root mean square error (RMSE): \begin{equation*} e(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L})^2 = \mathbb{E}\left[( \widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L} - \mathbb{E}[Q])^2 \right] = \underbrace{\sum_{l=0}^L \frac{1}{M_l} \VV[Y_l]}_{\text{estimator error}} + \underbrace{\left( \mathbb{E}[Q_h - Q] \right)^2}_{\text{FEM error}}. e(\widehat{\goal}^{MLMC}_{h,\{ M_l \}_{l=0}^L})^2 = \underbrace{\sum_{l=0}^L \frac{1}{M_l} \VV[\dgoal_l]}_{\text{estimator error}} + \underbrace{\left( \EE[\goal_L - \goal] \right)^2}_{\text{FEM error}}. \end{equation*} \item This leads to a better computational cost since: \begin{itemize} ... ...