Commit 70177437 by niklas.baumgarten

### worked on slides

parent cd7ac3d6

95.1 KB

\begin{frame}{Introduction and Example I}
  \begin{itemize}
    \item \underline{Problem:} Let $u(\omega, x) \in V$ be a random PDE solution
      on $\Omega$ and $D$. Let $\goal(\omega)$ be some functional of
      $u(\omega, x)$. Estimate $\EE[\goal(\omega)]$
    \item \underline{Example (1D Elliptic Model Problem):} Let $D \subset \RR^{d=1}$.
      Search for $u \in V$, such that
      \begin{align*}
        % ... (equation body not shown in this diff excerpt)
        u(1) = 0
      \end{align*}
      with $\kappa(\omega, x) = \log(g(\omega, x))$, where $g$ is a Gaussian field
  \end{itemize}
\end{frame}

% ... (intermediate frames not shown in this diff excerpt)

    \begin{align*}
      e(\widehat{\goal}^{MC}_{h,M})^2
      = \EE \left[ (\widehat{\goal}^{MC}_{h,M} - \EE[\goal])^2 \right]
      = \underbrace{M^{-1} \VV[\goal_h]}_{\text{Estimator error}} +
        \underbrace{\left( \EE[\goal_h - \goal] \right)^2}_{\text{FEM error}}
    \end{align*}
    \item \underline{Total Cost:}
    \begin{align*}
      \cost(\widehat{\goal}^{MC}_{h,M}) \lesssim M \cdot N^\gamma, \quad
      \cost_{\epsilon}(\widehat{\goal}^{MC}_{h,M}) \lesssim
      % ... (right-hand side continues outside this diff excerpt)
    \end{align*}

% ... (intermediate content not shown in this diff excerpt)

    \item \underline{Example (2D Elliptic Model Problem):} Let $D \subset \RR^{d=2}$.
      Search for $u \in V$, such that
      \begin{align*}
        - \div(\kappa(\omega,x) \nabla u(\omega,x)) \overset{\text{on } D}{=} 0, \quad
        \nabla u(x) \cdot n \overset{\text{on } \Gamma_N}{=} -1, \quad
        u(x) \overset{\text{on } \Gamma_D}{=} 0
      \end{align*}
      with $\kappa(\omega, x) = \log(g(\omega, x))$, where $g$ is a Gaussian field
      with the covariance function
      $C(x, y) = \sigma^2 \exp(- \norm{x - y}_2^s / \lambda^s)$
  \end{itemize}
  \begin{figure}
    \centering
    \subfigure{\includegraphics[width=0.99\linewidth]{img/perm_potential.png}}
  \end{figure}
\end{frame}

\begin{frame}{Multilevel Monte Carlo Estimator I}
  \begin{itemize}
    \item \underline{Main Idea:} Draw samples from several levels
      $l \in \{ 0, \dots, L \}$ and balance $\cost_l$ with $M_l$
    \item Set $\goal_l - \goal_{l-1} \defeq \dgoal_l$ and $\goal_0 \defeq \dgoal_0$:
      \begin{align*}
        \EE[\goal_L] = \EE[\goal_0] + \sum_{l=1}^L \EE[\goal_l - \goal_{l-1}] =
        % ... (telescoping sum continues outside this diff excerpt)
      \end{align*}

% ... (intermediate lines not shown in this diff excerpt)

      \begin{align*}
        \widehat{\dgoal}^{MC}_{h,M_0}
        = \frac{1}{M_0} \sum_{m=1}^{M_0} \goal_0 (\omega_m)
      \end{align*}
    \item \underline{MLMC Estimator:}
      \begin{align*}
        \widehat{\goal}^{MLMC}_{h,\{ M_l \}_{l=0}^L}
        = \sum_{l=0}^L \widehat{\dgoal}^{MC}_{h,M_l} =
        % ... (right-hand side continues outside this diff excerpt)
% ... (end of previous frame not shown in this diff excerpt)
  \end{itemize}
\end{frame}

\begin{frame}{Multilevel Monte Carlo Estimator II}
  \begin{itemize}
    \item \underline{RMSE (Root Mean Square Error):}
      \begin{align*}
        e(\widehat{\goal}^{MLMC}_{h,\{ M_l \}_{l=0}^L})^2 =
        \underbrace{\sum_{l=0}^L \frac{1}{M_l} \VV[\dgoal_l]}_{\text{Estimator error}} +
        \underbrace{\left( \EE[\goal_L - \goal] \right)^2}_{\text{FEM error}}
      \end{align*}
    \item \underline{Total Cost:}
      \begin{align*}
        \cost(\widehat{\goal}^{MLMC}_{h,\{ M_l \}_{l=0}^L})
        \lesssim \sum_{l=0}^L M_l \cost_l, \quad
        \cost_{\epsilon}(\widehat{\goal}^{MLMC}_{h,\{M_l \}_{l=0}^L})
        \lesssim \epsilon^{-2-(\gamma - \beta)/\alpha}
      \end{align*}
      Here, we assumed $\beta < \gamma$
    \item Arguments (...)
  \end{itemize}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{frame}{Multilevel Monte Carlo method}
  \begin{theorem}[Multilevel Monte Carlo method]
    \label{MLMC_theorem}
    {\footnotesize
    Suppose that there are positive constants
    $\alpha, \beta, \gamma, c_1, c_2, c_3 > 0$ such that
    $\alpha \geq \frac{1}{2} \min(\beta, \gamma)$ and
    \begin{enumerate}
      \item $\left| \E[Q_{h_l} - Q] \right| \leq c_1 h_l^\alpha$
      \item $\V[Q_{h_l} - Q_{h_{l-1}}] \leq c_2 h_l^\beta$
      \item $\mathcal{C}_l \leq c_3 h_l^{- \gamma}$.
    \end{enumerate}
    Then, for any $0 < \epsilon < \frac{1}{e}$, there exists an $L$ and a
    sequence $\{ M_l \}_{l=0}^L$, such that
    \begin{equation*}
      e(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L})^2
      = \E\left[( \widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L} - \E[Q])^2 \right]
      < \epsilon^2
    \end{equation*}
    and
    \begin{equation*}
      \mathcal{C}_\epsilon(\widehat{Q}^{MLMC}_{h,\{ M_l \}_{l=0}^L}) \lesssim
      \begin{cases}
        \epsilon^{-2}, &\text{if } \beta > \gamma \\
        \epsilon^{-2}\log(\epsilon)^2, &\text{if } \beta = \gamma \\
        \epsilon^{-2-(\gamma - \beta)/\alpha}, &\text{if } \beta
        < \gamma
      \end{cases},
    \end{equation*}
    where the hidden constant depends on $c_1, c_2, c_3$.}
  \end{theorem}
\end{frame}

\begin{frame}{Multilevel Monte Carlo Estimator III}
  \begin{algorithm}[H]
    \caption{Multilevel Monte Carlo Estimator}
    \label{alg:mlmc}
    \begin{algorithmic}[1]
      {\footnotesize
      \STATE Choose $h_{l=0}^\text{init}, h_{l=1}^\text{init}, \dots, h_{l=L}^\text{init}$
        and $M_{l=0}^\text{init}, M_{l=1}^\text{init}, \dots, M_{l=L}^\text{init}$
      \STATE Set $\{{\vartriangle} M_l = M_l^\text{init}\}_{l = 0}^{L}$
        and $\{M_l = 0\}_{l = 0}^{L}$
      \WHILE {${\vartriangle} M_l > 0$ on any level}
        \FOR {levels with ${\vartriangle} M_l > 0$}
          \STATE $\dgoal_l, \, \mathcal{C}_l \leftarrow
            \texttt{MonteCarlo} ({\vartriangle} M_l, l)$
          \STATE Update $\mathcal{C}_l$, $|\mathbb{E}[\dgoal_l]|$ and $\mathbb{V}[\dgoal_l]$
          \STATE Set $M_l \leftarrow M_l + {\vartriangle} M_l$, ${\vartriangle} M_l = 0$
        \ENDFOR
\right) \right\rceil$ \STATE Test for weak convergence with $|\E[Q_{h_L} - Q_{h_{L-1}}]| < (2^\alpha - 1) \frac{\epsilon}{\sqrt{2}}$ \STATE If not converged, increase range of levels by one level and initialize new $M_L$ \STATE Estimate $\alpha$, $\beta$, $\gamma$ with~\eqref{eq:alpha-assumption}, ~\eqref{eq:gamma-assumption} and~\eqref{eq:beta-assumption} \STATE Estimate $\{M_l^\text{opt}\}_{l = 0}^{L} with$ %~\eqref{eq:optimal-Ml} %\label{equation} $M_l \approx 2 \varepsilon^{-2} \sqrt{\frac{\VV[\dgoal_l]}{\mathcal{C}_l}} \left( \sum_{l=0}^L \sqrt{\mathbb{V}[\dgoal_l] \cost_l} \right)$ % \STATE Update $\{{\vartriangle} M_l\}_{l = 0}^{L} = \{ M_l^\text{opt} - M_l\}_{l = 0}^{L}$ \STATE Test for weak convergence %~\eqref{eq:convergence-test} % \label{eq:convergence-test} $|\EE[{\goal}_L - {\goal}_{L-1}]| \lesssim (2^\alpha - 1) \frac{\varepsilon}{\sqrt{2}}$ % \IF {not converged} \STATE Set $L \leftarrow L + 1$ and update $\{{\vartriangle} M_l\}_{l = 0}^{L}$ \ENDIF \ENDWHILE} \end{algorithmic} \end{algorithm} \end{frame} \begin{frame}{Example III} \begin{itemize} \item \underline{Example (2D Darcy Transport):} (...) \end{itemize} \end{frame}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!