Commit 8a3e9de9 authored by niklas.baumgarten's avatar niklas.baumgarten
Browse files

Merge branch 'MLMC-TP' into feature

parents bf7eedc3 168f07cf
......@@ -4,5 +4,5 @@ sprng5/
results/
Todo.txt
cmake-build-debug/
*/__pycache__/*
*/.ipynb_checkpoints/*
\ No newline at end of file
notebooks/__pycache__/
notebooks/.idea/
......@@ -41,4 +41,4 @@ add_executable(MLMC-M++ mlmc/src/Main.cpp)
# Linking
target_link_libraries(MLMC-M++ MLMC sprng SRC LIB_PS ${SUPERLU}
${BLAS_LIBRARIES} ${LAPACK_LIBRARIES} fftw3 m)
#---------------------------------------------------------------------------------------#
\ No newline at end of file
#---------------------------------------------------------------------------------------#
## Getting Started
This project combines MLMC methods and the parallel PDE solving software M++. This guide
walks you through the installation process.
walks you through the installation process and presents some example computations.
### Prerequisites
* This version of M++ uses CMake (https://cmake.org/download/) as building tool.
......@@ -49,8 +49,8 @@ Transform in the West http://fftw.org/). To install it run:
```cmake ..```
```make -j```
### MLMC-M++ Experiments
You can find jupyter notebooks with experiments in the ```notebooks``` directory.
### MLMC-M++ Exercises
You can find a PDF with exercises in the ```doc``` directory
and a complementing jupyter notebook in the ```notebooks``` directory.
Model = DGTransport
Overlap = dG1
flux_alpha = 1 #Upwind: 1, Central: 0
deg = 2 #DG
degree = 2
Degree = 2
#Problem = StochasticPollution1D
#Problem = DeterministicPollution1D
#Problem = StochasticPollution2D
#Problem = DeterministicPollution2D
#Problem = StochasticPollutionCosHat2D
#Problem = DeterministicPollutionCosHat2D
#Problem = DeterministicPollutionCosHatSquare500
Problem = StochasticPollutionMollifiedBar2D
scaling = 8
TimeSeries = uniform
startTime = 0.0
endTime = 1.0
Kmax = 250
Keps = 1e-5
functional = Mass
#functional = Energy
#functional = Outflow
#rkorder = 0 #Exponential integrator
#rkorder = 1 #Explicit Euler
#rkorder = 2 #Heun
#rkorder = 2 #Runge
rkorder = -2 #Implicit MP-rule
#rkorder = 1000 #Polynomial Krylov-Arnoldi method
gamma = 0.01
StochasticField = LogNormal
#StochasticField = Gaussian
Experiment = ConvergenceTest
#Experiment = MLMCExperiment
#Experiment = MLMCOverEpsilon
initLevels = 4, 5, 6, 7, 8
initSampleAmount = 100, 100, 100, 100, 100
uniformSampleAmount = 100
epsilon = 0.01
mcOnly = false
epsilonList = 0.01, 0.005, 0.003, 0.001
maxLevel = 9
plevel = 3
MainVerbose = 1
MCVerbose = 0
MLMCVerbose = 1
PDEVerbose = 0
GeneratorVerbose = 0
AssembleVerbose = 0
GeneratorPlotting = 0
MCPlotting = 0
MLMCPlotting = 0
# Stochastic Field
mean = 1.0
sigma = 1.0
norm_p = 2
lambda1 = 0.1
lambda2 = 0.1
smoothing = 1.9
evtol = 1e-10
# Other
Distribution = RCB
LinearSolver = GMRES
Preconditioner = LIB_PS
#Preconditioner = SGS
#Preconditioner = GaussSeidel
#Preconditioner = Jacobi
LinearReduction = 1e-12
LinearEpsilon = 1e-10
LinearSteps = 3000
NewtonSteps = 1
NewtonLineSearchSteps = 0
ConfigVerbose = 3
TimeLevel = -1
LinearVerbose = -1
MuteLevel = -1
NewtonVerbose = -1
DebugLevel = -1
......@@ -2,6 +2,9 @@ Model = DGTransport
Overlap = dG1
flux_alpha = 1 #Upwind: 1, Central: 0
deg = 2 #DG
degree = 2
Degree = 2
#Problem = StochasticPollution1D
#Problem = DeterministicPollution1D
......@@ -12,13 +15,13 @@ flux_alpha = 1 #Upwind: 1, Central: 0
#Problem = DeterministicPollution2D
#Problem = StochasticPollutionCosHat2D
#Problem = DeterministicPollutionCosHat2D
#Problem = DeterministicPollutionCosHatSquare500
Problem = StochasticPollutionMollifiedBar2D
#Problem = DeterministicPollutionMollifiedBar2D
scaling = 8
TimeSeries = uniform
startTime = 0.0
endTime = 0.5
endTime = 1.0
Kmax = 250
Keps = 1e-5
......@@ -42,27 +45,28 @@ StochasticField = LogNormal
Experiment = MLMCExperiment
#Experiment = MLMCOverEpsilon
initLevels = [4, 5, 6]
initSampleAmount = [12, 6, 2]
uniformSampleAmount = 1
epsilon = 0.01
initLevels = 4, 5, 6
initSampleAmount = 12, 6, 2
uniformSampleAmount = 100
epsilon = 0.003
mcOnly = false
epsilonList = 0.01, 0.005, 0.003, 0.001
maxLevel = 7
maxLevel = 9
plevel = 3
degree = 2
MainVerbose = 1
MainVerbose = 2
MCVerbose = 2
MLMCVerbose = 2
PDEVerbose = 1
GeneratorVerbose = 0
AssembleVerbose = 1
GeneratorPlotting = 1
MCPlotting = 1
MLMCPlotting = 1
GeneratorPlotting = 4
MCPlotting = 3
MLMCPlotting = 2
# Stochastic Field
mean = 1.0
......@@ -70,7 +74,7 @@ sigma = 1.0
norm_p = 2
lambda1 = 0.1
lambda2 = 0.1
smmothing = 1.8
smoothing = 1.9
evtol = 1e-10
# Other
......@@ -88,9 +92,9 @@ LinearSteps = 3000
NewtonSteps = 1
NewtonLineSearchSteps = 0
ConfigVerbose = 0
ConfigVerbose = 2
TimeLevel = -1
LinearVerbose = -1
MuteLevel = -1
NewtonVerbose = -1
DebugLevel = -1
\ No newline at end of file
DebugLevel = -1
#include "MonteCarloLogger.h"
// Out-of-line storage for the IndentedLogger singleton pointer;
// initialised lazily by IndentedLogger::GetInstance().
IndentedLogger *IndentedLogger::instance = nullptr;
#ifndef MLMC__MONTECARLOLOGGER__HPP
#define MLMC__MONTECARLOLOGGER__HPP
#include "m++.h"
/// Singleton logger that renders nested StartMethod()/EndMethod() scopes as an
/// indented call tree and reports the wall-clock duration of each scope.
/// NOTE(review): the original include guard reused MLMC__MULTILEVELMONTECARLO__HPP,
/// which collides with the MultilevelMonteCarlo header and can silently skip
/// this header's content when both are included; renamed to match this file.
class IndentedLogger {
  // Lazily created singleton; intentionally never deleted (process lifetime).
  static IndentedLogger *instance;
  // Current indentation prefix; grows/shrinks by one defaultIndent per level.
  string indent;
  string innerIndent;  // NOTE(review): unused -- kept to avoid changing the class layout
  const string defaultIndent = " ";
  // Start times of the currently open scopes (stack; innermost scope is last).
  vector<Date> timestamps;
  IndentedLogger() {
    indent = "";
  }
public:
  /// Returns the process-wide logger instance, creating it on first use.
  static IndentedLogger *GetInstance() {
    if (!instance)
      instance = new IndentedLogger;
    return instance;
  }
  /// Removes one indentation step and returns the remaining indent length.
  /// (The original declared an int return type but returned nothing, which is
  /// undefined behaviour; it now returns a defined value.)
  int DecreaseIndent() {
    indent.erase(0, defaultIndent.length());
    return static_cast<int>(indent.length());
  }
  /// Adds one indentation step.
  void IncreaseIndent() {
    indent.append(defaultIndent);
  }
  /// Current indentation prefix.
  string GetIndent() {
    return indent;
  }
  /// Opens a scope: logs "<msg>", records its start time and indents.
  /// `verbose` is read by the vout macro from m++.h (presumably the message is
  /// printed when verbose >= 1 -- confirm against the macro definition).
  void StartMethod(const string& msg, int verbose) {
    timestamps.push_back(Date());
    vout (1) << indent << "<" << msg << ">" << endl;
    IncreaseIndent();
  }
  /// Closes the innermost scope: dedents and logs its duration.
  void EndMethod(int verbose) {
    DecreaseIndent();
    if (timestamps.empty()) return;  // unbalanced EndMethod: nothing to time
    vout (1) << indent << "<End after " << Date() - timestamps.back() << ">" << endl;
    timestamps.pop_back();  // was erase(timestamps.end()): erasing end() is undefined behaviour
  }
  /// Logs a message at the current indentation level.
  void InnerMsg(const string& msg, int verbose) {
    vout (1) << indent << msg << endl;
  }
  /// Overwrites the current console line (carriage return, flush, no newline).
  void LogMsgFlush(const string &msg, int verbose) {
    vout(1) << "\r" << indent << msg << flush;
  }
};
#endif //MLMC__MONTECARLOLOGGER__HPP
......@@ -58,7 +58,7 @@ void MultilevelMonteCarlo::Method(const double eps) {
updateSampleAmount(eps);
if (checkForNewLevel()) {
estimateNumericalError();
if (numErr > eps / 2) {
if (numErr > eps/2) {
appendLevel(eps);
} else {
estimateStatisticalError();
......
import unittest
import numpy as np
import os
import shutil
from vtk_utilities import *
from unstructured_mesh import *
# Minimal legacy-VTK fixtures: a single quad cell (VTK cell type 8) covering
# the unit square, carrying one scalar value resp. one vector cell value.
minimal_scalar = "# vtk DataFile Version 2.0\nUnstructured Grid by M++\nASCII\nDATASET UNSTRUCTURED_GRID\nPOINTS 4 float\n0.000000 0.000000 0\n1.000000 0.000000 0\n1.000000 1.000000 0\n0.000000 1.000000 0\nCELLS 1 5\n4 0 1 2 3\nCELL_TYPES 1\n8\nCELL_DATA 1\nSCALARS scalar_value float 1\nLOOKUP_TABLE default\n1.0"
minimal_vector = "# vtk DataFile Version 2.0\nUnstructured Grid by M++\nASCII\nDATASET UNSTRUCTURED_GRID\nPOINTS 4 float\n0.000000 0.000000 0\n1.000000 0.000000 0\n1.000000 1.000000 0\n0.000000 1.000000 0\nCELLS 1 5\n4 0 1 2 3\nCELL_TYPES 1\n8\nCELL_DATA 1\nVECTORS vector_value float\n-1.000000 1.000000 0"

# Refined 3x3 point grid with four quad cells, used as the Upscale_into target.
# NOTE(review): point index 7 has z = 0.5 while all other points lie in the
# z = 0 plane -- confirm this is intended and not a typo.
new_coords = np.array([
    [0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [1.0, 0.0, 0.0],
    [1.0, 0.5, 0.0], [0.5, 0.5, 0.0], [0.0, 0.5, 0.0],
    [0.0, 1.0, 0.0], [0.5, 1.0, 0.5], [1.0, 1.0, 0.0],
])
# Flat VTK cell list: four quads of four corner indices each.
new_cells = np.array([int(token) for token in "4 0 1 4 5 4 1 2 3 4 4 5 4 7 6 4 4 3 8 7".split(" ")])
class TestVtk(unittest.TestCase):
    """Shared fixture: before each test builds a Testsamples/ directory with
    one vector vtk file and ten numbered scalar vtk files, removes it after."""

    @classmethod
    def setUp(cls):
        try:
            os.mkdir("Testsamples")
            with open("Testsamples/vector.vtk", 'w') as file:
                file.write(minimal_vector)
            for i in range(10):
                with open("Testsamples/scalar.%04d.vtk" % i, 'w') as file:
                    file.write(minimal_scalar)
        except:
            # Best-effort: leftover fixtures from a previous run are tolerated.
            pass

    @classmethod
    def tearDown(cls):
        try:
            shutil.rmtree("Testsamples")
        except Exception as e:
            print(e)
class TestReader(TestVtk):
    """Checks that the vtk reader classes reproduce the fixture geometry."""

    # Geometry shared by every fixture file: one quad on the unit square.
    _unit_square = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
                             [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]])

    def _check_geometry(self, coords, cells, cell_coords):
        # Cell list is one quad (4 corners), coordinates match the unit square,
        # and the per-cell coordinates mirror the global coordinate list.
        np.testing.assert_array_equal(np.array(cells), np.array([4, 0, 1, 2, 3]))
        np.testing.assert_array_equal(np.array(coords), self._unit_square)
        for k in range(4):
            np.testing.assert_array_equal(cell_coords[0][k], np.array(coords[k]))

    def test_scalar_reader(self):
        reader = VtkScalarReader("scalar.0000.vtk", "Testsamples/")
        coords, cells, values = reader.to_numpy()
        cell_coords = get_cell_coords(cells, coords)
        np.testing.assert_array_equal(values, np.array([1.0]))
        self._check_geometry(coords, cells, cell_coords)

    def test_multi_scalar_reader(self):
        reader = VtkGroupScalarReader("scalar.", "Testsamples/")
        coords, cells, values = reader.to_numpy()
        cell_coords = get_cell_coords(cells, coords)
        np.testing.assert_array_equal(values, np.array([[1.0] for _ in range(10)]))
        self._check_geometry(coords, cells, cell_coords)

    def test_vector_reader(self):
        reader = VtkVectorReader("vector.vtk", "Testsamples/")
        coords, cells, values = reader.to_numpy()
        cell_coords = get_cell_coords(cells, coords)
        np.testing.assert_array_equal(values, np.array([[-1.0, 1.0, 0.0]]))
        self._check_geometry(coords, cells, cell_coords)
class TestPlotting(TestVtk):
    """Smoke tests: each plot type must produce an output png file."""

    def _fresh_plot(self):
        # New VtkPlot rooted in the fixture directory.
        plot = VtkPlot()
        plot.set_wd("Testsamples/")
        return plot

    def _save_and_check(self, plot):
        plot.save("Testsamples/output.png")
        self.assertEqual(os.path.isfile("Testsamples/output.png"), True)

    def test_genplot(self):
        # An empty plot must still be savable to a file.
        self.assertEqual(os.path.isfile("Testsamples/output.png"), False)
        self._save_and_check(self._fresh_plot())

    def test_pcolormesh(self):
        plot = self._fresh_plot()
        plot.add_pcolormesh("scalar.0000.vtk")
        self._save_and_check(plot)

    def test_quivers(self):
        plot = self._fresh_plot()
        plot.add_quivers("vector.vtk")
        self._save_and_check(plot)

    def test_imshow(self):
        plot = self._fresh_plot()
        plot.add_imshow("scalar.0000.vtk")
        self._save_and_check(plot)
class TestScalarMeshes(TestVtk):
    """Unit tests for UnstructuredScalarMesh arithmetic and upscaling."""

    @staticmethod
    def _load():
        # Every test works on the same single-cell scalar fixture.
        return UnstructuredScalarMesh("Testsamples/scalar.0000.vtk")

    def test_read(self):
        mesh = self._load()
        np.testing.assert_array_equal(mesh.values, np.array([1.0]))
        np.testing.assert_array_equal(np.array(mesh.cells), np.array([4, 0, 1, 2, 3]))
        np.testing.assert_array_equal(
            np.array(mesh.coordinates),
            np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]))

    def test_scaling(self):
        mesh = self._load()
        mesh.scaling(2.0)
        np.testing.assert_array_equal(mesh.values, np.array([2.0]))

    def test_cell_coord(self):
        mesh = self._load()
        for k in range(4):
            np.testing.assert_array_equal(mesh.cell_coords[0][k], np.array(mesh.coordinates[k]))

    def test_add(self):
        first, second = self._load(), self._load()
        total = first + second
        np.testing.assert_array_equal(total.values, np.array([2.0]))
        # sum() also exercises mesh + int (start value 0) support.
        combined = sum([first, second, total])
        np.testing.assert_array_equal(combined.values, np.array([4.0]))

    def test_sub(self):
        first, second = self._load(), self._load()
        first.scaling(3.0)
        diff = first - second
        np.testing.assert_array_equal(diff.values, np.array([2.0]))

    def test_upscale(self):
        fine = self._load().Upscale_into(new_coords, new_cells)
        np.testing.assert_array_equal(fine.coordinates, new_coords)
        np.testing.assert_array_equal(fine.cells, new_cells)
        np.testing.assert_array_equal(fine.values, np.array([1.0, 1.0, 1.0, 1.0]))
class TestVectorMeshes(TestVtk):
    """Unit tests for UnstructuredVectorMesh arithmetic and upscaling."""

    @staticmethod
    def _load():
        # Every test works on the same single-cell vector fixture.
        return UnstructuredVectorMesh("Testsamples/vector.vtk")

    def test_read(self):
        mesh = self._load()
        np.testing.assert_array_equal(mesh.values, np.array([[-1.0, 1.0, 0.0]]))
        np.testing.assert_array_equal(np.array(mesh.cells), np.array([4, 0, 1, 2, 3]))
        np.testing.assert_array_equal(
            np.array(mesh.coordinates),
            np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]))

    def test_scaling(self):
        mesh = self._load()
        mesh.scaling(2.0)
        np.testing.assert_array_equal(mesh.values, np.array([[-2.0, 2.0, 0.0]]))

    def test_cell_coord(self):
        mesh = self._load()
        for k in range(4):
            np.testing.assert_array_equal(mesh.cell_coords[0][k], np.array(mesh.coordinates[k]))

    def test_add(self):
        first, second = self._load(), self._load()
        total = first + second
        np.testing.assert_array_equal(total.values, np.array([[-2.0, 2.0, 0.0]]))
        # sum() also exercises mesh + int (start value 0) support.
        combined = sum([first, second, total])
        np.testing.assert_array_equal(combined.values, np.array([[-4.0, 4.0, 0.0]]))

    def test_sub(self):
        first, second = self._load(), self._load()
        first.scaling(3.0)
        diff = first - second
        np.testing.assert_array_equal(diff.values, np.array([[-2.0, 2.0, 0.0]]))

    def test_upscale(self):
        fine = self._load().Upscale_into(new_coords, new_cells)
        np.testing.assert_array_equal(fine.coordinates, new_coords)
        np.testing.assert_array_equal(fine.cells, new_cells)
        np.testing.assert_array_equal(fine.values,
                                      np.array([[-1.0, 1.0, 0.0] for _ in range(4)]))
class TestMultiScalarMeshes(TestVtk):
    """Unit tests for UnstructuredMultiScalarMesh (ten time steps per group)."""

    @staticmethod
    def _load():
        # Loads the group of ten scalar.00NN.vtk fixture files.
        return UnstructuredMultiScalarMesh("Testsamples/scalar.")

    def test_read(self):
        mesh = self._load()
        np.testing.assert_array_equal(mesh.values, np.array([[1.0] for _ in range(10)]))
        np.testing.assert_array_equal(np.array(mesh.cells), np.array([4, 0, 1, 2, 3]))
        np.testing.assert_array_equal(
            np.array(mesh.coordinates),
            np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]))

    def test_scaling(self):
        mesh = self._load()
        mesh.scaling(2.0)
        np.testing.assert_array_equal(mesh.values, np.array([[2.0] for _ in range(10)]))

    def test_cell_coord(self):
        mesh = self._load()
        for k in range(4):
            np.testing.assert_array_equal(mesh.cell_coords[0][k], np.array(mesh.coordinates[k]))

    def test_add(self):
        first, second = self._load(), self._load()
        first.scaling(1.0)  # no-op scaling kept from the original test
        total = first + second
        np.testing.assert_array_equal(total.values, np.array([[2.0] for _ in range(10)]))
        # sum() also exercises mesh + int (start value 0) support.
        combined = sum([first, second, total])
        np.testing.assert_array_equal(combined.values, np.array([[4.0] for _ in range(10)]))

    def test_sub(self):
        first, second = self._load(), self._load()
        first.scaling(3.0)
        diff = first - second
        np.testing.assert_array_equal(diff.values, np.array([[2.0] for _ in range(10)]))

    def test_upscale(self):
        # NOTE(review): 20 rows are expected here although the group holds 10
        # time steps; confirm Upscale_into is meant to double the step count.
        fine = self._load().Upscale_into(new_coords, new_cells)
        np.testing.assert_array_equal(fine.coordinates, new_coords)
        np.testing.assert_array_equal(fine.cells, new_cells)
        np.testing.assert_array_equal(fine.values,
                                      np.array([[1.0, 1.0, 1.0, 1.0] for _ in range(20)]))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
This diff is collapsed.
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Multilevel Monte Carlo method applied to elliptic problems"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"import subprocess\n",
"import os\n",
"working_dir = \"../build\"\n",
"os.chdir(working_dir)\n",
"\n",
"def buildMpp():\n",
" subprocess.run([\"cmake\", \"..\"], cwd=working_dir)\n",
" subprocess.run([\"make\", \"-j\"], cwd=working_dir)\n",
"\n",
"def runMpp(kernels, *args, conf=\"\"): \n",
" runParameters = [\"mpirun\", \"-np\", str(kernels), \"MLMC-M++\"]\n",
" for arg in args:\n",
" runParameters.append(arg)\n",
" \n",
" stdout = subprocess.run(runParameters, stdout=subprocess.PIPE, cwd=working_dir)\n",
" return stdout.stdout.decode(\"utf-8\")\n",
"\n",
"buildMpp()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"stdout = runMpp(4)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},