Commit 60a44560 authored by jannick.wolters

purged parmetis


Former-commit-id: d420677c
parent 218f3f71
@@ -7,9 +7,6 @@
[submodule "code/ext/blaze"]
path = code/ext/blaze
url = https://bitbucket.org/blaze-lib/blaze.git
[submodule "code/ext/parmetis"]
path = code/ext/parmetis
url = https://github.com/slydex/parmetis.git
[submodule "code/ext/neuralEntropy"]
path = code/ext/neuralEntropy
url = https://github.com/ScSteffen/neuralEntropy.git
@@ -49,17 +49,10 @@ endif()
include( blaze-cache-config )
include_directories( ${CMAKE_SOURCE_DIR}/ext/blaze )
add_compile_definitions( METIS_EXPORT= )
set( DISABLE_PARMETIS_PROGRAMS ON )
set( ParMETIS_PATH ${CMAKE_CURRENT_SOURCE_DIR}/ext/parmetis )
include_directories( ${ParMETIS_PATH}/include )
include_directories( ${ParMETIS_PATH}/metis/include )
add_subdirectory( ${ParMETIS_PATH} )
include_directories( ${CMAKE_SOURCE_DIR}/ext/cpptoml/include )
include_directories( ${CMAKE_SOURCE_DIR}/ext/spdlog/include )
set( CORE_LIBRARIES ${Python3_LIBRARIES} ${LAPACK_LIBRARIES} ${BLAS_LIBRARIES} ${MPI_LIBRARIES} ${VTK_LIBRARIES} OpenMP::OpenMP_CXX parmetis -lstdc++fs )
set( CORE_LIBRARIES ${Python3_LIBRARIES} ${LAPACK_LIBRARIES} ${BLAS_LIBRARIES} ${MPI_LIBRARIES} ${VTK_LIBRARIES} OpenMP::OpenMP_CXX -lstdc++fs )
#################################################
Subproject commit 263c94a262be616484b0bbeb33a6677411f25fbb
@@ -10,8 +10,6 @@
#include <omp.h>
#include <vector>
#include "metis.h"
#include "parmetis.h"
#include "toolboxes/errormessages.h"
#include "toolboxes/reconstructor.h"
@@ -42,13 +40,11 @@ class Mesh
std::vector<std::vector<Vector>> _cellNormals;
/*! @brief Tags each cell with its boundary type. None means no boundary. dimension: numCells */
std::vector<BOUNDARY_TYPE> _cellBoundaryTypes;
std::vector<unsigned> _colors; /*!< @brief Color of each cell (for MPI mesh partitioning). dimension: numCells */
blaze::CompressedMatrix<bool> _nodeNeighbors; /*!< @brief neighborhood relationship of nodes for (par-)metis */
void ComputeCellAreas(); /*!< @brief Computes only the areas of the mesh cells. Write to _cellAreas. */
void ComputeCellMidpoints(); /*!< @brief Compute only the midpoints of the cells. Write to _cellMidPoints*/
void ComputeConnectivity(); /*!< @brief Computes _cellNeighbors and _nodeNeighbors, i.e. the neighborhood relations in the mesh */
void ComputePartitioning(); /*!< @brief Computes local partitioning for openMP */
/*! @brief Computes the outward facing normal of two neighboring nodes nodeA and nodeB with a common cell center.
* Normals are scaled with their respective edge length
@@ -90,10 +86,6 @@ class Mesh
* @return dimension: numCells */
const std::vector<double>& GetCellAreas() const;
/*! @brief Return the color/ID of the mesh partition
* @return dimension: numCells */
const std::vector<unsigned>& GetPartitionIDs() const;
/*! @brief Returns the neighbor cell IDs for every cell
* @return dimension: numCells x numNodes */
const std::vector<std::vector<unsigned>>& GetNeighbours() const;
@@ -17,7 +17,6 @@ Mesh::Mesh( std::vector<Vector> nodes,
ComputeCellAreas();
ComputeCellMidpoints();
ComputeConnectivity();
// ComputePartitioning();
ComputeBounds();
}
@@ -219,142 +218,6 @@ Vector Mesh::ComputeOutwardFacingNormal( const Vector& nodeA, const Vector& node
return n;
}
void Mesh::ComputePartitioning() {
int comm_size, comm_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Comm_size( comm, &comm_size );
MPI_Comm_rank( comm, &comm_rank );
unsigned ompNumThreads = omp_get_max_threads();
// only run coloring if multiple OpenMP threads are present
if( ompNumThreads > 1 ) {
// set up the adjacency relationship of the nodes
blaze::CompressedMatrix<bool> adjMatrix( _numNodes, _numNodes );
for( unsigned i = 0; i < _numNodes; ++i ) {
for( unsigned j = 0; j < _numCells; ++j ) {
for( unsigned k = 0; k < _numNodesPerCell; ++k ) {
if( i == _cells[j][k] ) {
if( k == 0 ) {
adjMatrix.set( i, _cells[j][_numNodesPerCell - 1], true );
adjMatrix.set( i, _cells[j][1], true );
}
else if( k == _numNodesPerCell - 1 ) {
adjMatrix.set( i, _cells[j][_numNodesPerCell - 2], true );
adjMatrix.set( i, _cells[j][0], true );
}
else {
adjMatrix.set( i, _cells[j][k - 1], true );
adjMatrix.set( i, _cells[j][k + 1], true );
}
}
}
}
}
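// at this point adjMatrix links every node to its cyclic predecessor and
// successor within each cell containing it, i.e. along the cell-boundary edges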
// for xadj and adjncy structure see parmetis documentation
std::vector<int> xadj( _numNodes + 1 );
int ctr = 0;
std::vector<int> adjncy;
for( unsigned i = 0; i < _numNodes; ++i ) {
xadj[i] = ctr;
for( unsigned j = 0; j < _numNodes; ++j ) {
if( adjMatrix( i, j ) ) {
adjncy.push_back( static_cast<int>( j ) );
ctr++;
}
}
}
xadj[_numNodes] = ctr;
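// CSR layout example: for a triangle graph 0-1-2 with all pairs adjacent,
// xadj = {0,2,4,6} and adjncy = {1,2, 0,2, 0,1}; the neighbors of node i
// are adjncy[xadj[i]] ... adjncy[xadj[i+1]-1]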
std::vector<int> partitions;
int edgecut = 0;
int ncon = 1;
int nparts = ompNumThreads;
real_t ubvec = static_cast<real_t>( 1.05 ); // parameter taken from SU2
if( comm_size > 1 ) { // if multiple MPI ranks -> use parmetis
// split adjacency information for each MPI rank -> see 'local' variables
std::vector<int> local_xadj{ 0 };
unsigned xadjChunk = std::ceil( static_cast<float>( _numNodes ) / static_cast<float>( comm_size ) );
unsigned xadjStart = comm_rank * xadjChunk + 1;
unsigned adjncyStart;
for( unsigned i = 0; i < xadjChunk; ++i ) {
if( i == 0 ) adjncyStart = xadj[i + xadjStart - 1];
local_xadj.push_back( xadj[i + xadjStart] );
}
unsigned adjncyEnd = local_xadj.back();
for( unsigned i = 1; i < local_xadj.size(); ++i ) {
local_xadj[i] -= xadj[xadjStart - 1];
}
std::vector<int> local_adjncy( adjncy.begin() + adjncyStart, adjncy.begin() + adjncyEnd );
// vtxdist describes which nodes are on which processor - see parmetis doc
std::vector<int> vtxdist{ 0 };
for( unsigned i = 1; i <= static_cast<unsigned>( comm_size ); i++ ) {
vtxdist.push_back( std::min( i * xadjChunk, _numNodes ) );
}
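// vtxdist is monotone with vtxdist[0] = 0 and vtxdist[comm_size] = _numNodes;
// rank r owns the global nodes vtxdist[r] .. vtxdist[r+1]-1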
// weight setup as in SU2
real_t* tpwgts = new real_t[nparts];
for( unsigned i = 0; i < static_cast<unsigned>( nparts ); ++i ) {
tpwgts[i] = 1.0 / static_cast<real_t>( nparts );
}
// setup as in SU2
int wgtflag = 0;
int numflag = 0;
int options[1];
options[0] = 0;
unsigned chunkSize = local_xadj.size() - 1;
int* part = new int[chunkSize];
ParMETIS_V3_PartKway( vtxdist.data(),
local_xadj.data(),
local_adjncy.data(),
nullptr,
nullptr,
&wgtflag,
&numflag,
&ncon,
&nparts,
tpwgts,
&ubvec,
options,
&edgecut,
part,
&comm );
partitions.resize( chunkSize * comm_size );
MPI_Allgather( part, chunkSize, MPI_INT, partitions.data(), chunkSize, MPI_INT, comm ); // gather all information in partitions
partitions.resize( _numNodes );
// free memory
delete[] tpwgts;
}
else { // with a single MPI rank -> use metis
int options[METIS_NOPTIONS];
METIS_SetDefaultOptions( options );
int nvtxs = _numNodes;
int vsize;
partitions.resize( _numNodes );
METIS_PartGraphKway(
&nvtxs, &ncon, xadj.data(), adjncy.data(), nullptr, &vsize, nullptr, &nparts, nullptr, &ubvec, options, &edgecut, partitions.data() );
}
// extract coloring information
_colors.resize( _numCells );
for( unsigned i = 0; i < _numCells; ++i ) {
std::map<unsigned, int> occurances;
for( unsigned j = 0; j < _numNodesPerCell; ++j ) {
++occurances[partitions[_cells[i][j]]];
}
_colors[i] = occurances.rbegin()->first;
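// note: std::map iterates in ascending key order, so rbegin()->first is the
// largest partition ID among this cell's nodes, not necessarily the most
// frequent one (see the count-based sketch after this hunk)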
}
}
else {
_colors.resize( _numCells, 0u );
}
}
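Note on the deleted serial branch: it reduces to building the CSR arrays above and making a single METIS_PartGraphKway call. Below is a minimal, self-contained sketch of that pattern, assuming METIS 5.x with idx_t defined as int (as the casts above imply); the toy 4-cycle graph is illustrative only, and the nullptr arguments request the documented defaults for the weight arrays (the deleted code passed an uninitialized &vsize here, which nullptr replaces).

// Illustrative sketch: serial k-way partitioning of a toy 4-cycle graph
// with METIS 5.x (idx_t assumed to be int, matching the casts above).
#include <cstdio>
#include <vector>
#include "metis.h"

int main() {
    std::vector<idx_t> xadj   = { 0, 2, 4, 6, 8 };          // CSR offsets
    std::vector<idx_t> adjncy = { 1, 3, 0, 2, 1, 3, 0, 2 }; // neighbors of nodes 0..3
    idx_t nvtxs = 4, ncon = 1, nparts = 2, edgecut = 0;
    real_t ubvec = static_cast<real_t>( 1.05 );             // imbalance tolerance, as above
    idx_t options[METIS_NOPTIONS];
    METIS_SetDefaultOptions( options );
    std::vector<idx_t> part( nvtxs );
    // nullptr selects the defaults for vwgt, vsize, adjwgt and tpwgts
    int status = METIS_PartGraphKway( &nvtxs, &ncon, xadj.data(), adjncy.data(), nullptr, nullptr,
                                      nullptr, &nparts, nullptr, &ubvec, options, &edgecut, part.data() );
    if( status == METIS_OK )
        for( idx_t i = 0; i < nvtxs; ++i ) std::printf( "node %d -> part %d\n", (int)i, (int)part[i] );
    return 0;
}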
void Mesh::ComputeSlopes( unsigned nq, VectorVector& psiDerX, VectorVector& psiDerY, const VectorVector& psi ) const {
for( unsigned k = 0; k < nq; ++k ) {
for( unsigned j = 0; j < _numCells; ++j ) {
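A side note on the color-extraction loop deleted above: occurances is a std::map keyed by partition ID, so occurances.rbegin()->first yields the largest partition ID among a cell's nodes, not necessarily the partition owning most of them. A hypothetical count-based variant (function name and signature illustrative only) could look like:

#include <map>
#include <vector>

// Hypothetical majority-vote coloring: assign a cell the partition that owns
// the most of its nodes (ties resolved toward the smaller partition ID).
unsigned MajorityColor( const std::vector<unsigned>& cellNodes, const std::vector<int>& partitions ) {
    std::map<unsigned, int> occurrences;
    for( unsigned node : cellNodes ) ++occurrences[static_cast<unsigned>( partitions[node] )];
    unsigned bestColor = 0;
    int bestCount      = -1;
    for( const auto& [color, count] : occurrences ) {
        if( count > bestCount ) {
            bestCount = count;
            bestColor = color;
        }
    }
    return bestColor;
}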
@@ -516,7 +379,6 @@ const std::vector<Vector>& Mesh::GetNodes() const { return _nodes; }
const std::vector<Vector>& Mesh::GetCellMidPoints() const { return _cellMidPoints; }
const std::vector<std::vector<unsigned>>& Mesh::GetCells() const { return _cells; }
const std::vector<double>& Mesh::GetCellAreas() const { return _cellAreas; }
const std::vector<unsigned>& Mesh::GetPartitionIDs() const { return _colors; }
const std::vector<std::vector<unsigned>>& Mesh::GetNeighbours() const { return _cellNeighbors; }
const std::vector<std::vector<Vector>>& Mesh::GetNormals() const { return _cellNormals; }
const std::vector<BOUNDARY_TYPE>& Mesh::GetBoundaryTypes() const { return _cellBoundaryTypes; }