Commits (12)
......@@ -57,7 +57,7 @@ mpitest-mpp:
image: ${REGISTRY}/${IMAGE_NAME_MPP}
script:
- cd /mpp/build
- python3 mppyrun.py --mpi_tests=1 --mute=0
- python3 mppyrun.py --mpi_tests=1 --mute=1
dependencies: [ "build-mpp" ]
tags: [ docker ]
......
......@@ -274,6 +274,50 @@ MACRO(add_mpp_subdirectory subdir_path)
ENDMACRO()
#---------------------------------------------------------------------------------------#
#########################################
## Find TIRPC (for HoreKa)
#########################################
#
# glibc no longer bundles the SunRPC headers, so clusters such as HoreKa
# provide them through the separate libtirpc package. pkg-config results
# are used only as hints; find_path/find_library do the actual lookup.
find_package(PkgConfig QUIET)
pkg_check_modules(PC_TIRPC libtirpc)
find_path(TIRPC_INCLUDE_DIRS
NAMES netconfig.h
PATH_SUFFIXES tirpc
HINTS ${PC_TIRPC_INCLUDE_DIRS}
)
find_library(TIRPC_LIBRARIES
NAMES tirpc
HINTS ${PC_TIRPC_LIBRARY_DIRS}
)
set(TIRPC_VERSION ${PC_TIRPC_VERSION})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(TIRPC
REQUIRED_VARS TIRPC_LIBRARIES TIRPC_INCLUDE_DIRS
VERSION_VAR TIRPC_VERSION
)
mark_as_advanced(TIRPC_INCLUDE_DIRS TIRPC_LIBRARIES)
###########################################
## End TIRPC
###########################################
# NOTE(review): directory-scoped include_directories() is kept so all targets
# below keep seeing the tirpc headers; prefer target_include_directories()
# on the consuming targets when refactoring.
if (TIRPC_FOUND)
include_directories(${TIRPC_INCLUDE_DIRS})
endif()
# Subdirectories
add_mpp_subdirectory(${PROJECT_MPP_DIR}/src)
#---------------------------------------------------------------------------------------#
......@@ -289,4 +333,4 @@ enable_testing()
add_mpp_subdirectory(${PROJECT_MPP_DIR}/tests)
#---------------------------------------------------------------------------------------#
file(COPY ${PROJECT_MPP_DIR}/python/mppyrun.py DESTINATION ${PROJECT_BINARY_DIR})
\ No newline at end of file
file(COPY ${PROJECT_MPP_DIR}/python/mppyrun.py DESTINATION ${PROJECT_BINARY_DIR})
......@@ -26,6 +26,12 @@ endif ()
# Static core library assembled from the source list collected above.
add_library(LIB_BASIC STATIC ${basic_src})
# Keyword-less signature kept for consistency with the rest of the project;
# in this legacy form the linked libraries propagate to LIB_BASIC's consumers.
target_link_libraries(LIB_BASIC ${LAPACK_LIBRARIES} ${BLAS_LIBRARIES})
# Link libtirpc when the discovery step above found it (SunRPC replacement,
# needed e.g. on HoreKa where glibc no longer ships the RPC symbols).
if (TIRPC_FOUND)
target_link_libraries(LIB_BASIC ${TIRPC_LIBRARIES})
endif()
# Optional C-XSC interval-arithmetic support, enabled via the USE_CXSC switch.
if (USE_CXSC)
target_link_libraries(LIB_BASIC cxsc)
endif ()
......@@ -4,59 +4,11 @@
using namespace std;
ParallelSolver::ParallelSolver() : min_matrix_size(20),
psize(0), maxP(0), PS_cd(true),
onecal(2.5e-10), sod(4e-8), latency(2e-6),
steps(0), PSM(0) {
ParallelSolver::ParallelSolver() : min_matrix_size(20), maxP(0), PS_cd(true),
steps(nullptr), PSM(0) {
config.get("PSMsize", min_matrix_size);
config.get("PS_maxP", maxP);
config.get("PS_checkdiagonal", PS_cd);
config.get("PS_one_calc", onecal);
config.get("PS_send_one_double", sod);
config.get("PS_latency", latency);
}
// Estimate the communication time (seconds) of recursion step s of the
// parallel solver. sol/schur hold the per-step sizes of the solution and
// Schur-complement blocks. Uses the model constants latency (per message)
// and sod (seconds per transmitted double) read from the configuration.
double ParallelSolver::estimate_COMM_time(int s, int *sol, int *schur) {
  const double P = pow(2.0, s);
  // Cost of shipping the Schur-complement block (2 * schur^2 doubles).
  const double schurCost = 2.0 * pow(schur[s], 2.0) * sod;
  if (s == 0) {
    // Base step: four latency-bound messages plus the Schur block transfer.
    return 4.0 * latency + schurCost;
  }
  // Parallel step: solution-block exchange, per-message latencies over the
  // P participating processes, and the Schur transfer amortized over s.
  double time = (pow(sol[s], 2.0) + sol[s] * schur[s]) * sod;
  time += (3.0 * P + 2.0 + 4.0) * latency;
  time += schurCost / s;
  return time;
}
// Estimate the computation time (seconds) of recursion step s of the
// parallel solver. sol/schur hold the per-step sizes of the solution and
// Schur-complement blocks; onecal is the configured seconds-per-operation
// constant. Step 0 models the serial base factorization, later steps model
// the work distributed over P = 2^s processes.
double ParallelSolver::estimate_COMP_time(int s, int *sol, int *schur) {
  const double n = sol[s];
  const double m = schur[s];
  double ops;
  if (s == 0) {
    // Serial base step: factorization plus Schur-complement updates.
    ops = pow(n, 7. / 3) + pow(n, 4. / 3) * m + pow(n, 2.) * m;
  } else {
    const double P = pow(2, s);
    // Parallel step: each term divided by the process count it is spread over.
    ops = 2. * pow(n, 3) / pow(P, 2)
        + pow(n, 3.) / P
        + pow(n, 2.) * m / pow(P, 2.)
        + 2. * pow(n, 2) * m / P
        + n * pow(m, 2.) / P;
  }
  return ops * onecal;
}
void ParallelSolver::Construct(const Matrix &A) {
......@@ -67,9 +19,6 @@ void ParallelSolver::Construct(const Matrix &A) {
if (PS_cd) S.CheckDiagonal();
Date Start;
double starttime, endtime;
starttime = MPI_Wtime();
steps = new ParallelSolverAllSteps(A);
int size = steps->size();
......@@ -79,36 +28,12 @@ void ParallelSolver::Construct(const Matrix &A) {
PSM[i] = new ParallelSolverMatrix(steps->get_step(i), min_matrix_size, maxP);
PSM[0]->Set(S);
endtime = MPI_Wtime();
tout(1) << "--- presettings for PS in seconds: " << endtime - starttime << endl;
starttime = MPI_Wtime();
for (int i = 0; i < size - 1; ++i) {
double startsteptime = MPI_Wtime();
PSM[i]->makeLU();
PSM[i]->SetNext_Matrix(*PSM[i + 1]);
double endsteptime = MPI_Wtime();
if (TimeLevel > 2) {
int Solsize = PSM[i]->solsize();
int Schursize = PSM[i]->schursize();
mout << "--- SOL: " << Solsize << "; SCH: " << Schursize << "; parSol: "
<< steps->parallel_size(i) <<
"; time in step " << i << ": " << endsteptime - startsteptime << endl;
}
}
PSM[size - 1]->makeLU();
endtime = MPI_Wtime();
tout(1) << "--- factorization with PS (size: " << steps->parallel_size() << ") in seconds: "
<< endtime - starttime << endl;
tout(1) << "ParallelMatrixSolver: total time (N = " << A.pSize() << " unknowns):"
<< Date() - Start << endl;
}
......@@ -117,22 +42,16 @@ void ParallelSolver::Destruct() {
for (int i = PSM.size() - 1; i >= 0; --i) {
delete PSM[i];
}
PSM.clear();
if (steps) delete steps;
steps = NULL;
steps = nullptr;
}
void ParallelSolver::multiply(Vector &u, const Vector &b) const {
int slaveproc;
MPI_Comm_rank(MPI_COMM_WORLD, &slaveproc);
double starttime, endtime;
int size = steps->size();
starttime = MPI_Wtime();
for (int i = 0; i < size; ++i)
PSM[i]->Create_rhs();
PSM[0]->Set_rhs(b());
for (int i = 0; i < size - 1; ++i) {
......@@ -146,27 +65,12 @@ void ParallelSolver::multiply(Vector &u, const Vector &b) const {
PSM[i]->SetNext_rhs_RIGHT(*PSM[i - 1]);
}
PSM[0]->SolveU();
PSM[0]->Write_rhs(u());
endtime = MPI_Wtime();
if (TimeLevel > 3) {
if (!slaveproc)
cout << "--- solving with PS in seconds (size: " << steps->parallel_size() << "): "
<< endtime - starttime
<< "\n";
}
}
void ParallelSolver::multiply(Vectors &U, const Vectors &B) const {
int slaveproc;
MPI_Comm_rank(MPI_COMM_WORLD, &slaveproc);
double starttime, endtime;
int size = steps->size();
starttime = MPI_Wtime();
int nrhs = U.size();
for (int i = 0; i < size; ++i)
PSM[i]->Create_rhs(nrhs);
......@@ -188,11 +92,4 @@ void ParallelSolver::multiply(Vectors &U, const Vectors &B) const {
for (int i = 0; i < nrhs; ++i)
PSM[0]->Write_rhs(U[i](), i);
endtime = MPI_Wtime();
if (TimeLevel > 3) {
if (!slaveproc)
cout << "--- solving " << size << " rhs with PS in seconds: " << endtime - starttime << "\n";
}
}
......@@ -11,15 +11,6 @@ class ParallelSolver : public Preconditioner {
int min_matrix_size;
int maxP;
bool PS_cd;
int psize;
double onecal; // time for one calculation
double sod; // sending one double
double latency;
double estimate_COMM_time(int, int *, int *);
double estimate_COMP_time(int, int *, int *);
public:
ParallelSolver();
......
......@@ -154,8 +154,8 @@ int ParallelSolverOneStep::total_Solsize() {
ParallelSolverAllSteps::ParallelSolverAllSteps(const Matrix &A) : s(0) {
vps = new VectorProcSet(A.GetVector());
vps->combine_procs();
int P = PPM->size();
vps->clear_position();
s.resize(1);
......