// broadcast.h
#pragma once

#include <mpi.h>
#include <errno.h>

#include <cstdint>
#include <vector>

#include <boost/mpi/communicator.hpp>
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/packed_oarchive.hpp>

#include <boost/serialization/string.hpp>
#include <boost/serialization/utility.hpp>
#include <boost/serialization/vector.hpp>

#include "common.h"

namespace unsafe_mpi {
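// Broadcast a std::vector<T> from rank `root` to all other ranks of `comm`.
//
// Three code paths exist, chosen at compile time:
//  1. Types that are "trivial enough" (per is_trivial_enough from common.h) are
//     reinterpreted as an array of transmit_type (default: uint64_t) and
//     broadcast without serialization; this is why sizeof(T) must be a
//     multiple of sizeof(transmit_type).
//  2. Native MPI datatypes are broadcast directly via Boost.MPI, with the size
//     sent separately to avoid vector serialization.
//  3. Everything else is packed into a Boost.MPI archive whose raw bytes are
//     broadcast as MPI_PACKED.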
template <typename T, typename transmit_type = uint64_t>
void broadcast(const boost::mpi::communicator &comm, std::vector<T> &data, int root) {
    constexpr bool trivial = is_trivial_enough<T>::value;
    static_assert(!trivial || boost::mpi::is_mpi_datatype<T>() ||
        ((sizeof(T) / sizeof(transmit_type)) * sizeof(transmit_type) == sizeof(T)),
        "Invalid transmit_type for element type (sizeof(T) is not a multiple of sizeof(transmit_type))");

    if (comm.size() < 2) return;
    if (trivial) {
        // MPI only supports "int" as the size type, and the MPI Forum's reply
        // to the issue can be summed up as "deal with it" (they point to user-
        // defined contiguous datatypes, e.g. ones that hold 1024 elements each).
        // MPI really hasn't moved on since the '90s...
        int size = static_cast<int>(data.size());
        // broadcast size and allocate space
        boost::mpi::broadcast<int>(comm, size, root);
        data.resize(size); // harmless on root, required on others
        // broadcast elements as transmit_type
        auto ptr = reinterpret_cast<transmit_type*>(data.data());
        size = static_cast<int>(size * sizeof(T)/sizeof(transmit_type));
        boost::mpi::broadcast(comm, ptr, size, root);
    } else if (boost::mpi::is_mpi_datatype<T>()) {
        // We can use Boost.MPI directly to transmit MPI datatypes
        // But send size and data separately to avoid vector serialization
        // I don't think this codepath will ever be called, as all
        // native MPI datatypes should be trivial enough.
        int size = static_cast<int>(data.size());
        boost::mpi::broadcast(comm, size, root);
        data.resize(size); // harmless on root, required on others
        boost::mpi::broadcast<T>(comm, data.data(), size, root);
    } else {
        // Boost.MPI doesn't use MPI_Bcast for types it doesn't know natively. WTF.
        // Therefore, we need to do the archive broadcast ourselves.
        if (comm.rank() == root) {
            // Serialize data
            boost::mpi::packed_oarchive oa(comm);
            oa << data;
            // Broadcast archive size
            int archive_size = static_cast<int>(oa.size());
            boost::mpi::broadcast(comm, archive_size, root);
            // Broadcast archive data
            auto sendptr = const_cast<void*>(oa.address());
            int status = MPI_Bcast(sendptr, archive_size, MPI_PACKED, root, comm);
            if (status != 0) {
                ERR << "MPI_Bcast returned non-zero value " << status
                    << ", errno: " << errno << std::endl;
            }
        } else {
            // Receive archive size and allocate space
            int archive_size;
            boost::mpi::broadcast(comm, archive_size, root);
            boost::mpi::packed_iarchive ia(comm);
            ia.resize(archive_size);
            // Receive broadcast archive data
            auto recvptr = ia.address();
            int status = MPI_Bcast(recvptr, archive_size, MPI_PACKED, root, comm);
            if (status != 0) {
                ERR << "MPI_Bcast returned non-zero value " << status
                    << ", errno: " << errno << std::endl;
                return;
            }
            // Unpack received data
            ia >> data;
        }
    }
}
}
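
// Minimal usage sketch (illustration only, not part of this header); assumes a
// Boost.MPI environment and that uint64_t counts as "trivial enough" (it is an
// MPI datatype either way, so both fast paths are valid). Run with e.g. mpirun:
//
//   #include "broadcast.h"
//
//   int main() {
//       boost::mpi::environment env;
//       boost::mpi::communicator world;
//       std::vector<uint64_t> data;
//       if (world.rank() == 0) data = {1, 2, 3};
//       unsafe_mpi::broadcast(world, data, /* root = */ 0);
//       // every rank now holds {1, 2, 3}
//   }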