Commit 6849bfea authored by Thomas Steinreiter

* renaming to a safer naming convention

parent 395099d0
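The commit renames every data member from a leading-underscore name (e.g. _commDistGraph) to a trailing-underscore name (commDistGraph_), which stays clear of identifiers the C++ standard reserves for the implementation. A minimal illustrative sketch of the rule being followed (the Counter class below is hypothetical, not code from this repository):

// Hypothetical example illustrating the naming rule, not code from this commit.
// Reserved by the C++ standard:
//   _Identifier  (leading underscore + uppercase letter) -> reserved in every scope
//   __identifier (any double underscore)                  -> reserved everywhere
//   _identifier  (leading underscore + lowercase letter)  -> reserved in the global namespace
// A trailing underscore never collides with any of these rules.
class Counter {
    int count_{0};  // safe member name under the new convention
public:
    int value() const { return count_; }  // accessor keeps the plain name
    void increment() { ++count_; }
};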
#include "CollectiveCommunicator.hpp"
void CollectiveCommunicator::Communicate(State* model) {
-if (_commDistGraph == MPI_COMM_NULL)
+if (commDistGraph_ == MPI_COMM_NULL)
MpiReportErrorAbort("Communicator not initialized");
MPI_Neighbor_alltoallw(model, // sendbuf
-_sizes.data(), // sendcounts
-_sendDisplacements.data(), // sdispl
-_sendTypes.data(), // sendtypes
+sizes_.data(), // sendcounts
+sendDisplacements_.data(), // sdispl
+sendTypes_.data(), // sendtypes
model, // recvbuf
-_sizes.data(), // recvcounts
-_recvDisplacements.data(), // rdispls
-_recvTypes.data(), // recvtypes
-_commDistGraph // comm
+sizes_.data(), // recvcounts
+recvDisplacements_.data(), // rdispls
+recvTypes_.data(), // recvtypes
+commDistGraph_ // comm
);
}
Communicator::MpiRequest CollectiveCommunicator::AsyncCommunicate(State* model) {
-if (_commDistGraph == MPI_COMM_NULL)
+if (commDistGraph_ == MPI_COMM_NULL)
MpiReportErrorAbort("Communicator not initialized");
MPI_Request req;
MPI_Ineighbor_alltoallw(model, // sendbuf
-_sizes.data(), // sendcounts
-_sendDisplacements.data(), // sdispl
-_sendTypes.data(), // sendtypes
+sizes_.data(), // sendcounts
+sendDisplacements_.data(), // sdispl
+sendTypes_.data(), // sendtypes
model, // recvbuf
-_sizes.data(), // recvcounts
-_recvDisplacements.data(), // rdispls
-_recvTypes.data(), // recvtypes
-_commDistGraph, // comm
+sizes_.data(), // recvcounts
+recvDisplacements_.data(), // rdispls
+recvTypes_.data(), // recvtypes
+commDistGraph_, // comm
&req); // request
return MpiRequest{{req}};
}
@@ -5,11 +5,11 @@
#include "Communicator.hpp"
Communicator::MpiRequest::MpiRequest(DoubleVector<MPI_Request> reqs)
-: _reqs(reqs) {}
+: reqs_(reqs) {}
void Communicator::MpiRequest::Wait() {
-MPI_Waitall(static_cast<int>(_reqs.size()), //
-_reqs.data(), //
+MPI_Waitall(static_cast<int>(reqs_.size()), //
+reqs_.data(), //
MPI_STATUSES_IGNORE); //
finished = true;
}
@@ -21,24 +21,24 @@ Communicator::MpiRequest::~MpiRequest() {
Communicator::Communicator(const MpiEnvironment& env,
CommunicationMode commMode, const Size& procsSize,
const Size& tileSize)
-: _commMode(commMode) {
+: commMode_(commMode) {
// Begin definition of basic types
MPI_Type_contiguous(static_cast<int>(tileSize.Cols), MPI_CHAR,
-&_haloRowType);
-MPI_Type_commit(&_haloRowType);
+&haloRowType_);
+MPI_Type_commit(&haloRowType_);
MPI_Type_vector(static_cast<int>(tileSize.Rows), 1, static_cast<int>(tileSize.Cols + 2), MPI_CHAR,
-&_haloColumnType);
-MPI_Type_commit(&_haloColumnType);
+&haloColumnType_);
+MPI_Type_commit(&haloColumnType_);
// End definition of basic types
// Begin definition of types/displacements for a general cell somewhere in
// the middle of the procs grid
const std::array<MPI_Datatype, NoNeighbors> generalSendTypes{{
-_haloCornerType, _haloRowType, _haloCornerType, //
-_haloColumnType, _haloColumnType, //
-_haloCornerType, _haloRowType, _haloCornerType //
+haloCornerType_, haloRowType_, haloCornerType_, //
+haloColumnType_, haloColumnType_, //
+haloCornerType_, haloRowType_, haloCornerType_ //
}};
const auto tCols = tileSize.Cols;
const auto tRows = tileSize.Rows;
@@ -97,49 +97,49 @@ Communicator::Communicator(const MpiEnvironment& env,
for (std::size_t i{0}; i < NoNeighbors; ++i) {
const auto nbrCoord = generalNeighborCoords[i];
if (isInsideProcsGrid(nbrCoord)) {
-_neighbors.push_back(coord2rank(nbrCoord));
-_sendTypes.push_back(generalSendTypes[i]);
-_sendDisplacements.push_back(generalSendDisplacements[i]);
-_recvDisplacements.push_back(generalRecvDisplacements[i]);
-_sizes.push_back(generalSizes[i]);
+neighbors_.push_back(coord2rank(nbrCoord));
+sendTypes_.push_back(generalSendTypes[i]);
+sendDisplacements_.push_back(generalSendDisplacements[i]);
+recvDisplacements_.push_back(generalRecvDisplacements[i]);
+sizes_.push_back(generalSizes[i]);
}
}
MPI_Dist_graph_create_adjacent(
MPI_COMM_WORLD, // comm_old
-static_cast<int>(_neighbors.size()), // indegree
-_neighbors.data(), // sources
+static_cast<int>(neighbors_.size()), // indegree
+neighbors_.data(), // sources
reinterpret_cast<int*>(MPI_UNWEIGHTED), // sourceweights
-static_cast<int>(_neighbors.size()), // outdegree
-_neighbors.data(), // destinations
+static_cast<int>(neighbors_.size()), // outdegree
+neighbors_.data(), // destinations
reinterpret_cast<int*>(MPI_UNWEIGHTED), // destweights
MPI_INFO_NULL, // info
0, // reorder
-&_commDistGraph // comm_dist_graph
+&commDistGraph_ // comm_dist_graph
);
// End definition of datastructures for this particular cell
}
Communicator::~Communicator() {
-if (_commDistGraph != MPI_COMM_NULL) {
-MPI_Comm_free(&_commDistGraph);
-MPI_Type_free(&_haloColumnType);
-MPI_Type_free(&_haloRowType);
+if (commDistGraph_ != MPI_COMM_NULL) {
+MPI_Comm_free(&commDistGraph_);
+MPI_Type_free(&haloColumnType_);
+MPI_Type_free(&haloRowType_);
}
}
void Communicator::swap(Communicator& first, Communicator& second) {
using std::swap;
-swap(first._commMode, second._commMode);
-swap(first._neighbors, second._neighbors);
-swap(first._sizes, second._sizes);
-swap(first._sendTypes, second._sendTypes);
-swap(first._sendDisplacements, second._sendDisplacements);
-swap(first._recvDisplacements, second._recvDisplacements);
-swap(first._commDistGraph, second._commDistGraph);
-swap(first._haloRowType, second._haloRowType);
-swap(first._haloColumnType, second._haloColumnType);
-swap(first._haloCornerType, second._haloCornerType);
+swap(first.commMode_, second.commMode_);
+swap(first.neighbors_, second.neighbors_);
+swap(first.sizes_, second.sizes_);
+swap(first.sendTypes_, second.sendTypes_);
+swap(first.sendDisplacements_, second.sendDisplacements_);
+swap(first.recvDisplacements_, second.recvDisplacements_);
+swap(first.commDistGraph_, second.commDistGraph_);
+swap(first.haloRowType_, second.haloRowType_);
+swap(first.haloColumnType_, second.haloColumnType_);
+swap(first.haloCornerType_, second.haloCornerType_);
}
Communicator::Communicator(Communicator&& other) noexcept {
......
@@ -26,7 +26,7 @@ class Communicator {
boost::container::static_vector<T, NoNeighbors * 2>;
private:
-DoubleVector<MPI_Request> _reqs;
+DoubleVector<MPI_Request> reqs_;
bool finished{};
public:
@@ -40,21 +40,21 @@ class Communicator {
};
protected:
-CommunicationMode _commMode;
+CommunicationMode commMode_;
// data members for graph topology
-Vector<int> _neighbors;
-Vector<int> _sizes;
-Vector<MPI_Datatype> _sendTypes;
-const Vector<MPI_Datatype>& _recvTypes{_sendTypes};
-Vector<MPI_Aint> _sendDisplacements;
-Vector<MPI_Aint> _recvDisplacements;
-MPI_Comm _commDistGraph{MPI_COMM_NULL};
+Vector<int> neighbors_;
+Vector<int> sizes_;
+Vector<MPI_Datatype> sendTypes_;
+const Vector<MPI_Datatype>& recvTypes_{sendTypes_};
+Vector<MPI_Aint> sendDisplacements_;
+Vector<MPI_Aint> recvDisplacements_;
+MPI_Comm commDistGraph_{MPI_COMM_NULL};
// data types
-MPI_Datatype _haloRowType{};
-MPI_Datatype _haloColumnType{};
-MPI_Datatype _haloCornerType{MPI_CHAR};
+MPI_Datatype haloRowType_{};
+MPI_Datatype haloColumnType_{};
+MPI_Datatype haloCornerType_{MPI_CHAR};
public:
Communicator() = default;
......
@@ -78,54 +78,54 @@ TileInfo FileIO::GetTileInfo(Size globalSize, Size procsSize,
FileIO::Tile::Tile(const std::string& path, HeaderInfo header, Size procsSize,
std::size_t rank, State* buf)
-: _path(path), _headerLength(header.HeaderLength),
-_srcSize(header.GlobalSize), _procsSize(procsSize), _buf(buf),
-_tileInfo(FileIO::GetTileInfo(header.GlobalSize, procsSize, rank)),
-_tileSize(_tileInfo.Size), _tileCoord(_tileInfo.GlobalCoord),
-_tileType(
-MpiSubarray({{header.GlobalSize.Rows, _tileSize.Rows, 0},
-{header.GlobalSize.Cols + LF, _tileSize.Cols, 0}})),
-_bufType(MpiSubarray({{_tileSize.Rows + 2, _tileSize.Rows, 1},
-{_tileSize.Cols + 2, _tileSize.Cols, 1}})),
-_displ(header.HeaderLength +
-(header.GlobalSize.Cols + LF) * _tileCoord.Y + _tileCoord.X) {}
+: path_(path), headerLength_(header.HeaderLength),
+srcSize_(header.GlobalSize), procsSize_(procsSize), buf_(buf),
+tileInfo_(FileIO::GetTileInfo(header.GlobalSize, procsSize, rank)),
+tileSize_(tileInfo_.Size), tileCoord_(tileInfo_.GlobalCoord),
+tileType_(
+MpiSubarray({{header.GlobalSize.Rows, tileSize_.Rows, 0},
+{header.GlobalSize.Cols + LF, tileSize_.Cols, 0}})),
+bufType_(MpiSubarray({{tileSize_.Rows + 2, tileSize_.Rows, 1},
+{tileSize_.Cols + 2, tileSize_.Cols, 1}})),
+displ_(header.HeaderLength +
+(header.GlobalSize.Cols + LF) * tileCoord_.Y + tileCoord_.X) {}
void FileIO::Tile::Read() {
MPI_File file;
-MPI_File_open(MPI_COMM_WORLD, _path.c_str(),
+MPI_File_open(MPI_COMM_WORLD, path_.c_str(),
MPI_MODE_RDONLY | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &file);
-MPI_File_set_view(file, static_cast<MPI_Offset>(_displ), MPI_CHAR,
-_tileType.type(), "native", MPI_INFO_NULL);
+MPI_File_set_view(file, static_cast<MPI_Offset>(displ_), MPI_CHAR,
+tileType_.type(), "native", MPI_INFO_NULL);
-MPI_File_read_all(file, _buf, 1, _bufType.type(), MPI_STATUS_IGNORE);
+MPI_File_read_all(file, buf_, 1, bufType_.type(), MPI_STATUS_IGNORE);
MPI_File_close(&file);
}
void FileIO::Tile::Write() const {
MPI_File file;
-MPI_File_open(MPI_COMM_WORLD, _path.c_str(),
+MPI_File_open(MPI_COMM_WORLD, path_.c_str(),
MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file);
-MPI_File_set_view(file, static_cast<MPI_Offset>(_displ), MPI_CHAR,
-_tileType.type(), "native", MPI_INFO_NULL);
+MPI_File_set_view(file, static_cast<MPI_Offset>(displ_), MPI_CHAR,
+tileType_.type(), "native", MPI_INFO_NULL);
-MPI_File_write_all(file, _buf, 1, _bufType.type(), MPI_STATUS_IGNORE);
+MPI_File_write_all(file, buf_, 1, bufType_.type(), MPI_STATUS_IGNORE);
/// fix line feeds
// this is done with an collective call, but only the rightmost
// ranks actually write line feeds
// are we a rightMost tile?
-const auto rightMost = _tileInfo.ProcCoord.X == _procsSize.Cols - 1;
-const auto noLfNeeded = rightMost ? _tileSize.Rows : 0;
+const auto rightMost = tileInfo_.ProcCoord.X == procsSize_.Cols - 1;
+const auto noLfNeeded = rightMost ? tileSize_.Rows : 0;
const auto lfType = MpiSubarray( // subsize must be > 0
-{{_srcSize.Rows, std::max<std::size_t>(noLfNeeded, 1), 0},
-{_srcSize.Cols + LF, 1, 0}});
+{{srcSize_.Rows, std::max<std::size_t>(noLfNeeded, 1), 0},
+{srcSize_.Cols + LF, 1, 0}});
const std::vector<char> lfs(noLfNeeded, '\n');
-const auto lfDisp = _headerLength +
-_tileInfo.GlobalCoord.Y * (_srcSize.Cols + LF) +
-_srcSize.Cols;
+const auto lfDisp = headerLength_ +
+tileInfo_.GlobalCoord.Y * (srcSize_.Cols + LF) +
+srcSize_.Cols;
MPI_File_set_view(file, static_cast<MPI_Offset>(lfDisp), MPI_CHAR,
lfType.type(), "native", MPI_INFO_NULL);
......
@@ -26,18 +26,18 @@ struct FileIO {
// helper class to share commonly used data for reading and writing
class Tile {
static constexpr std::size_t LF = 1; // linefeed chars
-const std::string& _path;
-const std::size_t _headerLength;
-const Size _srcSize;
-const Size _procsSize;
-State* _buf;
-const TileInfo _tileInfo;
-const Size _tileSize;
-const Coord _tileCoord;
-const MpiSubarray _tileType;
-const MpiSubarray _bufType;
-const std::size_t _displ;
+const std::string& path_;
+const std::size_t headerLength_;
+const Size srcSize_;
+const Size procsSize_;
+State* buf_;
+const TileInfo tileInfo_;
+const Size tileSize_;
+const Coord tileCoord_;
+const MpiSubarray tileType_;
+const MpiSubarray bufType_;
+const std::size_t displ_;
public:
Tile(const std::string& path, HeaderInfo header, Size procsSize,
......
@@ -5,21 +5,21 @@
void MpiEnvironment::swap(MpiEnvironment& first,
MpiEnvironment& second) noexcept {
using std::swap;
-swap(first._worldRank, second._worldRank);
-swap(first._worldSize, second._worldSize);
-swap(first._isMaster, second._isMaster);
+swap(first.worldRank_, second.worldRank_);
+swap(first.worldSize_, second.worldSize_);
+swap(first.isMaster_, second.isMaster_);
}
MpiEnvironment::MpiEnvironment(int& argc, char* argv[]) {
MPI_Init(&argc, &argv);
-_worldRank = [] { int r; MPI_Comm_rank(MPI_COMM_WORLD, &r); return static_cast<std::size_t>(r); }();
-_worldSize = [] { int s; MPI_Comm_size(MPI_COMM_WORLD, &s); return static_cast<std::size_t>(s); }();
-_isMaster = {_worldRank == 0};
+worldRank_ = [] { int r; MPI_Comm_rank(MPI_COMM_WORLD, &r); return static_cast<std::size_t>(r); }();
+worldSize_ = [] { int s; MPI_Comm_size(MPI_COMM_WORLD, &s); return static_cast<std::size_t>(s); }();
+isMaster_ = {worldRank_ == 0};
// We want the program to stop on I/O errors
MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL);
}
MpiEnvironment::~MpiEnvironment() {
-if (_worldRank != std::numeric_limits<std::size_t>::max()) { MPI_Finalize(); }
+if (worldRank_ != std::numeric_limits<std::size_t>::max()) { MPI_Finalize(); }
}
MpiEnvironment::MpiEnvironment(MpiEnvironment&& other) noexcept {
......
@@ -5,14 +5,14 @@
#include <mpi.h>
class MpiEnvironment { // wrapper for creating and destroying the environment
-std::size_t _worldRank{std::numeric_limits<std::size_t>::max()};
-std::size_t _worldSize{0};
-bool _isMaster{false};
+std::size_t worldRank_{std::numeric_limits<std::size_t>::max()};
+std::size_t worldSize_{0};
+bool isMaster_{false};
public:
-std::size_t worldRank() const { return _worldRank; }
-std::size_t worldSize() const { return _worldSize; }
-bool isMaster() const { return _isMaster; }
+std::size_t worldRank() const { return worldRank_; }
+std::size_t worldSize() const { return worldSize_; }
+bool isMaster() const { return isMaster_; }
void swap(MpiEnvironment& first, MpiEnvironment& second) noexcept;
......
@@ -3,15 +3,15 @@
SubarrayDefinition::SubarrayDefinition(
std::initializer_list<SubarrayDimensionDefinition> saDimDefs) {
for (const auto& dd : saDimDefs) {
-_sizes.push_back(static_cast<int>(dd.size));
-_subSizes.push_back(static_cast<int>(dd.subSize));
-_starts.push_back(static_cast<int>(dd.start));
+sizes_.push_back(static_cast<int>(dd.size));
+subSizes_.push_back(static_cast<int>(dd.subSize));
+starts_.push_back(static_cast<int>(dd.start));
}
}
void MpiSubarray::swap(MpiSubarray& first, MpiSubarray& second) noexcept {
using std::swap;
-swap(first._type, second._type);
+swap(first.type_, second.type_);
}
MpiSubarray::MpiSubarray(SubarrayDefinition sd) {
@@ -21,12 +21,12 @@ MpiSubarray::MpiSubarray(SubarrayDefinition sd) {
sd.starts(), // array_of_starts
MPI_ORDER_C, // order
MPI_CHAR, // oldtype
-&_type // newtype
+&type_ // newtype
);
-MPI_Type_commit(&_type);
+MPI_Type_commit(&type_);
}
MpiSubarray::~MpiSubarray() {
-if (_type != MPI_DATATYPE_NULL) { MPI_Type_free(&_type); }
+if (type_ != MPI_DATATYPE_NULL) { MPI_Type_free(&type_); }
}
MpiSubarray::MpiSubarray(MpiSubarray&& other) noexcept { swap(*this, other); }
MpiSubarray& MpiSubarray::operator=(MpiSubarray&& other) noexcept {
......
@@ -11,26 +11,26 @@ struct SubarrayDimensionDefinition { // helper container
};
class SubarrayDefinition { // helper container for MPI Datatype creation
-std::vector<int> _sizes;
-std::vector<int> _subSizes;
-std::vector<int> _starts;
+std::vector<int> sizes_;
+std::vector<int> subSizes_;
+std::vector<int> starts_;
public:
-auto dims() const { return _sizes.size(); }
-auto sizes() { return _sizes.data(); }
-auto subSizes() { return _subSizes.data(); }
-auto starts() { return _starts.data(); }
+auto dims() const { return sizes_.size(); }
+auto sizes() { return sizes_.data(); }
+auto subSizes() { return subSizes_.data(); }
+auto starts() { return starts_.data(); }
SubarrayDefinition(
std::initializer_list<SubarrayDimensionDefinition> saDimDefs);
};
class MpiSubarray { // wrapper for creating and destroying the type
-MPI_Datatype _type{MPI_DATATYPE_NULL};
+MPI_Datatype type_{MPI_DATATYPE_NULL};
public:
void swap(MpiSubarray& first, MpiSubarray& second) noexcept;
-MPI_Datatype type() const { return _type; }
+MPI_Datatype type() const { return type_; }
MpiSubarray(SubarrayDefinition sd);
~MpiSubarray();
......
@@ -16,9 +16,9 @@
#include "Util.hpp"
void MpiWireworld::processArea(Coord start, Size size) {
-auto& _model = _tile.model();
-auto& _nextModel = _tile.nextModel();
-const auto modelWidth = _tile.modelWidth();
+auto& model_ = tile_.model();
+auto& nextModel_ = tile_.nextModel();
+const auto modelWidth = tile_.modelWidth();
// std::size_t is unsigned. modulo arithmetics is used for calculating the
// index
@@ -31,8 +31,8 @@ void MpiWireworld::processArea(Coord start, Size size) {
for (std::size_t x{start.X}; x < start.X + size.Cols; ++x) {
const auto idx = y * modelWidth + x;
-const auto currentState = _model[idx];
-_nextModel[idx] = [&]() {
+const auto currentState = model_[idx];
+nextModel_[idx] = [&]() {
switch (currentState) {
case State::ElectronHead:
return State::ElectronTail;
@@ -40,7 +40,7 @@
return State::Conductor;
case State::Conductor: {
const auto isHead = [&](std::size_t i) {
-return _model[i] == State::ElectronHead ? 1 : 0;
+return model_[i] == State::ElectronHead ? 1 : 0;
};
const auto headCount =
isHead(idx + leftOffset + upOffset) + //
@@ -65,54 +65,54 @@ void MpiWireworld::processArea(Coord start, Size size) {
}
MpiWireworld::MpiWireworld(const MpiEnvironment& env, const Configuration& cfg)
-: _tile(Tile::Read(cfg, env)),
-_comm([&]() -> std::unique_ptr<Communicator> {
+: tile_(Tile::Read(cfg, env)),
+comm_([&]() -> std::unique_ptr<Communicator> {
switch (cfg.CommMode) {
case CommunicationMode::Collective:
return std::make_unique<CollectiveCommunicator>(
-env, cfg.CommMode, cfg.Procs, _tile.tileSize());
+env, cfg.CommMode, cfg.Procs, tile_.tileSize());
case CommunicationMode::P2P:
return std::make_unique<P2PCommunicator>(
-env, cfg.CommMode, cfg.Procs, _tile.tileSize());
+env, cfg.CommMode, cfg.Procs, tile_.tileSize());
}
}()) {
-_comm->Communicate(_tile.model());
+comm_->Communicate(tile_.model());
}
std::ostream& operator<<(std::ostream& out, const MpiWireworld& g) {
// for now, put only our local tile
-out << g._tile;
+out << g.tile_;
return out;
}
-void MpiWireworld::write() const { _tile.write(); }
+void MpiWireworld::write() const { tile_.write(); }
void MpiWireworld::simulateStep() {
-auto& _model = _tile.model();
-auto& _nextModel = _tile.nextModel();
-const auto _tileSize = _tile.tileSize();
+auto& model_ = tile_.model();
+auto& nextModel_ = tile_.nextModel();
+const auto tileSize_ = tile_.tileSize();
//// compute the border area first, then comm
//// and compute the core async
/// border area
// top
-processArea({1, 1}, {_tileSize.Cols, 1});
+processArea({1, 1}, {tileSize_.Cols, 1});
// left and right
-processArea({1, 2}, {1, _tileSize.Rows - 2});
-processArea({_tileSize.Cols, 2}, {1, _tileSize.Rows - 2});
+processArea({1, 2}, {1, tileSize_.Rows - 2});
+processArea({tileSize_.Cols, 2}, {1, tileSize_.Rows - 2});
// bottom
-processArea({1, _tileSize.Rows}, {_tileSize.Cols, 1});
+processArea({1, tileSize_.Rows}, {tileSize_.Cols, 1});
// start communication of border while processing the core
-auto req = _comm->AsyncCommunicate(_nextModel);
+auto req = comm_->AsyncCommunicate(nextModel_);
/// core
-processArea({2, 2}, {_tileSize.Cols - 2, _tileSize.Rows - 2});
+processArea({2, 2}, {tileSize_.Cols - 2, tileSize_.Rows - 2});
req.Wait();
-std::swap(_model, _nextModel);
+std::swap(model_, nextModel_);
}
@@ -9,8 +9,8 @@
#include "Tile.hpp"
class MpiWireworld {
-Tile _tile;
-std::unique_ptr<Communicator> _comm;