Commit 1917780b authored by Thomas Steinreiter's avatar Thomas Steinreiter

* Added Clang warnings

 * Fixed most Clang warnings
 * Removed dead code
parent 110c8f90
......@@ -20,9 +20,11 @@ if (MPI_FOUND AND Boost_FOUND)
add_executable(${NAME} main.cpp Configuration.cpp Communicator.cpp FileIO.cpp MpiEnvironment.cpp MpiSubarray.cpp MpiWireworld.cpp Tile.cpp Util.cpp)
set(CMAKE_BUILD_TYPE RelWithDebInfo)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native -ftree-vectorize -flto -Wall -Wextra")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native -Wall -Wextra")
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native -Weverything -Wno-missing-prototypes -Wno-padded -Wno-c++98-compat -Wno-c++98-compat-pedantic")
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -xHost -std=c++14")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -xHost -std=c++14 -Wall")
endif()
set_target_properties(${NAME} PROPERTIES CXX_STANDARD 14 CXX_STANDARD_REQUIRED YES)
target_link_libraries(${NAME} ${MPI_LIBRARIES} ${Boost_LIBRARIES})
......
......@@ -8,7 +8,9 @@ Communicator::MpiRequest::MpiRequest(DoubleVector<MPI_Request> reqs)
: _reqs(reqs) {}
void Communicator::MpiRequest::Wait() {
MPI_Waitall(_reqs.size(), _reqs.data(), MPI_STATUSES_IGNORE);
MPI_Waitall(static_cast<int>(_reqs.size()), //
_reqs.data(), //
MPI_STATUSES_IGNORE); //
finished = true;
}
Communicator::MpiRequest::~MpiRequest() {
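The cast above is needed because `MPI_Waitall` takes an `int` count while `std::vector::size()` returns `std::size_t`; under Clang's `-Weverything` the implicit narrowing trips `-Wshorten-64-to-32`/`-Wsign-conversion`. A minimal stand-alone sketch of the pattern (the free function `WaitAll` is illustrative, not part of the repo):

```cpp
#include <mpi.h>
#include <vector>

// Wait on a batch of nonblocking requests. The explicit cast documents that
// the count fits MPI's int-based interface and keeps Clang's narrowing and
// sign-conversion warnings quiet under -Weverything.
void WaitAll(std::vector<MPI_Request>& reqs) {
    MPI_Waitall(static_cast<int>(reqs.size()), // count (MPI expects int)
                reqs.data(),                   // array of requests
                MPI_STATUSES_IGNORE);          // no per-request status needed
}
```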
......@@ -21,10 +23,11 @@ Communicator::Communicator(const MpiEnvironment& env,
const Size& tileSize)
: _commMode(commMode) {
// Begin definition of basic types
MPI_Type_contiguous(tileSize.Cols, MPI_CHAR, &_haloRowType);
MPI_Type_contiguous(static_cast<int>(tileSize.Cols), MPI_CHAR,
&_haloRowType);
MPI_Type_commit(&_haloRowType);
MPI_Type_vector(tileSize.Rows, 1, tileSize.Cols + 2, MPI_CHAR,
MPI_Type_vector(static_cast<int>(tileSize.Rows), 1, static_cast<int>(tileSize.Cols + 2), MPI_CHAR,
&_haloColumnType);
MPI_Type_commit(&_haloColumnType);
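For context, a hedged sketch of the two halo datatypes being set up here, assuming a tile of `rows` by `cols` cells stored with a one-cell ghost border, so each padded row holds `cols + 2` chars (the helper name `MakeHaloTypes` is made up for the example):

```cpp
#include <mpi.h>
#include <cstddef>

// Build halo-exchange datatypes for a rows x cols tile stored with a
// one-cell ghost border, i.e. a padded row is cols + 2 chars wide.
void MakeHaloTypes(std::size_t rows, std::size_t cols,
                   MPI_Datatype* haloRow, MPI_Datatype* haloColumn) {
    // A halo row is simply `cols` contiguous chars.
    MPI_Type_contiguous(static_cast<int>(cols), MPI_CHAR, haloRow);
    MPI_Type_commit(haloRow);

    // A halo column is one char per row, strided by the padded row width.
    MPI_Type_vector(static_cast<int>(rows),     // count: one block per row
                    1,                          // blocklength: a single char
                    static_cast<int>(cols + 2), // stride: padded row width
                    MPI_CHAR, haloColumn);
    MPI_Type_commit(haloColumn);
}
```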
......@@ -32,11 +35,11 @@ Communicator::Communicator(const MpiEnvironment& env,
// Begin definition of types/displacements for a general cell somewhere in
// the middle of the procs grid
const std::array<MPI_Datatype, NoNeighbors> generalSendTypes{
const std::array<MPI_Datatype, NoNeighbors> generalSendTypes{{
_haloCornerType, _haloRowType, _haloCornerType, //
_haloColumnType, _haloColumnType, //
_haloCornerType, _haloRowType, _haloCornerType //
};
}};
const auto tCols = tileSize.Cols;
const auto tRows = tileSize.Rows;
......@@ -45,23 +48,23 @@ Communicator::Communicator(const MpiEnvironment& env,
return static_cast<MPI_Aint>(y * (tCols + 2) + x);
};
const std::array<MPI_Aint, NoNeighbors> generalSendDisplacements{
const std::array<MPI_Aint, NoNeighbors> generalSendDisplacements{{
dp(1, 1), dp(1, 1), dp(tCols, 1), //
dp(1, 1), dp(tCols, 1), //
dp(1, tRows), dp(1, tRows), dp(tCols, tRows) //
};
}};
const std::array<MPI_Aint, NoNeighbors> generalRecvDisplacements{
const std::array<MPI_Aint, NoNeighbors> generalRecvDisplacements{{
dp(0, 0), dp(1, 0), dp(tCols + 1, 0), //
dp(0, 1), dp(tCols + 1, 1), //
dp(0, tRows + 1), dp(1, tRows + 1), dp(tCols + 1, tRows + 1) //
};
}};
const std::array<int, NoNeighbors> generalSizes{
const std::array<int, NoNeighbors> generalSizes{{
1, 1, 1, //
1, 1, //
1, 1, 1 //
};
}};
// End definition of datastructures for a general cell
// Begin definition of datastructures for this particular cell (handle the
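The extra brace level is what Clang asks for: `std::array` is an aggregate that wraps a built-in array, so with a single level of braces the Clang version targeted here emits `-Wmissing-braces` (pulled in by `-Weverything`). A tiny illustration with `int` instead of the MPI types used above:

```cpp
#include <array>

// std::array<T, N> aggregates a raw T[N] member: the outer braces initialize
// the std::array itself, the inner braces its array member. With one brace
// level Clang's -Wmissing-braces (part of -Weverything) can warn; the
// double-brace form is equivalent and warning-clean.
constexpr std::array<int, 3> singleBrace{1, 2, 3};   // may trigger -Wmissing-braces
constexpr std::array<int, 3> doubleBrace{{1, 2, 3}}; // explicit nesting, no warning
```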
......@@ -73,7 +76,7 @@ Communicator::Communicator(const MpiEnvironment& env,
};
};
const auto coord2rank = [&](Coord c) { return procsSize.Cols * c.Y + c.X; };
const auto coord2rank = [&](Coord c) { return static_cast<int>(procsSize.Cols * c.Y + c.X); };
const auto isInsideProcsGrid = [&](Coord c) {
return c.X < procsSize.Cols && c.Y < procsSize.Rows;
......@@ -104,10 +107,10 @@ Communicator::Communicator(const MpiEnvironment& env,
MPI_Dist_graph_create_adjacent(
MPI_COMM_WORLD, // comm_old
_neighbors.size(), // indegree
static_cast<int>(_neighbors.size()), // indegree
_neighbors.data(), // sources
reinterpret_cast<int*>(MPI_UNWEIGHTED), // sourceweights
_neighbors.size(), // outdegree
static_cast<int>(_neighbors.size()), // outdegree
_neighbors.data(), // destinations
reinterpret_cast<int*>(MPI_UNWEIGHTED), // destweights
MPI_INFO_NULL, // info
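A condensed, self-contained sketch of this neighborhood-communicator creation, assuming the same list of ranks is used symmetrically as sources and destinations, as it is for a halo exchange (the `MakeNeighborhood` helper is hypothetical; some MPI headers also need the `MPI_UNWEIGHTED` cast shown in the diff):

```cpp
#include <mpi.h>
#include <vector>

// Create a distributed-graph communicator whose in- and out-neighborhood is
// the same list of ranks, as needed for a symmetric halo exchange.
MPI_Comm MakeNeighborhood(std::vector<int>& neighbors) {
    MPI_Comm graphComm = MPI_COMM_NULL;
    MPI_Dist_graph_create_adjacent(
        MPI_COMM_WORLD,                     // comm_old
        static_cast<int>(neighbors.size()), // indegree (narrowed to int)
        neighbors.data(),                   // sources
        MPI_UNWEIGHTED,                     // sourceweights
        static_cast<int>(neighbors.size()), // outdegree
        neighbors.data(),                   // destinations
        MPI_UNWEIGHTED,                     // destweights
        MPI_INFO_NULL,                      // info
        0,                                  // reorder: keep rank order stable
        &graphComm);
    return graphComm;
}
```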
......@@ -127,6 +130,7 @@ Communicator::~Communicator() {
void Communicator::swap(Communicator& first, Communicator& second) {
using std::swap;
swap(first._commMode, second._commMode);
swap(first._neighbors, second._neighbors);
swap(first._sizes, second._sizes);
swap(first._sendTypes, second._sendTypes);
......@@ -167,9 +171,6 @@ void Communicator::Communicate(State* model) {
case CommunicationMode::P2P: {
AsyncCommunicate(model).Wait();
} break;
default:
MpiReportErrorAbort("Invalid Communication mode");
break;
}
}
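Dropping the `default:` branch is deliberate: when a switch over an `enum class` already names every enumerator, Clang's `-Wcovered-switch-default` (part of `-Weverything`) complains about the default label, and omitting it lets `-Wswitch` report any enumerator added later but not handled. A minimal sketch of the pattern, reusing the `CommunicationMode` values from this diff (the `ToString` helper is illustrative):

```cpp
enum class CommunicationMode { Collective, P2P };

// Covering every enumerator and omitting `default:` lets -Wswitch warn when
// a new CommunicationMode is added but not handled; a default branch would
// silently swallow it and also trips -Wcovered-switch-default.
const char* ToString(CommunicationMode m) {
    switch (m) {
    case CommunicationMode::Collective: return "Collective";
    case CommunicationMode::P2P:        return "P2P";
    }
    return "unknown"; // unreachable for valid enum values
}
```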
......@@ -191,7 +192,7 @@ Communicator::MpiRequest Communicator::AsyncCommunicate(State* model) {
_commDistGraph, // comm
&req); // request
return MpiRequest{{req}};
} break;
};
case CommunicationMode::P2P: {
Communicator::MpiRequest::DoubleVector<MPI_Request> reqs;
for (std::size_t i{0}; i < _neighbors.size(); ++i) {
......@@ -220,9 +221,6 @@ Communicator::MpiRequest Communicator::AsyncCommunicate(State* model) {
}
}
return MpiRequest{reqs};
} break;
default:
MpiReportErrorAbort("Invalid Communication mode");
break;
};
}
}
......@@ -31,6 +31,10 @@ class Communicator {
public:
MpiRequest(DoubleVector<MPI_Request> reqs);
MpiRequest(const MpiRequest&) = default;
MpiRequest(MpiRequest&&) = default;
MpiRequest& operator=(const MpiRequest&) = default;
MpiRequest& operator=(MpiRequest&&) = default;
void Wait();
~MpiRequest();
};
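Defaulting the copy and move operations spells out the full rule of five: a user-declared destructor suppresses the implicit move operations and makes use of the implicitly generated copies deprecated, which Clang reports under `-Weverything` (`-Wdeprecated` and related flags). A generic sketch with a made-up `Handle` class:

```cpp
#include <vector>

// Once a destructor is user-declared, the implicit move operations are not
// generated and relying on the implicit copy operations is deprecated, so
// all five special members are stated explicitly.
class Handle {
    std::vector<int> _data;
public:
    explicit Handle(std::vector<int> data) : _data(std::move(data)) {}
    Handle(const Handle&) = default;
    Handle(Handle&&) = default;
    Handle& operator=(const Handle&) = default;
    Handle& operator=(Handle&&) = default;
    ~Handle() {} // the user-declared destructor is what triggers the rule of five
};
```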
......
......@@ -11,12 +11,14 @@
#include <boost/program_options.hpp>
// BEGIN helper functions to parse Communication Mode cmd args
namespace {
using namespace std::string_literals;
std::array<std::pair<std::string, CommunicationMode>, 2>
StringToCommunicationMode{
StringToCommunicationMode{{
std::make_pair("Collective"s, CommunicationMode::Collective), //
std::make_pair("P2P"s, CommunicationMode::P2P) //
};
}};
}
std::istream& operator>>(std::istream& in, CommunicationMode& comm) {
std::string buf;
......@@ -70,14 +72,14 @@ auto Configuration::parseArgs(int argc, char* argv[], const MpiEnvironment& env)
// if no dimensions given, use MPI_Dims_create
if (cfg.Procs.Cols < 1 || cfg.Procs.Rows < 1) {
std::array<int, 2> dims{static_cast<int>(cfg.Procs.Cols),
static_cast<int>(cfg.Procs.Rows)};
MPI_Dims_create(env.worldSize(), // nnodes
std::array<int, 2> dims{{static_cast<int>(cfg.Procs.Cols),
static_cast<int>(cfg.Procs.Rows)}};
MPI_Dims_create(static_cast<int>(env.worldSize()), // nnodes
2, // ndims
dims.data()); // dims
cfg.Procs.Cols = dims[0];
cfg.Procs.Rows = dims[1];
cfg.Procs.Cols = static_cast<std::size_t>(dims[0]);
cfg.Procs.Rows = static_cast<std::size_t>(dims[1]);
}
// validate
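For reference, a hedged sketch of the factorization step on its own, assuming the world size now arrives as `std::size_t` from the wrapper: `dims` entries that are already positive are kept as constraints, zeros are filled in by `MPI_Dims_create`, and the `int` results are widened back (the `GridSize` and `ChooseProcessGrid` names are illustrative):

```cpp
#include <mpi.h>
#include <array>
#include <cstddef>

struct GridSize { std::size_t Cols, Rows; };

// Ask MPI to factor `worldSize` ranks into a 2D process grid. Positive
// entries in dims are kept as constraints; zero entries are filled in.
GridSize ChooseProcessGrid(std::size_t worldSize, GridSize requested) {
    std::array<int, 2> dims{{static_cast<int>(requested.Cols),
                             static_cast<int>(requested.Rows)}};
    MPI_Dims_create(static_cast<int>(worldSize), // nnodes (int in MPI)
                    2,                           // ndims
                    dims.data());                // in/out dimensions
    return {static_cast<std::size_t>(dims[0]),
            static_cast<std::size_t>(dims[1])};
}
```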
......
......@@ -55,7 +55,8 @@ void FileIO::WriteHeader(const HeaderInfo& header, const std::string& path,
MPI_File fh;
MPI_File_open(MPI_COMM_SELF, path.c_str(),
MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
MPI_File_write(fh, buf.data(), buf.size(), MPI_CHAR, MPI_STATUS_IGNORE);
MPI_File_write(fh, buf.data(), static_cast<int>(buf.size()), MPI_CHAR,
MPI_STATUS_IGNORE);
MPI_File_close(&fh);
}
......@@ -78,8 +79,7 @@ TileInfo FileIO::GetTileInfo(Size globalSize, Size procsSize,
FileIO::Tile::Tile(const std::string& path, HeaderInfo header, Size procsSize,
std::size_t rank, State* buf)
: _path(path), _headerLength(header.HeaderLength),
_srcSize(header.GlobalSize), _procsSize(procsSize), _rank(rank),
_buf(buf),
_srcSize(header.GlobalSize), _procsSize(procsSize), _buf(buf),
_tileInfo(FileIO::GetTileInfo(header.GlobalSize, procsSize, rank)),
_tileSize(_tileInfo.Size), _tileCoord(_tileInfo.GlobalCoord),
_tileType(
......@@ -95,8 +95,8 @@ void FileIO::Tile::Read() {
MPI_File file;
MPI_File_open(MPI_COMM_WORLD, _path.c_str(),
MPI_MODE_RDONLY | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &file);
MPI_File_set_view(file, _displ, MPI_CHAR, _tileType.type(), "native",
MPI_INFO_NULL);
MPI_File_set_view(file, static_cast<MPI_Offset>(_displ), MPI_CHAR,
_tileType.type(), "native", MPI_INFO_NULL);
MPI_File_read_all(file, _buf, 1, _bufType.type(), MPI_STATUS_IGNORE);
MPI_File_close(&file);
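A hedged sketch of the collective tile read, with plain `MPI_Datatype` parameters standing in for the project's subarray wrappers: the displacement stays unsigned in the class and is converted to the signed `MPI_Offset` only at the call, which is the explicitness `-Wsign-conversion` asks for (the `ReadTile` function is illustrative):

```cpp
#include <mpi.h>
#include <cstddef>
#include <string>

// Collectively read one tile of a distributed grid file. `fileType` selects
// this rank's subarray inside the file, `memType` the matching region of the
// local buffer; both would come from MPI_Type_create_subarray in practice.
void ReadTile(const std::string& path, std::size_t displ,
              MPI_Datatype fileType, MPI_Datatype memType, char* buf) {
    MPI_File file;
    MPI_File_open(MPI_COMM_WORLD, path.c_str(),
                  MPI_MODE_RDONLY | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL,
                  &file);
    // MPI_Offset is a signed integer type, so the unsigned displacement is
    // converted explicitly rather than implicitly.
    MPI_File_set_view(file, static_cast<MPI_Offset>(displ), MPI_CHAR,
                      fileType, "native", MPI_INFO_NULL);
    MPI_File_read_all(file, buf, 1, memType, MPI_STATUS_IGNORE);
    MPI_File_close(&file);
}
```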
......@@ -106,8 +106,8 @@ void FileIO::Tile::Write() const {
MPI_File file;
MPI_File_open(MPI_COMM_WORLD, _path.c_str(),
MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file);
MPI_File_set_view(file, _displ, MPI_CHAR, _tileType.type(), "native",
MPI_INFO_NULL);
MPI_File_set_view(file, static_cast<MPI_Offset>(_displ), MPI_CHAR,
_tileType.type(), "native", MPI_INFO_NULL);
MPI_File_write_all(file, _buf, 1, _bufType.type(), MPI_STATUS_IGNORE);
......@@ -127,11 +127,11 @@ void FileIO::Tile::Write() const {
_tileInfo.GlobalCoord.Y * (_srcSize.Cols + LF) +
_srcSize.Cols;
MPI_File_set_view(file, lfDisp, MPI_CHAR, lfType.type(), "native",
MPI_INFO_NULL);
MPI_File_set_view(file, static_cast<MPI_Offset>(lfDisp), MPI_CHAR,
lfType.type(), "native", MPI_INFO_NULL);
// lfs is empty for non-rightmost ranks
MPI_File_write_all(file, lfs.data(), lfs.size(), MPI_CHAR,
MPI_File_write_all(file, lfs.data(), static_cast<int>(lfs.size()), MPI_CHAR,
MPI_STATUS_IGNORE);
MPI_File_close(&file);
......
......@@ -30,7 +30,6 @@ struct FileIO {
const std::size_t _headerLength;
const Size _srcSize;
const Size _procsSize;
const std::size_t _rank;
State* _buf;
const TileInfo _tileInfo;
......
......@@ -12,14 +12,14 @@ void MpiEnvironment::swap(MpiEnvironment& first,
MpiEnvironment::MpiEnvironment(int& argc, char* argv[]) {
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &_worldRank);
MPI_Comm_size(MPI_COMM_WORLD, &_worldSize);
_worldRank = [] { int r; MPI_Comm_rank(MPI_COMM_WORLD, &r); return static_cast<std::size_t>(r); }();
_worldSize = [] { int s; MPI_Comm_size(MPI_COMM_WORLD, &s); return static_cast<std::size_t>(s); }();
_isMaster = {_worldRank == 0};
// We want the program to stop on I/O errors
MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL);
}
MpiEnvironment::~MpiEnvironment() {
if (_worldRank != -1) { MPI_Finalize(); }
if (_worldRank != std::numeric_limits<std::size_t>::max()) { MPI_Finalize(); }
}
MpiEnvironment::MpiEnvironment(MpiEnvironment&& other) noexcept {
......
#pragma once
#include <limits>
#include <mpi.h>
class MpiEnvironment { // wrapper for creating and destroying the environment
int _worldRank{-1};
int _worldSize{-1};
std::size_t _worldRank{std::numeric_limits<std::size_t>::max()};
std::size_t _worldSize{0};
bool _isMaster{false};
public:
int worldRank() const { return _worldRank; }
int worldSize() const { return _worldSize; }
std::size_t worldRank() const { return _worldRank; }
std::size_t worldSize() const { return _worldSize; }
bool isMaster() const { return _isMaster; }
void swap(MpiEnvironment& first, MpiEnvironment& second) noexcept;
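Putting the two MpiEnvironment hunks together, a condensed sketch of the resulting wrapper (the class name `MpiEnv` and the deleted copy operations are illustrative simplifications): the rank is stored as `std::size_t`, its maximum value replaces the old `-1` sentinel for a moved-from or never-initialized object, and immediately-invoked lambdas keep MPI's `int` out-parameters local to the initialization.

```cpp
#include <mpi.h>
#include <cstddef>
#include <limits>
#include <utility>

// RAII wrapper around MPI_Init/MPI_Finalize. SIZE_MAX in _worldRank marks a
// moved-from (or never-initialized) object, replacing the old -1 sentinel
// that no longer fits an unsigned type.
class MpiEnv {
    std::size_t _worldRank{std::numeric_limits<std::size_t>::max()};
    std::size_t _worldSize{0};

public:
    MpiEnv(int& argc, char* argv[]) {
        MPI_Init(&argc, &argv);
        // Immediately-invoked lambdas keep MPI's int-typed out-parameters
        // local and return the widened std::size_t values.
        _worldRank = [] { int r; MPI_Comm_rank(MPI_COMM_WORLD, &r);
                          return static_cast<std::size_t>(r); }();
        _worldSize = [] { int s; MPI_Comm_size(MPI_COMM_WORLD, &s);
                          return static_cast<std::size_t>(s); }();
    }
    MpiEnv(const MpiEnv&) = delete;
    MpiEnv& operator=(const MpiEnv&) = delete;
    MpiEnv(MpiEnv&& other) noexcept { swap(*this, other); }

    ~MpiEnv() {
        // Only the object that still owns the environment finalizes MPI.
        if (_worldRank != std::numeric_limits<std::size_t>::max()) {
            MPI_Finalize();
        }
    }

    friend void swap(MpiEnv& a, MpiEnv& b) noexcept {
        std::swap(a._worldRank, b._worldRank);
        std::swap(a._worldSize, b._worldSize);
    }

    std::size_t worldRank() const { return _worldRank; }
    std::size_t worldSize() const { return _worldSize; }
    bool isMaster() const { return _worldRank == 0; }
};
```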
......
......@@ -15,7 +15,7 @@ void MpiSubarray::swap(MpiSubarray& first, MpiSubarray& second) noexcept {
}
MpiSubarray::MpiSubarray(SubarrayDefinition sd) {
MPI_Type_create_subarray(sd.dims(), // ndims
MPI_Type_create_subarray(static_cast<int>(sd.dims()), // ndims
sd.sizes(), // array_of_sizes
sd.subSizes(), // array_of_subsizes
sd.starts(), // array_of_starts
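For context, a hedged sketch of what the wrapped call does, with plain `std::vector<int>` dimension lists standing in for `SubarrayDefinition`: the number of dimensions comes from `vector::size()` and must be narrowed to `int` (the `MakeSubarray` helper is made up for the example):

```cpp
#include <mpi.h>
#include <vector>

// Build an MPI subarray type describing a sub-block (subSizes, starting at
// starts) of a larger array (sizes), stored in C (row-major) order.
MPI_Datatype MakeSubarray(std::vector<int> sizes, std::vector<int> subSizes,
                          std::vector<int> starts) {
    MPI_Datatype type;
    MPI_Type_create_subarray(static_cast<int>(sizes.size()), // ndims
                             sizes.data(),                   // array_of_sizes
                             subSizes.data(),                // array_of_subsizes
                             starts.data(),                  // array_of_starts
                             MPI_ORDER_C,                    // row-major layout
                             MPI_CHAR,                       // element type
                             &type);
    MPI_Type_commit(&type);
    return type;
}
```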
......
......@@ -16,10 +16,10 @@ class SubarrayDefinition { // helper container for MPI Datatype creation
std::vector<int> _starts;
public:
int dims() const { return _sizes.size(); }
int* sizes() { return _sizes.data(); }
int* subSizes() { return _subSizes.data(); }
int* starts() { return _starts.data(); }
auto dims() const { return _sizes.size(); }
auto sizes() { return _sizes.data(); }
auto subSizes() { return _subSizes.data(); }
auto starts() { return _starts.data(); }
SubarrayDefinition(
std::initializer_list<SubarrayDimensionDefinition> saDimDefs);
......
......@@ -5,6 +5,7 @@
#include <iostream>
#include <mpi.h>
#include <string>
#include <limits>
#include <vector>
#include "FileIO.hpp"
......@@ -19,7 +20,7 @@ void MpiWireworld::processArea(Coord start, Size size) {
// std::size_t is unsigned. modulo arithmetics is used for calculating the
// index
const std::size_t leftOffset = -1;
const std::size_t leftOffset = std::numeric_limits<std::size_t>::max(); // -1;
const std::size_t rightOffset = 1;
const std::size_t downOffset = modelWidth;
const std::size_t upOffset = -downOffset;
......@@ -33,13 +34,11 @@ void MpiWireworld::processArea(Coord start, Size size) {
switch (currentState) {
case State::ElectronHead:
return State::ElectronTail;
break;
case State::ElectronTail:
return State::Conductor;
break;
case State::Conductor: {
const auto isHead = [&](std::size_t idx) {
return _model[idx] == State::ElectronHead ? 1 : 0;
const auto isHead = [&](std::size_t i) {
return _model[i] == State::ElectronHead ? 1 : 0;
};
const auto headCount =
isHead(idx + leftOffset + upOffset) + //
......@@ -54,10 +53,9 @@ void MpiWireworld::processArea(Coord start, Size size) {
return (1 == headCount || headCount == 2)
? State::ElectronHead
: State::Conductor;
} break;
default:
};
case State::Empty:
return currentState;
break;
}
}();
}
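To make the rewritten switch easier to follow, a stand-alone sketch of the Wireworld transition rule it implements, using the same unsigned wraparound trick: adding `SIZE_MAX` is modulo arithmetic for "subtract one", which is why the former `-1` literal is now spelled `std::numeric_limits<std::size_t>::max()`. The `State` enumerator names follow the diff; the underlying values and the `NextState` helper are assumptions for the example, and it only handles interior cells whose eight neighbours exist.

```cpp
#include <cstddef>
#include <limits>
#include <vector>

enum class State : char { Empty, Conductor, ElectronHead, ElectronTail };

// One Wireworld step for the cell at `idx` in a row-major grid `width`
// columns wide. Adding SIZE_MAX wraps around and acts as "minus one".
State NextState(const std::vector<State>& grid, std::size_t idx,
                std::size_t width) {
    const std::size_t left = std::numeric_limits<std::size_t>::max(); // -1 mod 2^N
    const std::size_t right = 1;
    const std::size_t down = width;
    const std::size_t up = std::size_t{0} - width;                    // -width mod 2^N

    switch (grid[idx]) {
    case State::ElectronHead: return State::ElectronTail;
    case State::ElectronTail: return State::Conductor;
    case State::Conductor: {
        const auto isHead = [&](std::size_t i) {
            return grid[i] == State::ElectronHead ? 1 : 0;
        };
        const auto heads = isHead(idx + left + up) + isHead(idx + up) +
                           isHead(idx + right + up) + isHead(idx + left) +
                           isHead(idx + right) + isHead(idx + left + down) +
                           isHead(idx + down) + isHead(idx + right + down);
        // A conductor becomes a head iff exactly one or two neighbours are heads.
        return (heads == 1 || heads == 2) ? State::ElectronHead
                                          : State::Conductor;
    }
    case State::Empty: return State::Empty;
    }
    return State::Empty; // unreachable for valid states
}
```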
......@@ -65,7 +63,7 @@ void MpiWireworld::processArea(Coord start, Size size) {
}
MpiWireworld::MpiWireworld(const MpiEnvironment& env, const Configuration& cfg)
: _env(env), _cfg(cfg), _tile(Tile::Read(cfg, env)),
: _tile(Tile::Read(cfg, env)),
_comm(env, cfg.CommMode, cfg.Procs, _tile.tileSize()) {
_comm.Communicate(_tile.model());
}
......
......@@ -8,8 +8,6 @@
#include "Tile.hpp"
class MpiWireworld {
const MpiEnvironment& _env;
const Configuration& _cfg;
Tile _tile;
Communicator _comm;
......
......@@ -35,10 +35,10 @@ void Tile::write() const {
}
std::ostream& operator<<(std::ostream& out, const Tile& t) {
const auto hline = [](auto& out, auto length) {
out << '+';
std::fill_n(std::ostream_iterator<char>(out), length, '-');
out << "+\n";
const auto hline = [](auto& os, auto length) {
os << '+';
std::fill_n(std::ostream_iterator<char>(os), length, '-');
os << "+\n";
};
hline(out, t.tileSize().Cols);
for (std::size_t y{1}; y <= t.tileSize().Rows; ++y) {
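Renaming the lambda parameter from `out` to `os` avoids Clang's shadowing diagnostics (`-Wshadow` and friends, pulled in by `-Weverything`), since the parameter hid the enclosing function's `out`. A tiny illustration with a hypothetical `PrintBordered` function:

```cpp
#include <iostream>

void PrintBordered(std::ostream& out) {
    // Naming the parameter `out` as well would shadow the outer `out` and
    // can trigger Clang's shadow warnings; `os` keeps both names distinct.
    const auto hline = [](std::ostream& os, int length) {
        os << '+';
        for (int i = 0; i < length; ++i) { os << '-'; }
        os << "+\n";
    };
    hline(out, 10);
}
```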
......