Commit cf6ca9b8 authored by Thomas Steinreiter's avatar Thomas Steinreiter
Browse files

renamed grid to procs to match with the c-variant namings

parent 64d23777
......@@ -17,7 +17,7 @@ Communicator::MpiRequest::~MpiRequest() {
// defines types and graph topology
Communicator::Communicator(const MpiEnvironment& env,
CommunicationMode commMode, const Size& gridSize,
CommunicationMode commMode, const Size& procsSize,
const Size& tileSize)
: _commMode(commMode) {
// Begin definition of basic types
......@@ -31,7 +31,7 @@ Communicator::Communicator(const MpiEnvironment& env,
// End definition of basic types
// Begin definition of types/displacements for a general cell somewhere in
// the middle of the grid
// the middle of the procs grid
const std::array<MPI_Datatype, NoNeighbors> generalSendTypes{
_haloCornerType, _haloRowType, _haloCornerType, //
_haloColumnType, _haloColumnType, //
......@@ -68,15 +68,15 @@ Communicator::Communicator(const MpiEnvironment& env,
// border cases)
const auto rank2coord = [&](std::size_t rank) {
return Coord{
rank % gridSize.Cols, //
rank / gridSize.Cols //
rank % procsSize.Cols, //
rank / procsSize.Cols //
};
};
const auto coord2rank = [&](Coord c) { return gridSize.Cols * c.Y + c.X; };
const auto coord2rank = [&](Coord c) { return procsSize.Cols * c.Y + c.X; };
const auto isInsideGrid = [&](Coord c) {
return c.X < gridSize.Cols && c.Y < gridSize.Rows;
const auto isInsideProcsGrid = [&](Coord c) {
return c.X < procsSize.Cols && c.Y < procsSize.Rows;
};
const auto myCoord = rank2coord(env.worldRank());
......@@ -93,7 +93,7 @@ Communicator::Communicator(const MpiEnvironment& env,
for (std::size_t i{0}; i < NoNeighbors; ++i) {
const auto nbrCoord = generalNeighborCoords[i];
if (isInsideGrid(nbrCoord)) {
if (isInsideProcsGrid(nbrCoord)) {
_neighbors.push_back(coord2rank(nbrCoord));
_sendTypes.push_back(generalSendTypes[i]);
_sendDisplacements.push_back(generalSendDisplacements[i]);
......
......@@ -39,10 +39,10 @@ auto Configuration::parseArgs(int argc, char* argv[], const MpiEnvironment& env)
po::options_description desc{"Allowed options"};
desc.add_options()("help,h", "produce help message") //
("gridrows,r", po::value<std::size_t>(&cfg.Grid.Rows),
"number of rows in the grid") //
("gridcols,c", po::value<std::size_t>(&cfg.Grid.Cols),
"number of columns in the grid") //
("nprocs-x,x", po::value<std::size_t>(&cfg.Procs.Cols),
"number of processes in x-direction") //
("nprocs-y,y", po::value<std::size_t>(&cfg.Procs.Rows),
"number of processes in y-direction") //
("generations,g", po::value<std::size_t>(&cfg.Generations),
"number of generations simulated") //
("commmode,m", po::value<CommunicationMode>(&cfg.CommMode),
......@@ -69,19 +69,19 @@ auto Configuration::parseArgs(int argc, char* argv[], const MpiEnvironment& env)
} catch (const po::error& err) { MpiReportErrorAbort(err.what()); }
// if no dimensions given, use MPI_Dims_create
if (cfg.Grid.Cols < 1 || cfg.Grid.Rows < 1) {
std::array<int, 2> dims{static_cast<int>(cfg.Grid.Cols),
static_cast<int>(cfg.Grid.Rows)};
if (cfg.Procs.Cols < 1 || cfg.Procs.Rows < 1) {
std::array<int, 2> dims{static_cast<int>(cfg.Procs.Cols),
static_cast<int>(cfg.Procs.Rows)};
MPI_Dims_create(env.worldSize(), // nnodes
2, // ndims
dims.data()); // dims
cfg.Grid.Cols = dims[0];
cfg.Grid.Rows = dims[1];
cfg.Procs.Cols = dims[0];
cfg.Procs.Rows = dims[1];
}
// validate
const auto& totalCellCount = cfg.Grid.Cols * cfg.Grid.Rows;
const auto& totalCellCount = cfg.Procs.Cols * cfg.Procs.Rows;
const auto& worldSize = env.worldSize();
if (totalCellCount != static_cast<std::size_t>(worldSize)) {
MpiReportErrorAbort(boost::str(
......
......@@ -12,7 +12,7 @@ enum class CommunicationMode {
};
struct Configuration {
Size Grid{};
Size Procs{};
std::string InputFilePath;
std::string OutputFilePath;
std::size_t Generations{1000};
......
......@@ -59,28 +59,28 @@ void FileIO::WriteHeader(const HeaderInfo& header, const std::string& path,
MPI_File_close(&fh);
}
SizeCoord FileIO::GetTileSizeCoord(Size globalSize, Size gridSize,
SizeCoord FileIO::GetTileSizeCoord(Size globalSize, Size procsSize,
std::size_t rank) {
const auto tileX = rank % gridSize.Cols;
const auto tileY = rank / gridSize.Cols;
const auto tileX = rank % procsSize.Cols;
const auto tileY = rank / procsSize.Cols;
const auto xBeg = (tileX + 0) * globalSize.Cols / gridSize.Cols;
const auto xEnd = (tileX + 1) * globalSize.Cols / gridSize.Cols;
const auto xBeg = (tileX + 0) * globalSize.Cols / procsSize.Cols;
const auto xEnd = (tileX + 1) * globalSize.Cols / procsSize.Cols;
const auto yBeg = (tileY + 0) * globalSize.Rows / gridSize.Rows;
const auto yEnd = (tileY + 1) * globalSize.Rows / gridSize.Rows;
const auto yBeg = (tileY + 0) * globalSize.Rows / procsSize.Rows;
const auto yEnd = (tileY + 1) * globalSize.Rows / procsSize.Rows;
const auto tileSizeCols = xEnd - xBeg;
const auto tileSizeRows = yEnd - yBeg;
return {{tileSizeCols, tileSizeRows}, {xBeg, yBeg}};
}
FileIO::Tile::Tile(const std::string& path, HeaderInfo header, Size gridSize,
FileIO::Tile::Tile(const std::string& path, HeaderInfo header, Size procsSize,
std::size_t rank, State* buf)
: _path(path), _headerLength(header.HeaderLength),
_srcSize(header.GlobalSize), _gridSize(gridSize), _rank(rank), _buf(buf),
_srcSize(header.GlobalSize), _procsSize(procsSize), _rank(rank), _buf(buf),
_tileSizeCoord(
FileIO::GetTileSizeCoord(header.GlobalSize, gridSize, rank)),
FileIO::GetTileSizeCoord(header.GlobalSize, procsSize, rank)),
_tileSize(_tileSizeCoord.Size), _tileCoord(_tileSizeCoord.Coord),
_tileType(
MpiSubarray({{header.GlobalSize.Rows, _tileSize.Rows, 0},
......@@ -116,7 +116,7 @@ void FileIO::Tile::Write() const {
// ranks actually write line feeds
// are we a rightMost tile?
const auto rightMost = _tileCoord.X == _gridSize.Cols - 1;
const auto rightMost = _tileCoord.X == _procsSize.Cols - 1;
const auto noLfNeeded = rightMost ? _tileSize.Rows : 0;
const auto lfType = MpiSubarray( // subsize must be > 0
{{_srcSize.Rows, std::max<std::size_t>(noLfNeeded, 1), 0},
......
......@@ -20,7 +20,7 @@ struct FileIO {
static void WriteHeader(const HeaderInfo& header, const std::string& path,
const MpiEnvironment& env);
static SizeCoord GetTileSizeCoord(Size globalSize, Size gridSize, std::size_t rank);
static SizeCoord GetTileSizeCoord(Size globalSize, Size procsSize, std::size_t rank);
// helper class to share commonly used data for reading and writing
class Tile {
......@@ -28,7 +28,7 @@ struct FileIO {
const std::string& _path;
const std::size_t _headerLength;
const Size _srcSize;
const Size _gridSize;
const Size _procsSize;
const std::size_t _rank;
State* _buf;
......@@ -40,7 +40,7 @@ struct FileIO {
const std::size_t _displ;
public:
Tile(const std::string& path, HeaderInfo header, Size gridSize,
Tile(const std::string& path, HeaderInfo header, Size procsSize,
std::size_t rank, State* buf);
void Read();
......
......@@ -66,7 +66,7 @@ void MpiWireworld::processArea(Coord start, Size size) {
MpiWireworld::MpiWireworld(const MpiEnvironment& env, const Configuration& cfg)
: _env(env), _cfg(cfg), _tile(Tile::Read(cfg, env)),
_comm(env, cfg.CommMode, cfg.Grid, _tile.tileSize()) {
_comm(env, cfg.CommMode, cfg.Procs, _tile.tileSize()) {
_comm.Communicate(_tile.model());
}
......
# README - Wireworld Example
# README - Wireworld Example (C++ Version)
## Description
*Wireworld* is a simple cellular automaton, similar to the Game of Life. The game area is a 2-dimensional rectangle of cells. Each generation a set of rules are applied which define the next state of the cell depending on the neighboring cells.
This example shows how to distribute the simulation of Wireworld across multiple compute nodes. This includes parallel IO for the input/output files and parallel computation of the next generation.
The computation is distributed by spatially splitting the Wireworld rectangle into smaller chunks (*Tiles*), where each compute node owns one tile. At the border of each tile, there has to be communication with the neighboring tiles (**Halo Exchange**).
For a general description of *Wireworld* and the file format, see the `README.md` file in the parent directory.
This code sample demonstrates:
* Using **Collective IO** MPI functions for efficiently reading and writing from multiple nodes to the same File, i.e. `MPI_File_set_view`, `MPI_File_read_all`
......@@ -27,19 +23,7 @@ The code sample is structured as follows:
* `MpiWireworld.*`: Simulating a generation step, computing next state
* `Tile.*`: Represents a Tile, memory management, debugging
File Format:
The Wireworld file format is a text format. The first line is the header. The header has 2 positive integers, separated by a space, which define the number of *columns* (width) and the number of *rows* (height) of the Wireworld.
In the following lines, the wireworld data is provided.
Each line is a row.
There are exactly *rows* lines and each line is exactly *columns* characters long.
The following characters are allowed:
* ` `: whitespace/empty
* `#`: conductor
* `@`: electron head
* `~`: electron tail
Note: Be aware that the line ending must match your operating system's convention.
## Release Date
......@@ -84,19 +68,19 @@ Follow the compilation instructions given in the main directory of the kernel sa
To run the program, use something similar to
mpiexec -n 8 ./5_structured_wireworld -g 10000 -f ../worlds/primes.wi -o ../worlds/primes.out.wi -m Collective
mpiexec -n [nprocs] ./5_structured_wireworld ../worlds/primes.wi
either on the command line or in your batch script, where `g` specifies the number of iterations, `f` the input file, `o` the output file and `m` the communication mode.
either on the command line or in your batch script, where an inputfile must be provided.
### Command line arguments
* `-r [ --gridrows ]`: number of rows in the grid to form the tiles (optional, automatically deduced)
* `-c [ --gridcols ]`: number of columns in the grid to form the tiles (optional, automatically deduced)
* `-x [ --nprocs-x ]`: number of processes in x-direction (optional, automatically deduced)
* `-y [ --nprocs-y ]`: number of processes in y-direction (optional, automatically deduced)
* `-g [ --generations ]`: number of generations simulated (default 1000)
* `-m [ --commmode ]`: Communication Mode. Collective or P2P (default Collective)
* `-m [ --commmode ]`: Communication Mode. `Collective` or `P2P` (default Collective)
* `-f [ --inputfile ]`: path to wireworld input file (mandatory, flag can be omitted). The file dimensions must be divisible by the *grid dimensions*.
* `-o [ --outputfile ]`: path to wireworld input file (optional, no writing)
* `-o [ --outputfile ]`: path to wireworld output file (optional)
### Example
......
......@@ -11,7 +11,7 @@
Tile::Tile(const Configuration& cfg, const MpiEnvironment& env)
: _env(env), _cfg(cfg), _header(FileIO::ReadHeader(cfg.InputFilePath)), //
_tileSize(FileIO::GetTileSizeCoord(_header.GlobalSize, cfg.Grid, env.worldRank()).Size),
_tileSize(FileIO::GetTileSizeCoord(_header.GlobalSize, cfg.Procs, env.worldRank()).Size),
_modelWidth(_tileSize.Cols + 2) {
const auto bufsize = (_tileSize.Cols + 2) * (_tileSize.Rows + 2);
_memoryA.resize(bufsize);
......@@ -20,7 +20,7 @@ Tile::Tile(const Configuration& cfg, const MpiEnvironment& env)
_model = _memoryA.data();
_nextModel = _memoryB.data();
FileIO::Tile(cfg.InputFilePath, _header, cfg.Grid, _env.worldRank(), _model)
FileIO::Tile(cfg.InputFilePath, _header, cfg.Procs, _env.worldRank(), _model)
.Read();
}
......@@ -31,7 +31,7 @@ Tile Tile::Read(const Configuration& cfg, const MpiEnvironment& env) {
void Tile::write() const {
const auto& path = _cfg.OutputFilePath;
FileIO::WriteHeader(_header, path, _env);
FileIO::Tile(path, _header, _cfg.Grid, _env.worldRank(), _model).Write();
FileIO::Tile(path, _header, _cfg.Procs, _env.worldRank(), _model).Write();
}
std::ostream& operator<<(std::ostream& out, const Tile& t) {
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment